filename | text
---|---|
the-stack_106_13282
|
import json
from pprint import pprint
import pandas as pd
from influxdb import InfluxDBClient, DataFrameClient
df = pd.read_parquet("test2-large.gzip.parquet")
df = df.fillna(0)
print(df.dtypes)
print(df.index[:1])
print(len(df))
db_name = 'test_large_4'
# Testing dataframe client
# client = DataFrameClient('localhost', 8086, 'root', 'root', db_name)
# print(client)
# created = client.create_database(db_name)
# print(created)
#
# client.write_points(df, db_name, protocol="line", batch_size=100)
# print("influx done")
#
# testing json client
db_name = 'json_2'
client = InfluxDBClient('localhost', 8086, 'root', 'root', db_name)
client.create_database(db_name)
print("influx json start")
temp = []
for index, row in df.iterrows():
    body = json.loads(row.to_json())
    json_body = {
        "measurement": db_name,
        "time": index.isoformat(),
        "fields": body
    }
    temp.append(json_body)
    if len(temp) == 100:
        client.write_points(temp)
        temp = []
        print(index, index.isoformat())
    # pprint(json_body)
# write any leftover points from the final, partial batch
if temp:
    client.write_points(temp)
|
the-stack_106_13284
|
#! /usr/bin/env python3.6
"""
server.py
Stripe Sample.
Python 3.6 or newer required.
"""
import stripe
import json
import os
from flask import Flask, render_template, jsonify, request, send_from_directory
from dotenv import load_dotenv, find_dotenv
# Setup Stripe python client library
load_dotenv(find_dotenv())
stripe.api_key = os.getenv('STRIPE_SECRET_KEY')
stripe.api_version = os.getenv('STRIPE_API_VERSION')
static_dir = str(os.path.abspath(os.path.join(__file__ , "..", os.getenv("STATIC_DIR"))))
app = Flask(__name__, static_folder=static_dir,
static_url_path="", template_folder=static_dir)
@app.route('/', methods=['GET'])
def get_checkout_page():
# Display checkout page
return render_template('index.html')
@app.route('/config', methods=['GET'])
def get_PUBLISHABLE_KEY():
return jsonify({
'publicKey': os.getenv('STRIPE_PUBLISHABLE_KEY'),
'amount': os.getenv('AMOUNT'),
'currency': os.getenv('CURRENCY')
})
def calculate_order_amount(items):
# Replace this constant with a calculation of the order's amount
# Calculate the order total on the server to prevent
# people from directly manipulating the amount on the client
return os.getenv('AMOUNT')
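# A hedged illustration (not part of the Stripe sample): one way the server-side
# calculation hinted at above could look, assuming each entry in `items` is a dict
# with hypothetical `id` and `quantity` keys and prices are kept in a lookup table
# in the smallest currency unit.
# PRICE_LOOKUP = {"basic": 1000, "premium": 2500}
# def calculate_order_amount_from_items(items):
#     return sum(PRICE_LOOKUP[item["id"]] * item.get("quantity", 1) for item in items)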
@app.route('/create-payment-intent', methods=['POST'])
def create_payment():
try:
    data = json.loads(request.data)
    # Create a PaymentIntent with the order amount and currency
    intent = stripe.PaymentIntent.create(
        payment_method_types=["sepa_debit"],
        amount=calculate_order_amount(data['items']),
        currency=os.getenv('CURRENCY')
    )
    # Send publishable key and PaymentIntent details to client
    return jsonify({'publicKey': os.getenv('STRIPE_PUBLISHABLE_KEY'), 'clientSecret': intent.client_secret})
except Exception as e:
    return jsonify(error=str(e)), 403
@app.route('/webhook', methods=['POST'])
def webhook_received():
# You can use webhooks to receive information about asynchronous payment events.
# For more about our webhook events check out https://stripe.com/docs/webhooks.
webhook_secret = os.getenv('STRIPE_WEBHOOK_SECRET')
request_data = json.loads(request.data)
if webhook_secret:
# Retrieve the event by verifying the signature using the raw body and secret if webhook signing is configured.
signature = request.headers.get('stripe-signature')
try:
event = stripe.Webhook.construct_event(
payload=request.data, sig_header=signature, secret=webhook_secret)
data = event['data']
except Exception as e:
return jsonify(error=str(e)), 400
# Get the type of webhook event sent - used to check the status of PaymentIntents.
event_type = event['type']
else:
data = request_data['data']
event_type = request_data['type']
data_object = data['object']
if event_type == 'payment_intent.succeeded':
print('💰 Payment received!')
# Fulfill any orders, e-mail receipts, etc
# To cancel the payment you will need to issue a Refund (https://stripe.com/docs/api/refunds)
elif event_type == 'payment_intent.payment_failed':
print('❌ Payment failed.')
return jsonify({'status': 'success'})
if __name__ == '__main__':
app.run()
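# --- Added configuration note (not part of the original sample) ---
# All settings above come from environment variables loaded via python-dotenv.
# A minimal .env sketch with placeholder values, using only the names read by os.getenv:
# STRIPE_SECRET_KEY=sk_test_...
# STRIPE_PUBLISHABLE_KEY=pk_test_...
# STRIPE_API_VERSION=2020-08-27
# STRIPE_WEBHOOK_SECRET=whsec_...
# STATIC_DIR=../client
# AMOUNT=1099
# CURRENCY=eur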
|
the-stack_106_13287
|
from manimlib.imports import *
# By EulerTour
# https://www.patreon.com/eulertour
class Ball(Circle):
CONFIG = {
"radius": 0.4,
"fill_color": BLUE,
"fill_opacity": 1,
"color": BLUE
}
def __init__(self, ** kwargs):
Circle.__init__(self, ** kwargs)
self.velocity = np.array((2, 0, 0))
def get_top(self):
return self.get_center()[1] + self.radius
def get_bottom(self):
return self.get_center()[1] - self.radius
def get_right_edge(self):
return self.get_center()[0] + self.radius
def get_left_edge(self):
return self.get_center()[0] - self.radius
class Box(Rectangle):
CONFIG = {
"height": 6,
"width": FRAME_WIDTH - 2,
"color": GREEN_C
}
def __init__(self, ** kwargs):
Rectangle.__init__(self, ** kwargs) # Edges
self.top = 0.5 * self.height
self.bottom = -0.5 * self.height
self.right_edge = 0.5 * self.width
self.left_edge = -0.5 * self.width
class BouncingBall(Scene):
CONFIG = {
"bouncing_time": 10,
}
def construct(self):
box = Box()
ball = Ball()
self.play(FadeIn(box))
self.play(FadeIn(ball))
def update_ball(ball,dt):
ball.acceleration = np.array((0, -5, 0))
ball.velocity = ball.velocity + ball.acceleration * dt
ball.shift(ball.velocity * dt) # Bounce off ground and roof
if ball.get_bottom() <= box.bottom*0.96 or \
ball.get_top() >= box.top*0.96:
ball.velocity[1] = -ball.velocity[1]
# Bounce off walls
if ball.get_left_edge() <= box.left_edge or \
ball.get_right_edge() >= box.right_edge:
ball.velocity[0] = -ball.velocity[0]
ball.add_updater(update_ball)
self.add(ball)
self.wait(self.bouncing_time)
ball.clear_updaters()
self.wait(3)
|
the-stack_106_13288
|
import collections
import numpy as np
from generic.data_provider.nlp_utils import padder
from generic.data_provider.batchifier import AbstractBatchifier
class CLEVRBatchifier(AbstractBatchifier):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def apply(self, games):
batch = collections.defaultdict(list)
batch_size = len(games)
assert batch_size > 0
for i, game in enumerate(games):
batch["raw"].append(game)
# Get question
question = self.tokenizer.encode_question(game.question)
batch['question'].append(question)
# Get answers
answer = self.tokenizer.encode_answer(game.answer)
batch['answer'].append(answer)
# retrieve the image source type
img = game.image.get_image()
if "image" not in batch: # initialize an empty array for better memory consumption
batch["image"] = np.zeros((batch_size,) + img.shape, dtype=np.float32)
batch["image"][i] = img
# pad the questions
batch['question'], batch['seq_length'] = padder(batch['question'],
padding_symbol=self.tokenizer.padding_token)
return batch
|
the-stack_106_13289
|
import torch
import torch.nn as nn
from torch.nn import Parameter
torch.manual_seed(233)
# Decoder based models
import random
import numpy as np
class BLSTM(nn.Module):
"""
Simple hierarchical decoder. Like fastText, it first encodes the document once,
and then uses a GRU to predict the categories from a top-down approach.
"""
def __init__(self, vocab_size=1, embedding_dim=300,
category_emb_dim=64, hidden_size=200, label_size=1,
pad_token=1, position_size=500, position_dim=50, **kwargs):
"""
:param vocab_size:
:param embedding_dim:
:param category_emb_dim:
:param total_cats:
:param label_size: number of total categories
:param pad_token:
:param kwargs:
"""
super(BLSTM, self).__init__()
self.vocab_size = vocab_size
self.label_size = label_size
self.position_size = position_size
self.position_dim = position_dim
self.pad_token = pad_token
self.category_emb = category_emb_dim
self.embedding = nn.Embedding(
vocab_size,
embedding_dim,
pad_token
)
self.position_embedding = nn.Embedding(
position_size,
position_dim
)
self.category_embedding = nn.Embedding(
label_size,
category_emb_dim
)
self.word_LSTM = nn.LSTM(
input_size=embedding_dim,
hidden_size=hidden_size,
bidirectional=True
)
self.sent_LSTM = nn.LSTM(
input_size=embedding_dim,
hidden_size=hidden_size,
bidirectional=True
)
self.decoder = nn.GRU(category_emb_dim, embedding_dim, batch_first=True)
self.decoder2linear = nn.Linear(embedding_dim, label_size)
self.logSoftMax = nn.LogSoftmax(dim=1)
def init_weights(self):
initrange = 0.1
self.embedding.weight.data.uniform_(-initrange, initrange)
nn.init.xavier_normal_(
self.decoder2linear.weight,
gain=nn.init.calculate_gain('tanh')
)
def encode(self, src, src_lengths):
"""
Encode the documents
:param src: documents
:param src_lengths: length of the documents
:return:
"""
src_emb = self.embedding(src) #need to check if sentence info is preserved
src_emb = torch.mean(src_emb,1)
return src_emb
def forward(self, categories, hidden_state):
"""
:param src: document to classify
:param src_lengths: length of the documents
:param num_cats: number of times to unroll
:param categories: # keep starting category symbol as 0, such as
categories = torch.zeros(batch_size,1)
:return:
"""
cat_emb = self.category_embedding(categories)
output, hidden_state = self.decoder(cat_emb, hidden_state)
logits = self.decoder2linear(output)
out = self.logSoftMax(logits.view(-1, self.label_size))
return out, hidden_state
def batchNLLLoss(self, src, src_lengths, categories, tf_ratio=1.0):
"""
Calculate the negative log likelihood loss while predicting the categories
:param src: documents to be classified
:param src_lengths: length of the docs
:param categories: hierarchical categories
:param tf_ratio: teacher forcing ratio
:return:
"""
loss_fn = nn.NLLLoss()
loss = 0
accs = []
hidden_state = self.encode(src, src_lengths).unsqueeze(0)
cat_len = categories.size(1) - 1
out = None
use_tf = True if (random.random() < tf_ratio) else False
if use_tf:
for i in range(cat_len):
inp_cat = categories[:,i]
inp_cat = inp_cat.unsqueeze(1)
out, hidden_state = self.forward(inp_cat, hidden_state)
target_cat = categories[:,i+1]
loss += loss_fn(out, target_cat)
_, out_pred = torch.max(out.data, 1)
acc = (out_pred == target_cat.data).float().mean().item()
accs.append(acc)
else:
for i in range(cat_len):
if i == 0:
inp_cat = categories[:,i].unsqueeze(1) # starting token
else:
topv, topi = out.data.topk(1)
inp_cat = topi
out, hidden_state = self.forward(inp_cat, hidden_state)
target_cat = categories[:, i+1]
loss += loss_fn(out, target_cat)
_, out_pred = torch.max(out.data, 1)
acc = (out_pred == target_cat.data).float().mean().item()
accs.append(acc)
return loss, np.mean(accs)
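# --- Added usage sketch (not part of the original file) ---
# The docstring above describes a top-down GRU decoder over a mean-pooled document
# encoding; this is a minimal smoke test of `batchNLLLoss` with random tensors.
# All sizes below are arbitrary assumptions, not values from the original code.
if __name__ == "__main__":
    model = BLSTM(vocab_size=100, embedding_dim=300, hidden_size=200, label_size=10)
    src = torch.randint(0, 100, (4, 12))              # 4 documents, 12 tokens each
    src_lengths = torch.full((4,), 12)                # unused by encode(), kept for the signature
    categories = torch.zeros(4, 5, dtype=torch.long)  # column 0 is the start symbol
    categories[:, 1:] = torch.randint(0, 10, (4, 4))  # 4 hierarchical labels per document
    loss, acc = model.batchNLLLoss(src, src_lengths, categories)
    print(loss.item(), acc)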
|
the-stack_106_13290
|
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2019
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the boundary attack `BoundaryAttack`. This is a black-box attack which only requires class
predictions.
| Paper link: https://arxiv.org/abs/1712.04248
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Optional, Tuple, TYPE_CHECKING
import numpy as np
from tqdm.auto import tqdm, trange
from art.attacks.attack import EvasionAttack
from art.config import ART_NUMPY_DTYPE
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin
from art.utils import compute_success, to_categorical, check_and_transform_label_format
if TYPE_CHECKING:
from art.utils import CLASSIFIER_TYPE
logger = logging.getLogger(__name__)
class BoundaryAttack(EvasionAttack):
"""
Implementation of the boundary attack from Brendel et al. (2018). This is a powerful black-box attack that
only requires final class prediction.
| Paper link: https://arxiv.org/abs/1712.04248
"""
attack_params = EvasionAttack.attack_params + [
"targeted",
"delta",
"epsilon",
"step_adapt",
"max_iter",
"num_trial",
"sample_size",
"init_size",
"batch_size",
"verbose",
]
_estimator_requirements = (BaseEstimator, ClassifierMixin)
def __init__(
self,
estimator: "CLASSIFIER_TYPE",
batch_size: int = 64,
targeted: bool = True,
delta: float = 0.01,
epsilon: float = 0.01,
step_adapt: float = 0.667,
max_iter: int = 5000,
num_trial: int = 25,
sample_size: int = 20,
init_size: int = 100,
min_epsilon: Optional[float] = None,
verbose: bool = True,
) -> None:
"""
Create a boundary attack instance.
:param estimator: A trained classifier.
:param batch_size: The size of the batch used by the estimator during inference.
:param targeted: Should the attack target one specific class.
:param delta: Initial step size for the orthogonal step.
:param epsilon: Initial step size for the step towards the target.
:param step_adapt: Factor by which the step sizes are multiplied or divided, must be in the range (0, 1).
:param max_iter: Maximum number of iterations.
:param num_trial: Maximum number of trials per iteration.
:param sample_size: Number of samples per trial.
:param init_size: Maximum number of trials for initial generation of adversarial examples.
:param min_epsilon: Stop attack if perturbation is smaller than `min_epsilon`.
:param verbose: Show progress bars.
"""
super().__init__(estimator=estimator)
self._targeted = targeted
self.delta = delta
self.epsilon = epsilon
self.step_adapt = step_adapt
self.max_iter = max_iter
self.num_trial = num_trial
self.sample_size = sample_size
self.init_size = init_size
self.min_epsilon = min_epsilon
self.batch_size = batch_size
self.verbose = verbose
self._check_params()
self.curr_adv = None
def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
"""
Generate adversarial samples and return them in an array.
:param x: An array with the original inputs to be attacked.
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,). If `self.targeted` is true, then `y` represents the target labels.
:param x_adv_init: Initial array to act as initial adversarial examples. Same shape as `x`.
:type x_adv_init: `np.ndarray`
:return: An array holding the adversarial examples.
"""
y = check_and_transform_label_format(y, self.estimator.nb_classes, return_one_hot=False)
# Get clip_min and clip_max from the classifier or infer them from data
if self.estimator.clip_values is not None:
clip_min, clip_max = self.estimator.clip_values
else:
clip_min, clip_max = np.min(x), np.max(x)
# Prediction from the original images
preds = np.argmax(self.estimator.predict(x, batch_size=self.batch_size), axis=1)
# Prediction from the initial adversarial examples if not None
x_adv_init = kwargs.get("x_adv_init")
if x_adv_init is not None:
init_preds = np.argmax(self.estimator.predict(x_adv_init, batch_size=self.batch_size), axis=1)
else:
init_preds = [None] * len(x)
x_adv_init = [None] * len(x)
# Assert that, if attack is targeted, y is provided
if self.targeted and y is None:
raise ValueError("Target labels `y` need to be provided for a targeted attack.")
# Some initial setups
x_adv = x.astype(ART_NUMPY_DTYPE)
# Generate the adversarial samples
for ind, val in enumerate(tqdm(x_adv, desc="Boundary attack", disable=not self.verbose)):
if self.targeted:
x_adv[ind] = self._perturb(
x=val,
y=y[ind],
y_p=preds[ind],
init_pred=init_preds[ind],
adv_init=x_adv_init[ind],
clip_min=clip_min,
clip_max=clip_max,
)
else:
x_adv[ind] = self._perturb(
x=val,
y=-1,
y_p=preds[ind],
init_pred=init_preds[ind],
adv_init=x_adv_init[ind],
clip_min=clip_min,
clip_max=clip_max,
)
if y is not None:
y = to_categorical(y, self.estimator.nb_classes)
logger.info(
"Success rate of Boundary attack: %.2f%%",
100 * compute_success(self.estimator, x, y, x_adv, self.targeted, batch_size=self.batch_size),
)
return x_adv
def _perturb(
self,
x: np.ndarray,
y: int,
y_p: int,
init_pred: int,
adv_init: np.ndarray,
clip_min: float,
clip_max: float,
) -> np.ndarray:
"""
Internal attack function for one example.
:param x: An array with one original input to be attacked.
:param y: If `self.targeted` is true, then `y` represents the target label.
:param y_p: The predicted label of x.
:param init_pred: The predicted label of the initial image.
:param adv_init: Initial array to act as an initial adversarial example.
:param clip_min: Minimum value of an example.
:param clip_max: Maximum value of an example.
:return: An adversarial example.
"""
# First, create an initial adversarial sample
initial_sample = self._init_sample(x, y, y_p, init_pred, adv_init, clip_min, clip_max)
# If an initial adversarial example is not found, then return the original image
if initial_sample is None:
return x
# If an initial adversarial example found, then go with boundary attack
x_adv = self._attack(
initial_sample[0],
x,
y_p,
initial_sample[1],
self.delta,
self.epsilon,
clip_min,
clip_max,
)
return x_adv
def _attack(
self,
initial_sample: np.ndarray,
original_sample: np.ndarray,
y_p: int,
target: int,
initial_delta: float,
initial_epsilon: float,
clip_min: float,
clip_max: float,
) -> np.ndarray:
"""
Main function for the boundary attack.
:param initial_sample: An initial adversarial example.
:param original_sample: The original input.
:param y_p: The predicted label of the original input.
:param target: The target label.
:param initial_delta: Initial step size for the orthogonal step.
:param initial_epsilon: Initial step size for the step towards the target.
:param clip_min: Minimum value of an example.
:param clip_max: Maximum value of an example.
:return: an adversarial example.
"""
# Get initialization for some variables
x_adv = initial_sample
self.curr_delta = initial_delta
self.curr_epsilon = initial_epsilon
self.curr_adv = x_adv
# Main loop to wander around the boundary
for _ in trange(self.max_iter, desc="Boundary attack - iterations", disable=not self.verbose):
# Trust region method to adjust delta
for _ in range(self.num_trial):
potential_advs = []
for _ in range(self.sample_size):
potential_adv = x_adv + self._orthogonal_perturb(self.curr_delta, x_adv, original_sample)
potential_adv = np.clip(potential_adv, clip_min, clip_max)
potential_advs.append(potential_adv)
preds = np.argmax(
self.estimator.predict(np.array(potential_advs), batch_size=self.batch_size),
axis=1,
)
if self.targeted:
satisfied = preds == target
else:
satisfied = preds != y_p
delta_ratio = np.mean(satisfied)
if delta_ratio < 0.2:
self.curr_delta *= self.step_adapt
elif delta_ratio > 0.5:
self.curr_delta /= self.step_adapt
if delta_ratio > 0:
x_advs = np.array(potential_advs)[np.where(satisfied)[0]]
break
else:
logger.warning("Adversarial example found but not optimal.")
return x_adv
# Trust region method to adjust epsilon
for _ in range(self.num_trial):
perturb = np.repeat(np.array([original_sample]), len(x_advs), axis=0) - x_advs
perturb *= self.curr_epsilon
potential_advs = x_advs + perturb
potential_advs = np.clip(potential_advs, clip_min, clip_max)
preds = np.argmax(
self.estimator.predict(potential_advs, batch_size=self.batch_size),
axis=1,
)
if self.targeted:
satisfied = preds == target
else:
satisfied = preds != y_p
epsilon_ratio = np.mean(satisfied)
if epsilon_ratio < 0.2:
self.curr_epsilon *= self.step_adapt
elif epsilon_ratio > 0.5:
self.curr_epsilon /= self.step_adapt
if epsilon_ratio > 0:
x_adv = self._best_adv(original_sample, potential_advs[np.where(satisfied)[0]])
self.curr_adv = x_adv
break
else:
logger.warning("Adversarial example found but not optimal.")
return self._best_adv(original_sample, x_advs)
if self.min_epsilon is not None and self.curr_epsilon < self.min_epsilon:
return x_adv
return x_adv
def _orthogonal_perturb(self, delta: float, current_sample: np.ndarray, original_sample: np.ndarray) -> np.ndarray:
"""
Create an orthogonal perturbation.
:param delta: Initial step size for the orthogonal step.
:param current_sample: Current adversarial example.
:param original_sample: The original input.
:return: a possible perturbation.
"""
# Generate perturbation randomly
perturb = np.random.randn(*self.estimator.input_shape).astype(ART_NUMPY_DTYPE)
# Rescale the perturbation
perturb /= np.linalg.norm(perturb)
perturb *= delta * np.linalg.norm(original_sample - current_sample)
# Project the perturbation onto sphere
direction = original_sample - current_sample
direction_flat = direction.flatten()
perturb_flat = perturb.flatten()
direction_flat /= np.linalg.norm(direction_flat)
perturb_flat -= np.dot(perturb_flat, direction_flat.T) * direction_flat
perturb = perturb_flat.reshape(self.estimator.input_shape)
hypotenuse = np.sqrt(1 + delta ** 2)
perturb = ((1 - hypotenuse) * (current_sample - original_sample) + perturb) / hypotenuse
return perturb
def _init_sample(
self,
x: np.ndarray,
y: int,
y_p: int,
init_pred: int,
adv_init: np.ndarray,
clip_min: float,
clip_max: float,
) -> Optional[Tuple[np.ndarray, int]]:
"""
Find initial adversarial example for the attack.
:param x: An array with one original input to be attacked.
:param y: If `self.targeted` is true, then `y` represents the target label.
:param y_p: The predicted label of x.
:param init_pred: The predicted label of the initial image.
:param adv_init: Initial array to act as an initial adversarial example.
:param clip_min: Minimum value of an example.
:param clip_max: Maximum value of an example.
:return: an adversarial example.
"""
nprd = np.random.RandomState()
initial_sample = None
if self.targeted:
# Attack satisfied
if y == y_p:
return None
# Attack unsatisfied yet and the initial image satisfied
if adv_init is not None and init_pred == y:
return adv_init.astype(ART_NUMPY_DTYPE), init_pred
# Attack unsatisfied yet and the initial image unsatisfied
for _ in range(self.init_size):
random_img = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype)
random_class = np.argmax(
self.estimator.predict(np.array([random_img]), batch_size=self.batch_size),
axis=1,
)[0]
if random_class == y:
initial_sample = random_img, random_class
logger.info("Found initial adversarial image for targeted attack.")
break
else:
logger.warning("Failed to draw a random image that is adversarial, attack failed.")
else:
# The initial image satisfied
if adv_init is not None and init_pred != y_p:
return adv_init.astype(ART_NUMPY_DTYPE), init_pred
# The initial image unsatisfied
for _ in range(self.init_size):
random_img = nprd.uniform(clip_min, clip_max, size=x.shape).astype(x.dtype)
random_class = np.argmax(
self.estimator.predict(np.array([random_img]), batch_size=self.batch_size),
axis=1,
)[0]
if random_class != y_p:
initial_sample = random_img, random_class
logger.info("Found initial adversarial image for untargeted attack.")
break
else:
logger.warning("Failed to draw a random image that is adversarial, attack failed.")
return initial_sample
@staticmethod
def _best_adv(original_sample: np.ndarray, potential_advs: np.ndarray) -> np.ndarray:
"""
From the potential adversarial examples, find the one that has the minimum L2 distance from the original sample
:param original_sample: The original input.
:param potential_advs: Array containing the potential adversarial examples
:return: The adversarial example that has the minimum L2 distance from the original input
"""
shape = potential_advs.shape
min_idx = np.linalg.norm(original_sample.flatten() - potential_advs.reshape(shape[0], -1), axis=1).argmin()
return potential_advs[min_idx]
def _check_params(self) -> None:
if not isinstance(self.max_iter, (int, np.int)) or self.max_iter < 0:
raise ValueError("The number of iterations must be a non-negative integer.")
if not isinstance(self.num_trial, (int, np.int)) or self.num_trial < 0:
raise ValueError("The number of trials must be a non-negative integer.")
if not isinstance(self.sample_size, (int, np.int)) or self.sample_size <= 0:
raise ValueError("The number of samples must be a positive integer.")
if not isinstance(self.init_size, (int, np.int)) or self.init_size <= 0:
raise ValueError("The number of initial trials must be a positive integer.")
if self.epsilon <= 0:
raise ValueError("The initial step size for the step towards the target must be positive.")
if self.delta <= 0:
raise ValueError("The initial step size for the orthogonal step must be positive.")
if self.step_adapt <= 0 or self.step_adapt >= 1:
raise ValueError("The adaptation factor must be in the range (0, 1).")
if self.min_epsilon is not None and (not isinstance(self.min_epsilon, float) or self.min_epsilon <= 0):
raise ValueError("The minimum epsilon must be a positive float.")
if not isinstance(self.verbose, bool):
raise ValueError("The argument `verbose` has to be of type bool.")
|
the-stack_106_13298
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from . import transforms as T
def build_transforms(cfg, is_train=True):
if is_train:
flip_horizontal_prob = cfg.DATA.HORIZON_FLIP_PROB_TRAIN
flip_vertical_prob = cfg.DATA.VERTICAL_FLIP_PROB_TRAIN
rotation_prob = cfg.DATA.RORATION_PROB_TRAIN
rotation_degrees = cfg.DATA.ROTATION_DEGREES
brightness = cfg.DATA.BRIGHTNESS
contrast = cfg.DATA.CONTRAST
saturation = cfg.DATA.SATURATION
hue = cfg.DATA.HUE
else:
flip_horizontal_prob = 0.0
flip_vertical_prob = 0.0
rotation_prob = 0.0
rotation_degrees = 0
brightness = 0.0
contrast = 0.0
saturation = 0.0
hue = 0.0
input_size = cfg.DATA.INPUT_SIZE
to_bgr255 = cfg.DATA.TO_BGR255
normalize_transform = T.Normalize(
mean=cfg.DATA.PIXEL_MEAN, std=cfg.DATA.PIXEL_STD, to_bgr255=to_bgr255
)
color_jitter = T.ColorJitter(
brightness=brightness,
contrast=contrast,
saturation=saturation,
hue=hue,
)
transform = T.Compose(
[
color_jitter,
T.RandomResizedCrop(input_size),
T.RandomHorizontalFlip(flip_horizontal_prob),
T.RandomVerticalFlip(flip_vertical_prob),
T.RandomRotation(rotation_degrees, rotation_prob),
T.ToTensor(),
normalize_transform,
]
)
return transform
|
the-stack_106_13299
|
# Author: btjanaka (Bryon Tjanaka)
# Problem: (HackerRank) triangle-numbers
# Title: Triangle Numbers
# Link: https://www.hackerrank.com/challenges/triangle-numbers/problem
# Idea: Try solving the first 10 rows or so by hand to quickly see a pattern.
# (No need to solve the entire row, just the first four numbers or so of each
# row.)
# Difficulty: easy
# Tags: math, ad-hoc
for _ in range(int(input())):
n = int(input())
if n == 1 or n == 2:
# First two rows never have even numbers.
print(-1)
elif n % 4 == 0:
print(3)
elif n % 2 == 0:
print(4)
else:
# Odd rows always have an even number in the second position.
print(2)
|
the-stack_106_13300
|
#! python3
"""
from: https://adventofcode.com/2019/day/3
--- Part Two ---
It turns out that this circuit is very timing-sensitive; you actually need to minimize the signal
delay.
To do this, calculate the number of steps each wire takes to reach each intersection; choose the
intersection where the sum of both wires' steps is lowest. If a wire visits a position on the grid
multiple times, use the steps value from the first time it visits that position when calculating
the total value of a specific intersection.
The number of steps a wire takes is the total number of grid squares the wire has entered to get to
that location, including the intersection being considered. Again consider the example from above:
...........
.+-----+...
.|.....|...
.|..+--X-+.
.|..|..|.|.
.|.-X--+.|.
.|..|....|.
.|.......|.
.o-------+.
...........
In the above example, the intersection closest to the central port is reached after
8+5+5+2 = 20 steps by the first wire and 7+6+4+3 = 20 steps by the second wire for a total of
20+20 = 40 steps.
However, the top-right intersection is better: the first wire takes only 8+5+2 = 15 and the second
wire takes only 7+6+2 = 15, a total of 15+15 = 30 steps.
Here are the best steps for the extra examples from above:
R75,D30,R83,U83,L12,D49,R71,U7,L72
U62,R66,U55,R34,D71,R55,D58,R83 = 610 steps
R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
U98,R91,D20,R16,D67,R40,U7,R15,U6,R7 = 410 steps
What is the fewest combined steps the wires must take to reach an intersection?
"""
import os
def generate_trail(wire_directions):
"""Given a list of directions, generate a set and list of positions for the wire's path"""
set_trail = set()
list_trail = list()
current_location = (0, 0)
for direction in wire_directions:
heading = direction[0]
distance = int(direction[1:])
while distance > 0:
if heading == "R":
current_location = (current_location[0] + 1, current_location[1])
elif heading == "L":
current_location = (current_location[0] - 1, current_location[1])
elif heading == "U":
current_location = (current_location[0], current_location[1] + 1)
elif heading == "D":
current_location = (current_location[0], current_location[1] - 1)
else:
raise Exception
set_trail.add(current_location)
list_trail.append(current_location)
distance -= 1
return (set_trail, list_trail)
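# A small worked example (mine, not from the original file): for the directions
# ["R2", "U2"] the wire visits (1, 0), (2, 0), (2, 1), (2, 2); the set is used for
# fast intersection tests and the list preserves visit order so .index(cross) + 1
# gives the step count to a crossing.
# >>> generate_trail(["R2", "U2"])[1]
# [(1, 0), (2, 0), (2, 1), (2, 2)]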
def main():
"""Solve the problem!"""
script_dir = os.path.dirname(__file__)
file_path = os.path.join(script_dir, './input.txt')
with open(file_path) as input_file:
wire1_directions = input_file.readline().split(",")
wire2_directions = input_file.readline().split(",")
wire1_points, wire1_path = generate_trail(wire1_directions)
wire2_points, wire2_path = generate_trail(wire2_directions)
crosses = wire1_points.intersection(wire2_points)
min_distance = None
for cross in crosses:
wire1_steps = wire1_path.index(cross) + 1
wire2_steps = wire2_path.index(cross) + 1
distance = wire1_steps + wire2_steps
if(min_distance is None or distance < min_distance):
min_distance = distance
print(min_distance)
if __name__ == "__main__":
main()
|
the-stack_106_13301
|
"""Test for DSMR components.
Tests setup of the DSMR component and ensure incoming telegrams cause
Entity to be updated with new values.
"""
import asyncio
import datetime
from decimal import Decimal
from unittest.mock import Mock
import asynctest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.dsmr.sensor import DerivativeDSMREntity
import pytest
from tests.common import assert_setup_component
@pytest.fixture
def mock_connection_factory(monkeypatch):
"""Mock the create functions for serial and TCP Asyncio connections."""
from dsmr_parser.clients.protocol import DSMRProtocol
transport = asynctest.Mock(spec=asyncio.Transport)
protocol = asynctest.Mock(spec=DSMRProtocol)
@asyncio.coroutine
def connection_factory(*args, **kwargs):
"""Return mocked out Asyncio classes."""
return (transport, protocol)
connection_factory = Mock(wraps=connection_factory)
# apply the mock to both connection factories
monkeypatch.setattr(
'dsmr_parser.clients.protocol.create_dsmr_reader',
connection_factory)
monkeypatch.setattr(
'dsmr_parser.clients.protocol.create_tcp_dsmr_reader',
connection_factory)
return connection_factory, transport, protocol
@asyncio.coroutine
def test_default_setup(hass, mock_connection_factory):
"""Test the default setup."""
(connection_factory, transport, protocol) = mock_connection_factory
from dsmr_parser.obis_references import (
CURRENT_ELECTRICITY_USAGE,
ELECTRICITY_ACTIVE_TARIFF,
)
from dsmr_parser.objects import CosemObject
config = {'platform': 'dsmr'}
telegram = {
CURRENT_ELECTRICITY_USAGE: CosemObject([
{'value': Decimal('0.0'), 'unit': 'kWh'}
]),
ELECTRICITY_ACTIVE_TARIFF: CosemObject([
{'value': '0001', 'unit': ''}
]),
}
with assert_setup_component(1):
yield from async_setup_component(hass, 'sensor',
{'sensor': config})
telegram_callback = connection_factory.call_args_list[0][0][2]
# make sure entities have been created and return 'unknown' state
power_consumption = hass.states.get('sensor.power_consumption')
assert power_consumption.state == 'unknown'
assert power_consumption.attributes.get('unit_of_measurement') is None
# simulate a telegram pushed from the smartmeter and parsed by dsmr_parser
telegram_callback(telegram)
# after receiving telegram entities need to have the chance to update
yield from asyncio.sleep(0)
# ensure entities have new state value after incoming telegram
power_consumption = hass.states.get('sensor.power_consumption')
assert power_consumption.state == '0.0'
assert power_consumption.attributes.get('unit_of_measurement') == 'kWh'
# tariff should be translated in human readable and have no unit
power_tariff = hass.states.get('sensor.power_tariff')
assert power_tariff.state == 'low'
assert power_tariff.attributes.get('unit_of_measurement') == ''
@asyncio.coroutine
def test_derivative():
"""Test calculation of derivative value."""
from dsmr_parser.objects import MBusObject
config = {'platform': 'dsmr'}
entity = DerivativeDSMREntity('test', '1.0.0', config)
yield from entity.async_update()
assert entity.state is None, 'initial state not unknown'
entity.telegram = {
'1.0.0': MBusObject([
{'value': datetime.datetime.fromtimestamp(1551642213)},
{'value': Decimal(745.695), 'unit': 'm3'},
])
}
yield from entity.async_update()
assert entity.state is None, \
'state after first update should still be unknown'
entity.telegram = {
'1.0.0': MBusObject([
{'value': datetime.datetime.fromtimestamp(1551642543)},
{'value': Decimal(745.698), 'unit': 'm3'},
])
}
yield from entity.async_update()
assert abs(entity.state - 0.033) < 0.00001, \
'state should be hourly usage calculated from first and second update'
assert entity.unit_of_measurement == 'm3/h'
@asyncio.coroutine
def test_tcp(hass, mock_connection_factory):
"""If proper config provided TCP connection should be made."""
(connection_factory, transport, protocol) = mock_connection_factory
config = {
'platform': 'dsmr',
'host': 'localhost',
'port': 1234,
}
with assert_setup_component(1):
yield from async_setup_component(hass, 'sensor',
{'sensor': config})
assert connection_factory.call_args_list[0][0][0] == 'localhost'
assert connection_factory.call_args_list[0][0][1] == '1234'
@asyncio.coroutine
def test_connection_errors_retry(hass, monkeypatch, mock_connection_factory):
"""Connection should be retried on error during setup."""
(connection_factory, transport, protocol) = mock_connection_factory
config = {
'platform': 'dsmr',
'reconnect_interval': 0,
}
# override the mock to have it fail the first time
first_fail_connection_factory = Mock(
wraps=connection_factory, side_effect=[
TimeoutError])
monkeypatch.setattr(
'dsmr_parser.clients.protocol.create_dsmr_reader',
first_fail_connection_factory)
yield from async_setup_component(hass, 'sensor', {'sensor': config})
# wait for sleep to resolve
yield from hass.async_block_till_done()
assert first_fail_connection_factory.call_count == 2, \
'connecting not retried'
@asyncio.coroutine
def test_reconnect(hass, monkeypatch, mock_connection_factory):
"""If transport disconnects, the connection should be retried."""
(connection_factory, transport, protocol) = mock_connection_factory
config = {
'platform': 'dsmr',
'reconnect_interval': 0,
}
# mock waiting coroutine while connection lasts
closed = asyncio.Event()
# Handshake so that `hass.async_block_till_done()` doesn't cycle forever
closed2 = asyncio.Event()
@asyncio.coroutine
def wait_closed():
yield from closed.wait()
closed2.set()
closed.clear()
protocol.wait_closed = wait_closed
yield from async_setup_component(hass, 'sensor', {'sensor': config})
assert connection_factory.call_count == 1
# indicate disconnect, release wait lock and allow reconnect to happen
closed.set()
# wait for lock set to resolve
yield from closed2.wait()
closed2.clear()
assert not closed.is_set()
closed.set()
yield from hass.async_block_till_done()
assert connection_factory.call_count >= 2, \
'connecting not retried'
|
the-stack_106_13303
|
# Copyright Google LLC All Rights Reserved.
#
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE file at https://angular.io/license
"""Run Angular's AOT template compiler
"""
load("//packages/bazel/src/ng_module:partial_compilation.bzl", "NgPartialCompilationInfo")
load(
"//packages/bazel/src:external.bzl",
"COMMON_ATTRIBUTES",
"COMMON_OUTPUTS",
"DEFAULT_NG_COMPILER",
"DEFAULT_NG_XI18N",
"DEPS_ASPECTS",
"LinkablePackageInfo",
"NpmPackageInfo",
"TsConfigInfo",
"compile_ts",
"js_ecma_script_module_info",
"js_module_info",
"js_named_module_info",
"node_modules_aspect",
"ts_providers_dict_to_struct",
"tsc_wrapped_tsconfig",
)
# enable_perf_logging controls whether Ivy's performance tracing system will be enabled for any
# compilation which includes this provider.
NgPerfInfo = provider(fields = ["enable_perf_logging"])
def is_perf_requested(ctx):
return ctx.attr.perf_flag != None and ctx.attr.perf_flag[NgPerfInfo].enable_perf_logging == True
def _is_partial_compilation_enabled(ctx):
"""Whether partial compilation is enabled for this target."""
return ctx.attr._partial_compilation_flag[NgPartialCompilationInfo].enabled
def _get_ivy_compilation_mode(ctx):
"""Gets the Ivy compilation mode based on the current build settings."""
return "partial" if _is_partial_compilation_enabled(ctx) else "full"
def _basename_of(ctx, file):
ext_len = len(".ts")
if file.short_path.endswith(".ng.html"):
ext_len = len(".ng.html")
elif file.short_path.endswith(".html"):
ext_len = len(".html")
return file.short_path[len(ctx.label.package) + 1:-ext_len]
# Return true if run with bazel (the open-sourced version of blaze), false if
# run with blaze.
def _is_bazel():
return not hasattr(native, "genmpm") # this_is_bazel
def _flat_module_out_file(ctx):
"""Provide a default for the flat_module_out_file attribute.
We cannot use the default="" parameter of ctx.attr because the value is calculated
from other attributes (name)
Args:
ctx: skylark rule execution context
Returns:
a basename used for the flat module out (no extension)
"""
if getattr(ctx.attr, "flat_module_out_file", False):
return ctx.attr.flat_module_out_file
return "%s_public_index" % ctx.label.name
def _should_produce_flat_module_outs(ctx):
"""Should we produce flat module outputs.
We only produce flat module outs when we expect the ng_module is meant to be published,
based on the presence of the module_name attribute.
Args:
ctx: skylark rule execution context
Returns:
true iff we should run the bundle_index_host to produce flat module metadata and bundle index
"""
return _is_bazel() and ctx.attr.module_name
# Calculate the expected output of the template compiler for every source
# in the library. Most of these will be produced as empty files but it is
# unknown, without parsing, which will be empty.
def _expected_outs(ctx):
devmode_js_files = []
closure_js_files = []
declaration_files = []
transpilation_infos = []
flat_module_out_prodmode_file = None
factory_basename_set = depset([_basename_of(ctx, src) for src in ctx.files.factories])
for src in ctx.files.srcs + ctx.files.assets:
package_prefix = ctx.label.package + "/" if ctx.label.package else ""
# Strip external repository name from path if src is from external repository
# If src is from external repository, it's short_path will be ../<external_repo_name>/...
short_path = src.short_path if src.short_path[0:2] != ".." else "/".join(src.short_path.split("/")[2:])
if short_path.endswith(".ts") and not short_path.endswith(".d.ts"):
basename = short_path[len(package_prefix):-len(".ts")]
if (len(factory_basename_set.to_list()) == 0 or basename in factory_basename_set.to_list()):
if _generate_ve_shims(ctx):
devmode_js = [
".ngfactory.js",
".ngsummary.js",
".js",
]
else:
devmode_js = [".js"]
# Only ngc produces .json files, they're not needed in Ivy.
else:
devmode_js = [".js"]
if not _is_bazel():
devmode_js += [".ngfactory.js"]
else:
continue
filter_summaries = ctx.attr.filter_summaries
declarations = [f.replace(".js", ".d.ts") for f in devmode_js]
for devmode_ext in devmode_js:
devmode_js_file = ctx.actions.declare_file(basename + devmode_ext)
devmode_js_files.append(devmode_js_file)
if not filter_summaries or not devmode_ext.endswith(".ngsummary.js"):
closure_ext = devmode_ext.replace(".js", ".mjs")
closure_js_file = ctx.actions.declare_file(basename + closure_ext)
closure_js_files.append(closure_js_file)
transpilation_infos.append(struct(closure = closure_js_file, devmode = devmode_js_file))
declaration_files += [ctx.actions.declare_file(basename + ext) for ext in declarations]
# We do this just when producing a flat module index for a publishable ng_module
if _should_produce_flat_module_outs(ctx):
flat_module_out_name = _flat_module_out_file(ctx)
# Note: We keep track of the prodmode flat module output for `ng_packager` which
# uses it as entry-point for producing FESM bundles.
# TODO: Remove flat module from `ng_module` and detect package entry-point reliably
# in Ivy. Related discussion: https://github.com/angular/angular/pull/36971#issuecomment-625282383.
flat_module_out_prodmode_file = ctx.actions.declare_file("%s.mjs" % flat_module_out_name)
closure_js_files.append(flat_module_out_prodmode_file)
devmode_js_files.append(ctx.actions.declare_file("%s.js" % flat_module_out_name))
bundle_index_typings = ctx.actions.declare_file("%s.d.ts" % flat_module_out_name)
declaration_files.append(bundle_index_typings)
else:
bundle_index_typings = None
dev_perf_files = []
prod_perf_files = []
# In Ivy mode, dev and prod builds both produce a .json output containing performance metrics
# from the compiler for that build.
if is_perf_requested(ctx):
dev_perf_files = [ctx.actions.declare_file(ctx.label.name + "_perf_dev.json")]
prod_perf_files = [ctx.actions.declare_file(ctx.label.name + "_perf_prod.json")]
return struct(
closure_js = closure_js_files,
devmode_js = devmode_js_files,
declarations = declaration_files,
transpilation_infos = transpilation_infos,
bundle_index_typings = bundle_index_typings,
dev_perf_files = dev_perf_files,
prod_perf_files = prod_perf_files,
flat_module_out_prodmode_file = flat_module_out_prodmode_file,
)
# Determines if we need to generate View Engine shims (.ngfactory and .ngsummary files)
def _generate_ve_shims(ctx):
return _is_bazel() and getattr(ctx.attr, "generate_ve_shims", False) == True
def _ngc_tsconfig(ctx, files, srcs, **kwargs):
generate_ve_shims = _generate_ve_shims(ctx)
compilation_mode = _get_ivy_compilation_mode(ctx)
is_devmode = "devmode_manifest" in kwargs
outs = _expected_outs(ctx)
if is_devmode:
expected_outs = outs.devmode_js + outs.declarations
else:
expected_outs = outs.closure_js
if not ctx.attr.type_check and ctx.attr.strict_templates:
fail("Cannot set type_check = False and strict_templates = True for ng_module()")
if ctx.attr.experimental_extended_template_diagnostics and not ctx.attr.strict_templates:
fail("Cannot set `experimental_extended_template_diagnostics = True` **and** `strict_templates = False` for `ng_module()`")
angular_compiler_options = {
"enableResourceInlining": ctx.attr.inline_resources,
"generateCodeForLibraries": False,
"allowEmptyCodegenFiles": True,
"generateNgFactoryShims": True if generate_ve_shims else False,
"generateNgSummaryShims": True if generate_ve_shims else False,
"fullTemplateTypeCheck": ctx.attr.type_check,
"strictTemplates": ctx.attr.strict_templates,
"compilationMode": compilation_mode,
# In Google3 we still want to use the symbol factory re-exports in order to
# not break existing apps inside Google. Unlike Bazel, Google3 does not only
# enforce strict dependencies of source files, but also for generated files
# (such as the factory files). Therefore in order to avoid that generated files
# introduce new module dependencies (which aren't explicitly declared), we need
# to enable external symbol re-exports by default when running with Blaze.
"createExternalSymbolFactoryReexports": (not _is_bazel()),
# FIXME: wrong place to de-dupe
"expectedOut": depset([o.path for o in expected_outs]).to_list(),
# We instruct the compiler to use the host for import generation in Blaze. By default,
# module names between source files of the same compilation unit are relative paths. This
# is not desired in google3 where the generated module names are used as qualified names
# for aliased exports. We disable relative paths and always use manifest paths in google3.
"_useHostForImportGeneration": (not _is_bazel()),
"_useManifestPathsAsModuleName": (not _is_bazel()),
}
if is_perf_requested(ctx):
# In Ivy mode, set the `tracePerformance` Angular compiler option to enable performance
# metric output.
if is_devmode:
perf_path = outs.dev_perf_files[0].path
else:
perf_path = outs.prod_perf_files[0].path
angular_compiler_options["tracePerformance"] = perf_path
if _should_produce_flat_module_outs(ctx):
angular_compiler_options["flatModuleId"] = ctx.attr.module_name
angular_compiler_options["flatModuleOutFile"] = _flat_module_out_file(ctx)
angular_compiler_options["flatModulePrivateSymbolPrefix"] = "_".join(
[ctx.workspace_name] + ctx.label.package.split("/") + [ctx.label.name, ""],
)
tsconfig = dict(tsc_wrapped_tsconfig(ctx, files, srcs, **kwargs), **{
"angularCompilerOptions": angular_compiler_options,
})
# For prodmode, the compilation target is set to `ES2020`. `@bazel/typescript`
# using the `create_tsconfig` function sets `ES2015` by default.
# https://github.com/bazelbuild/rules_nodejs/blob/901df3868e3ceda177d3ed181205e8456a5592ea/third_party/github.com/bazelbuild/rules_typescript/internal/common/tsconfig.bzl#L195
# TODO(devversion): In the future, combine prodmode and devmode so we can get rid of the
# ambiguous terminology and concept that can result in slow-down for development workflows.
if not is_devmode:
# Note: Keep in sync with the `prodmode_target` for `ts_library` in `tools/defaults.bzl`
tsconfig["compilerOptions"]["target"] = "es2020"
else:
# For devmode output, we use ES2015 to match with what `ts_library` produces by default.
# https://github.com/bazelbuild/rules_nodejs/blob/9b36274dba34204625579463e3da054a9f42cb47/packages/typescript/internal/build_defs.bzl#L83.
tsconfig["compilerOptions"]["target"] = "es2015"
return tsconfig
# Extra options passed to Node when running ngc.
_EXTRA_NODE_OPTIONS_FLAGS = [
# Expose the v8 garbage collection API to JS.
"--node_options=--expose-gc",
# Show ~full stack traces, instead of cutting off after 10 items.
"--node_options=--stack-trace-limit=100",
# Give 4 GB RAM to node to allow bigger google3 modules to compile.
"--node_options=--max-old-space-size=4096",
]
def ngc_compile_action(
ctx,
label,
inputs,
outputs,
tsconfig_file,
node_opts,
locale = None,
i18n_args = [],
target_flavor = "prodmode"):
"""Helper function to create the ngc action.
This is exposed for google3 to wire up i18n replay rules, and is not intended
as part of the public API.
Args:
ctx: skylark context
label: the label of the ng_module being compiled
inputs: passed to the ngc action's inputs
outputs: passed to the ngc action's outputs
tsconfig_file: tsconfig file with settings used for the compilation
node_opts: list of strings, extra nodejs options.
locale: i18n locale, or None
i18n_args: additional command-line arguments to ngc
target_flavor: Whether prodmode or devmode output is being built.
Returns:
the parameters of the compilation which will be used to replay the ngc action for i18N.
"""
ngc_compilation_mode = "%s %s" % (_get_ivy_compilation_mode(ctx), target_flavor)
mnemonic = "AngularTemplateCompile"
progress_message = "Compiling Angular templates (%s) %s" % (
ngc_compilation_mode,
label,
)
if locale:
mnemonic = "AngularI18NMerging"
supports_workers = "0"
progress_message = ("Recompiling Angular templates (ngc - %s) %s for locale %s" %
(target_flavor, label, locale))
else:
supports_workers = str(int(ctx.attr._supports_workers))
arguments = (list(_EXTRA_NODE_OPTIONS_FLAGS) +
["--node_options=%s" % opt for opt in node_opts])
# One at-sign makes this a params-file, enabling the worker strategy.
# Two at-signs escapes the argument so it's passed through to ngc
# rather than the contents getting expanded.
if supports_workers == "1":
arguments += ["@@" + tsconfig_file.path]
else:
arguments += ["-p", tsconfig_file.path]
arguments += i18n_args
ctx.actions.run(
progress_message = progress_message,
mnemonic = mnemonic,
inputs = inputs,
outputs = outputs,
arguments = arguments,
executable = ctx.executable.compiler,
execution_requirements = {
"supports-workers": supports_workers,
},
)
if not locale and not ctx.attr.no_i18n:
return struct(
label = label,
tsconfig = tsconfig_file,
inputs = inputs,
outputs = outputs,
compiler = ctx.executable.compiler,
)
return None
def _filter_ts_inputs(all_inputs):
# The compiler only needs to see TypeScript sources from the npm dependencies,
# but may need to look at package.json files as well.
return [
f
for f in all_inputs
if f.path.endswith(".js") or f.path.endswith(".ts") or f.path.endswith(".json")
]
def _compile_action(
ctx,
inputs,
outputs,
tsconfig_file,
node_opts,
target_flavor):
# Give the Angular compiler all the user-listed assets
file_inputs = list(ctx.files.assets)
if (type(inputs) == type([])):
file_inputs.extend(inputs)
else:
# inputs ought to be a list, but allow depset as well
# so that this can change independently of rules_typescript
# TODO(alexeagle): remove this case after update (July 2019)
file_inputs.extend(inputs.to_list())
if hasattr(ctx.attr, "node_modules"):
file_inputs.extend(_filter_ts_inputs(ctx.files.node_modules))
# If the user supplies a tsconfig.json file, the Angular compiler needs to read it
if hasattr(ctx.attr, "tsconfig") and ctx.file.tsconfig:
file_inputs.append(ctx.file.tsconfig)
if TsConfigInfo in ctx.attr.tsconfig:
file_inputs += ctx.attr.tsconfig[TsConfigInfo].deps
# Also include files from npm fine grained deps as action_inputs.
# These deps are identified by the NpmPackageInfo provider.
for d in ctx.attr.deps:
if NpmPackageInfo in d:
# Note: we can't avoid calling .to_list() on sources
file_inputs.extend(_filter_ts_inputs(d[NpmPackageInfo].sources.to_list()))
# Collect the inputs and summary files from our deps
action_inputs = depset(file_inputs)
return ngc_compile_action(ctx, ctx.label, action_inputs, outputs, tsconfig_file, node_opts, None, [], target_flavor)
def _prodmode_compile_action(ctx, inputs, outputs, tsconfig_file, node_opts):
outs = _expected_outs(ctx)
return _compile_action(ctx, inputs, outputs + outs.closure_js + outs.prod_perf_files, tsconfig_file, node_opts, "prodmode")
def _devmode_compile_action(ctx, inputs, outputs, tsconfig_file, node_opts):
outs = _expected_outs(ctx)
compile_action_outputs = outputs + outs.devmode_js + outs.declarations + outs.dev_perf_files
_compile_action(ctx, inputs, compile_action_outputs, tsconfig_file, node_opts, "devmode")
# Note: We need to define `label` and `srcs_files` as `tsc_wrapped` passes
# them and Starlark would otherwise error at runtime.
# buildifier: disable=unused-variable
def _ts_expected_outs(ctx, label, srcs_files = []):
return _expected_outs(ctx)
def ng_module_impl(ctx, ts_compile_actions):
"""Implementation function for the ng_module rule.
This is exposed so that google3 can have its own entry point that re-uses this
and is not meant as a public API.
Args:
ctx: the skylark rule context
ts_compile_actions: generates all the actions to run an ngc compilation
Returns:
the result of the ng_module rule as a dict, suitable for
conversion by ts_providers_dict_to_struct
"""
providers = ts_compile_actions(
ctx,
is_library = True,
compile_action = _prodmode_compile_action,
devmode_compile_action = _devmode_compile_action,
tsc_wrapped_tsconfig = _ngc_tsconfig,
outputs = _ts_expected_outs,
)
outs = _expected_outs(ctx)
providers["angular"] = {}
if _should_produce_flat_module_outs(ctx):
providers["angular"]["flat_module_metadata"] = struct(
module_name = ctx.attr.module_name,
typings_file = outs.bundle_index_typings,
flat_module_out_prodmode_file = outs.flat_module_out_prodmode_file,
)
return providers
def _ng_module_impl(ctx):
ts_providers = ng_module_impl(ctx, compile_ts)
# Add in new JS providers
# See design doc https://docs.google.com/document/d/1ggkY5RqUkVL4aQLYm7esRW978LgX3GUCnQirrk5E1C0/edit#
# and issue https://github.com/bazelbuild/rules_nodejs/issues/57 for more details.
ts_providers["providers"].extend([
js_module_info(
sources = ts_providers["typescript"]["es5_sources"],
deps = ctx.attr.deps,
),
js_named_module_info(
sources = ts_providers["typescript"]["es5_sources"],
deps = ctx.attr.deps,
),
js_ecma_script_module_info(
sources = ts_providers["typescript"]["es6_sources"],
deps = ctx.attr.deps,
),
# TODO: Add remaining shared JS providers from design doc
# (JSModuleInfo) and remove legacy "typescript" provider
# once it is no longer needed.
])
if ctx.attr.package_name:
path = "/".join([p for p in [ctx.bin_dir.path, ctx.label.workspace_root, ctx.label.package] if p])
ts_providers["providers"].append(LinkablePackageInfo(
package_name = ctx.attr.package_name,
package_path = ctx.attr.package_path,
path = path,
files = ts_providers["typescript"]["es5_sources"],
))
return ts_providers_dict_to_struct(ts_providers)
NG_MODULE_ATTRIBUTES = {
"srcs": attr.label_list(allow_files = [".ts"]),
"deps": attr.label_list(
doc = "Targets that are imported by this target",
aspects = [node_modules_aspect] + DEPS_ASPECTS,
),
"assets": attr.label_list(
doc = ".html and .css files needed by the Angular compiler",
allow_files = [
".css",
# TODO(alexeagle): change this to ".ng.html" when usages updated
".html",
],
),
"factories": attr.label_list(
allow_files = [".ts", ".html"],
mandatory = False,
),
"filter_summaries": attr.bool(default = False),
"type_check": attr.bool(default = True),
"strict_templates": attr.bool(default = False),
"experimental_extended_template_diagnostics": attr.bool(
default = False,
doc = "Experimental option, not publicly supported.",
),
"inline_resources": attr.bool(default = True),
"no_i18n": attr.bool(default = False),
"compiler": attr.label(
doc = """Sets a different ngc compiler binary to use for this library.
The default ngc compiler depends on the `//@angular/bazel`
target which is setup for projects that use bazel managed npm deps that
fetch the @angular/bazel npm package.
""",
default = Label(DEFAULT_NG_COMPILER),
executable = True,
cfg = "exec",
),
"ng_xi18n": attr.label(
default = Label(DEFAULT_NG_XI18N),
executable = True,
cfg = "exec",
),
"_partial_compilation_flag": attr.label(
default = "//packages/bazel/src:partial_compilation",
providers = [NgPartialCompilationInfo],
doc = "Internal attribute which points to the partial compilation build setting.",
),
# In the angular/angular monorepo, //tools:defaults.bzl wraps the ng_module rule in a macro
# which sets this attribute to the //packages/compiler-cli:ng_perf flag.
# This is done to avoid exposing the flag to user projects, which would require:
# * defining the flag within @angular/bazel and referencing it correctly here, and
# * committing to the flag and its semantics (including the format of perf JSON files)
# as something users can depend upon.
"perf_flag": attr.label(
providers = [NgPerfInfo],
doc = "Private API to control production of performance metric JSON files",
),
"_supports_workers": attr.bool(default = True),
# Matches the API of the `ts_library` rule from `@bazel/concatjs`.
# https://github.com/bazelbuild/rules_nodejs/blob/398d351a3f2a9b2ebf6fc31fb5882cce7eedfd7b/packages/typescript/internal/build_defs.bzl#L435-L446.
"package_name": attr.string(
doc = """The package name that the linker will link this `ng_module` output as.
If `package_path` is set, the linker will link this package under `<package_path>/node_modules/<package_name>`.
If `package_path` is not set, the package will be linked in the top-level workspace node_modules folder.""",
),
# Matches the API of the `ts_library` rule from `@bazel/concatjs`.
# https://github.com/bazelbuild/rules_nodejs/blob/398d351a3f2a9b2ebf6fc31fb5882cce7eedfd7b/packages/typescript/internal/build_defs.bzl#L435-L446.
"package_path": attr.string(
doc = """The package path in the workspace that the linker will link this `ng_module` output to.
If `package_path` is set, the linker will link this package under `<package_path>/node_modules/<package_name>`.
If `package_path` is not set, the package will be linked in the top-level workspace node_modules folder.""",
),
}
NG_MODULE_RULE_ATTRS = dict(dict(COMMON_ATTRIBUTES, **NG_MODULE_ATTRIBUTES), **{
"tsconfig": attr.label(allow_single_file = True),
"node_modules": attr.label(
doc = """The npm packages which should be available during the compile.
The default value of `//typescript:typescript__typings` is
for projects that use bazel managed npm deps. This default is in place
since code compiled by ng_module will always depend on at least the
typescript default libs which are provided by
`//typescript:typescript__typings`.
This attribute is DEPRECATED. As of version 0.18.0 the recommended
approach to npm dependencies is to use fine grained npm dependencies
which are setup with the `yarn_install` or `npm_install` rules.
For example, in targets that used a `//:node_modules` filegroup,
```
ng_module(
name = "my_lib",
...
node_modules = "//:node_modules",
)
```
which specifies all files within the `//:node_modules` filegroup
to be inputs to `my_lib`. Using fine grained npm dependencies,
`my_lib` is defined with only the npm dependencies that are
needed:
```
ng_module(
name = "my_lib",
...
deps = [
"@npm//@types/foo",
"@npm//@types/bar",
"@npm//foo",
"@npm//bar",
...
],
)
```
In this case, only the listed npm packages and their
transitive deps are included as inputs to the `my_lib` target,
which reduces the time required to set up the runfiles for this
target (see https://github.com/bazelbuild/bazel/issues/5153).
The default typescript libs are also available via the node_modules
default in this case.
The @npm external repository and the fine grained npm package
targets are setup using the `yarn_install` or `npm_install` rule
in your WORKSPACE file:
yarn_install(
name = "npm",
package_json = "//:package.json",
yarn_lock = "//:yarn.lock",
)
""",
default = Label(
# BEGIN-DEV-ONLY
"@npm" +
# END-DEV-ONLY
"//typescript:typescript__typings",
),
),
"entry_point": attr.label(allow_single_file = True),
# Default is %{name}_public_index
# The suffix points to the generated "bundle index" files that users import from
# The default is intended to avoid collisions with the user's input files.
# Later packaging rules will point to these generated files as the entry point
# into the package.
# See the flatModuleOutFile documentation in
# https://github.com/angular/angular/blob/main/packages/compiler-cli/src/transformers/api.ts
"flat_module_out_file": attr.string(),
# Should the rule generate ngfactory and ngsummary shim files?
"generate_ve_shims": attr.bool(default = False),
})
ng_module = rule(
implementation = _ng_module_impl,
attrs = NG_MODULE_RULE_ATTRS,
outputs = COMMON_OUTPUTS,
)
"""
Run the Angular AOT template compiler.
This rule extends the [ts_library] rule.
[ts_library]: https://bazelbuild.github.io/rules_nodejs/TypeScript.html#ts_library
"""
def ng_module_macro(tsconfig = None, **kwargs):
"""Wraps `ng_module` to set the default for the `tsconfig` attribute.
This must be a macro so that the string is converted to a label in the context of the
workspace that declares the `ng_module` target, rather than the workspace that defines
`ng_module`, or the workspace where the build is taking place.
This macro is re-exported as `ng_module` in the public API.
Args:
tsconfig: the label pointing to a tsconfig.json file
**kwargs: remaining args to pass to the ng_module rule
"""
if not tsconfig:
tsconfig = "//:tsconfig.json"
ng_module(tsconfig = tsconfig, **kwargs)
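# Example BUILD usage of the macro (illustrative only; the load path, target
# name, sources, and deps below are hypothetical and depend on the workspace
# setup):
#
#   load("@npm//@angular/bazel:index.bzl", "ng_module")
#
#   ng_module(
#       name = "my_feature",
#       srcs = glob(["*.ts"]),
#       assets = ["my_feature.component.html"],
#       deps = ["@npm//@angular/core"],
#   )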
|
the-stack_106_13306
|
'''
@author: Pranshu Aggarwal
@problem: https://hack.codingblocks.com/app/practice/1/488/problem
Algorithm:
1. Set Middlerow = n, Middlecol = n+1, nn1 = 1, nn2 = 1, nsp = 2n-1, last = 2n+1
2. for row:=1 to end+1 step by 1
for col:=1 to end+1 step by 1
if col <= (number of numbers in block1 nn1) then Print(Middlecol - col + " ")
else if col <= (number of spaces nsp + nn1) then Print(" ")
else if col <= (nsp + nn1 + number of numbers in block2 nn2) then Print(col - Middlecol + " ")
3. nn1 = nn1+1 if row <= Middlerow else nn1-1
4. nn2 = nn2+1 if row <= Middlerow else nn2-1
5. nsp = nsp-2 if row <= Middlerow else nsp+2
6. Print(newline)
7. end for loop
'''
import re
def print_inverted_hour_glass(n):
middlerow, middlecol, nn1, nn2, nsp, last = n, n + 1, 1, 1, 2 * n - 1, 2 * n + 1
for row in range(1, last + 1):
for col in range(1, last + 1):
if col <= nn1: print(str(middlecol - col) + " ", end='')
elif col <= (nsp + nn1): print(" ", end='')
elif col <= (nsp + nn1 + nn2): print(str(col - middlecol) + " ", end='')
nn1 = nn1 + 1 if row <= middlerow else nn1 - 1
nn2 = nn2 + 1 if row <= middlerow else nn2 - 1
nsp = nsp - 2 if row <= middlerow else nsp + 2
print()
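# For reference, a hand-traced run of print_inverted_hour_glass(2) prints
# (trailing spaces omitted):
#   2    2
#   2 1  1 2
#   2 1 0 1 2
#   2 1  1 2
#   2    2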
# Read whitespace-separated tokens across irregular input lines, similar to Java's Scanner class
def take_input(n):
arr = []
while True:
if len(arr) == n: break
line = input().strip()
line = re.sub(' +', ' ', line)  # re.sub returns a new string; keep the collapsed result
arr.extend(line.split())
return " ".join(arr).strip()
if __name__ == "__main__":
n = int(take_input(1))
print_inverted_hour_glass(n)
|
the-stack_106_13307
|
"""drf_email_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('auth/', include('users.urls')),
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
]
|
the-stack_106_13310
|
#
# Copyright 2020 Delphix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=missing-docstring
import argparse
from typing import Iterable
import drgn
import sdb
from sdb.commands.internal import fmt
class ZFSHistogram(sdb.Command):
"""
Print ZFS Histogram and print its median segment size.
NOTE
The median is just an approximation as we can't tell the
exact size of each segment within a histogram bucket.
EXAMPLES
Dump the histogram of the normal metaslab class of the rpool:
sdb> spa rpool | member spa_normal_class.mc_histogram | zhist
seg-size count
-------- -----
512.0B: 4359 *******************
1.0KB: 3328 ***************
2.0KB: 3800 *****************
4.0KB: 3536 ***************
8.0KB: 3983 *****************
16.0KB: 4876 *********************
32.0KB: 9138 ****************************************
64.0KB: 4508 ********************
128.0KB: 2783 ************
256.0KB: 1952 *********
512.0KB: 1218 *****
1.0MB: 675 ***
2.0MB: 486 **
4.0MB: 267 *
8.0MB: 110
16.0MB: 50
32.0MB: 18
64.0MB: 8
128.0MB: 11
256.0MB: 102
Approx. Median: 339.7MB
"""
names = ["zfs_histogram", "zhist"]
@classmethod
def _init_parser(cls, name: str) -> argparse.ArgumentParser:
parser = super()._init_parser(name)
parser.add_argument("offset", nargs="?", default=0, type=int)
return parser
@staticmethod
def histogram_median(hist: drgn.Object, offset: int = 0) -> int:
"""
Returns the approximated median of a ZFS histogram.
"""
canonical_type = sdb.type_canonicalize(hist.type_)
assert canonical_type.kind == drgn.TypeKind.ARRAY
assert sdb.type_canonicalize(
canonical_type.type).kind == drgn.TypeKind.INT
total_space = 0
for (bucket, value) in enumerate(hist):
total_space += int(value) << (bucket + offset)
if total_space == 0:
return 0
space_left, median = total_space / 2, 0
for (bucket, value) in enumerate(hist):
space_in_bucket = int(value) << (bucket + offset)
if space_left <= space_in_bucket:
median = 1 << (bucket + offset - 1)
#
# Size of segments may vary within one bucket thus we
# attempt to approximate the median by looking at the
# number of segments in the bucket and assuming that
# they are evenly distributed along the bucket's range.
#
bucket_fill = space_left / space_in_bucket
median += round(median * bucket_fill)
break
space_left -= space_in_bucket
return median
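# Worked example of the arithmetic above (hypothetical values, not tied to
# any real pool): with hist = [0, 4] and offset = 9 there are four 1.0KB
# segments and no 512B ones, so total_space = 4 << 10 = 4096 and
# space_left = 2048. The 1.0KB bucket holds all 4096 bytes, so
# median = 1 << 9 = 512, bucket_fill = 2048 / 4096 = 0.5, and the
# approximation returns 512 + round(512 * 0.5) = 768 bytes.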
@staticmethod
def print_histogram_median(hist: drgn.Object,
offset: int = 0,
indent: int = 0) -> None:
median = ZFSHistogram.histogram_median(hist, offset)
if median > 0:
print(f'{" " * indent}Approx. Median: {fmt.size_nicenum(median)}')
@staticmethod
def print_histogram(hist: drgn.Object,
offset: int = 0,
indent: int = 0) -> None:
canonical_type = sdb.type_canonicalize(hist.type_)
assert canonical_type.kind == drgn.TypeKind.ARRAY
assert sdb.type_canonicalize(
canonical_type.type).kind == drgn.TypeKind.INT
max_count = 0
min_bucket = (len(hist) - 1)
max_bucket = 0
for (bucket, value) in enumerate(hist):
count = int(value)
if bucket < min_bucket and count > 0:
min_bucket = bucket
if bucket > max_bucket and count > 0:
max_bucket = bucket
if count > max_count:
max_count = count
HISTOGRAM_WIDTH_MAX = 40
max_count = max(max_count, HISTOGRAM_WIDTH_MAX)
if min_bucket > max_bucket:
print(f'{" " * indent}** No histogram data available **')
return
print(f'{" " * indent}seg-size count')
print(f'{" " * indent}{"-" * 8} {"-" * 5}')
for bucket in range(min_bucket, max_bucket + 1):
count = int(hist[bucket])
stars = round(count * HISTOGRAM_WIDTH_MAX / max_count)
print(f'{" " * indent}{fmt.size_nicenum(2**(bucket+offset)):>8}: '
f'{count:>6} {"*" * stars}')
ZFSHistogram.print_histogram_median(hist, offset, indent)
def _call(self, objs: Iterable[drgn.Object]) -> None:
for obj in objs:
ZFSHistogram.print_histogram(obj, self.args.offset)
|
the-stack_106_13313
|
import logging
import sys
from app import app
from data import model
from data.database import (
RepositoryTag,
Repository,
TagToRepositoryTag,
TagManifest,
ManifestLegacyImage,
)
logger = logging.getLogger(__name__)
def _vs(first, second):
return "%s vs %s" % (first, second)
def verify_backfill(namespace_name):
logger.info("Checking namespace %s", namespace_name)
namespace_user = model.user.get_namespace_user(namespace_name)
assert namespace_user
repo_tags = (
RepositoryTag.select()
.join(Repository)
.where(Repository.namespace_user == namespace_user)
.where(RepositoryTag.hidden == False)
)
repo_tags = list(repo_tags)
logger.info("Found %s tags", len(repo_tags))
for index, repo_tag in enumerate(repo_tags):
logger.info(
"Checking tag %s under repository %s (%s/%s)",
repo_tag.name,
repo_tag.repository.name,
index + 1,
len(repo_tags),
)
tag = TagToRepositoryTag.get(repository_tag=repo_tag).tag
assert not tag.hidden
assert tag.repository == repo_tag.repository
assert tag.name == repo_tag.name, _vs(tag.name, repo_tag.name)
assert tag.repository == repo_tag.repository, _vs(tag.repository_id, repo_tag.repository_id)
assert tag.reversion == repo_tag.reversion, _vs(tag.reversion, repo_tag.reversion)
start_check = int(tag.lifetime_start_ms // 1000) == repo_tag.lifetime_start_ts
assert start_check, _vs(tag.lifetime_start_ms, repo_tag.lifetime_start_ts)
if repo_tag.lifetime_end_ts is not None:
end_check = int(tag.lifetime_end_ms // 1000) == repo_tag.lifetime_end_ts
assert end_check, _vs(tag.lifetime_end_ms, repo_tag.lifetime_end_ts)
else:
assert tag.lifetime_end_ms is None
try:
tag_manifest = tag.manifest
repo_tag_manifest = TagManifest.get(tag=repo_tag)
digest_check = tag_manifest.digest == repo_tag_manifest.digest
assert digest_check, _vs(tag_manifest.digest, repo_tag_manifest.digest)
bytes_check = tag_manifest.manifest_bytes == repo_tag_manifest.json_data
assert bytes_check, _vs(tag_manifest.manifest_bytes, repo_tag_manifest.json_data)
except TagManifest.DoesNotExist:
logger.info("No tag manifest found for repository tag %s", repo_tag.id)
mli = ManifestLegacyImage.get(manifest=tag_manifest)
assert mli.repository == repo_tag.repository
manifest_legacy_image = mli.image
assert manifest_legacy_image == repo_tag.image, _vs(
manifest_legacy_image.id, repo_tag.image_id
)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
verify_backfill(sys.argv[1])
|
the-stack_106_13316
|
# Copyright (c) 2018, NVIDIA CORPORATION.
"""
Test related to Index
"""
import numpy as np
import pandas as pd
import pytest
from cudf.dataframe import DataFrame
from cudf.dataframe.index import (
CategoricalIndex,
DatetimeIndex,
GenericIndex,
RangeIndex,
as_index,
)
from cudf.tests.utils import assert_eq
def test_df_set_index_from_series():
df = DataFrame()
df["a"] = list(range(10))
df["b"] = list(range(0, 20, 2))
# Check set_index(Series)
df2 = df.set_index(df["b"])
assert list(df2.columns) == ["a", "b"]
sliced_strided = df2.loc[2:6]
print(sliced_strided)
assert len(sliced_strided) == 3
assert list(sliced_strided.index.values) == [2, 4, 6]
def test_df_set_index_from_name():
df = DataFrame()
df["a"] = list(range(10))
df["b"] = list(range(0, 20, 2))
# Check set_index(column_name)
df2 = df.set_index("b")
print(df2)
# 1 less column because 'b' is used as index
assert list(df2.columns) == ["a"]
sliced_strided = df2.loc[2:6]
print(sliced_strided)
assert len(sliced_strided) == 3
assert list(sliced_strided.index.values) == [2, 4, 6]
def test_df_slice_empty_index():
df = DataFrame()
assert isinstance(df.index, RangeIndex)
assert isinstance(df.index[:1], RangeIndex)
with pytest.raises(IndexError):
df.index[1]
def test_index_find_label_range():
# Monotonic Index
idx = GenericIndex(np.asarray([4, 5, 6, 10]))
assert idx.find_label_range(4, 6) == (0, 3)
assert idx.find_label_range(5, 10) == (1, 4)
assert idx.find_label_range(0, 6) == (0, 3)
assert idx.find_label_range(4, 11) == (0, 4)
# Non-monotonic Index
idx_nm = GenericIndex(np.asarray([5, 4, 6, 10]))
assert idx_nm.find_label_range(4, 6) == (1, 3)
assert idx_nm.find_label_range(5, 10) == (0, 4)
# Last value not found
with pytest.raises(ValueError) as raises:
idx_nm.find_label_range(0, 6)
raises.match("value not found")
# Last value not found
with pytest.raises(ValueError) as raises:
idx_nm.find_label_range(4, 11)
raises.match("value not found")
def test_index_comparision():
start, stop = 10, 34
rg = RangeIndex(start, stop)
gi = GenericIndex(np.arange(start, stop))
assert rg.equals(gi)
assert gi.equals(rg)
assert not rg[:-1].equals(gi)
assert rg[:-1].equals(gi[:-1])
@pytest.mark.parametrize(
"func", [lambda x: x.min(), lambda x: x.max(), lambda x: x.sum()]
)
def test_reductions(func):
x = np.asarray([4, 5, 6, 10])
idx = GenericIndex(np.asarray([4, 5, 6, 10]))
assert func(x) == func(idx)
def test_name():
idx = GenericIndex(np.asarray([4, 5, 6, 10]), name="foo")
assert idx.name == "foo"
def test_index_immutable():
start, stop = 10, 34
rg = RangeIndex(start, stop)
with pytest.raises(TypeError):
rg[1] = 5
gi = GenericIndex(np.arange(start, stop))
with pytest.raises(TypeError):
gi[1] = 5
def test_categorical_index():
pdf = pd.DataFrame()
pdf["a"] = [1, 2, 3]
pdf["index"] = pd.Categorical(["a", "b", "c"])
pdf = pdf.set_index("index")
gdf1 = DataFrame.from_pandas(pdf)
gdf2 = DataFrame()
gdf2["a"] = [1, 2, 3]
gdf2["index"] = pd.Categorical(["a", "b", "c"])
gdf2 = gdf2.set_index("index")
assert isinstance(gdf1.index, CategoricalIndex)
assert_eq(pdf, gdf1)
assert_eq(pdf.index, gdf1.index)
assert isinstance(gdf2.index, CategoricalIndex)
assert_eq(pdf, gdf2)
assert_eq(pdf.index, gdf2.index)
def test_pandas_as_index():
# Define Pandas Indexes
pdf_int_index = pd.Int64Index([1, 2, 3, 4, 5])
pdf_float_index = pd.Float64Index([1.0, 2.0, 3.0, 4.0, 5.0])
pdf_datetime_index = pd.DatetimeIndex(
[1000000, 2000000, 3000000, 4000000, 5000000]
)
pdf_category_index = pd.CategoricalIndex(["a", "b", "c", "b", "a"])
# Define cudf Indexes
gdf_int_index = as_index(pdf_int_index)
gdf_float_index = as_index(pdf_float_index)
gdf_datetime_index = as_index(pdf_datetime_index)
gdf_category_index = as_index(pdf_category_index)
# Check instance types
assert isinstance(gdf_int_index, GenericIndex)
assert isinstance(gdf_float_index, GenericIndex)
assert isinstance(gdf_datetime_index, DatetimeIndex)
assert isinstance(gdf_category_index, CategoricalIndex)
# Check equality
assert_eq(pdf_int_index, gdf_int_index)
assert_eq(pdf_float_index, gdf_float_index)
assert_eq(pdf_datetime_index, gdf_datetime_index)
assert_eq(pdf_category_index, gdf_category_index)
def test_index_rename():
pds = pd.Index([1, 2, 3], name="asdf")
gds = as_index(pds)
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
"""
From here on testing recursive creation
and if name is being handled in recursive creation.
"""
pds = pd.Index(expect)
gds = as_index(got)
assert_eq(pds, gds)
pds = pd.Index(pds, name="abc")
gds = as_index(gds, name="abc")
assert_eq(pds, gds)
def test_set_index_as_property():
cdf = DataFrame()
col1 = np.arange(10)
col2 = np.arange(0, 20, 2)
cdf["a"] = col1
cdf["b"] = col2
# Check set_index(Series)
cdf.index = cdf["b"]
np.testing.assert_array_equal(cdf.index.values, col2)
with pytest.raises(ValueError):
cdf.index = [list(range(10))]
idx = np.arange(0, 1000, 100)
cdf.index = idx
np.testing.assert_array_equal(cdf.index.values, idx)
df = cdf.to_pandas()
np.testing.assert_array_equal(df.index.values, idx)
head = cdf.head().to_pandas()
np.testing.assert_array_equal(head.index.values, idx[:5])
|
the-stack_106_13320
|
from threading import local, Lock
from json_stream.dump import JSONStreamEncoder, _original_default
_lock = Lock() # locked during state changes
_counter = 0 # number of patched threads
_thread = local() # is *this* thread patched
_patched = Lock() # locked while patch is active
class ThreadSafeJSONStreamEncoder(JSONStreamEncoder):
def __enter__(self):
global _counter
with _lock:
if _counter == 0:
# patch if we are first
_patched.acquire()
super().__enter__()
_thread.patched = True
_counter += 1
def __exit__(self, exc_type, exc_val, exc_tb):
global _counter
with _lock:
_counter -= 1
if _counter == 0:
# unpatch if we are last
super().__exit__(exc_type, exc_val, exc_tb)
_patched.release()
_thread.patched = False
def default(self, obj):
# if we end up being called by a thread that is _not_
# patched (i.e. not in a ThreadSafeJSONStreamEncoder
# context), we must ensure that the patch is not active
if not getattr(_thread, "patched", False):
# block until any patching has been removed
with _patched:
# patch cannot be applied while in here
assert not getattr(_thread, "patched", False)
return _original_default(self, obj)  # call the unpatched default rather than returning the function
return super().default(obj)
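# Rough usage sketch (hypothetical `data`/`out_file`; follows the json-stream
# convention of either passing the encoder class to json.dump or using it as
# a context manager while third-party code serializes):
#
#   json.dump(data, out_file, cls=ThreadSafeJSONStreamEncoder)
#
#   with ThreadSafeJSONStreamEncoder():
#       third_party_serialize(data)  # hypothetical call that JSON-encodes internally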
|
the-stack_106_13321
|
"""Script containing the TraCI vehicle kernel class."""
from flow.core.kernel.vehicle import KernelVehicle
import traci.constants as tc
from traci.exceptions import FatalTraCIError, TraCIException
import numpy as np
import collections
import warnings
from flow.controllers.car_following_models import SimCarFollowingController
from flow.controllers.rlcontroller import RLController
from flow.controllers.lane_change_controllers import SimLaneChangeController
from bisect import bisect_left
import itertools
from copy import deepcopy
# colors for vehicles
WHITE = (255, 255, 255)
CYAN = (0, 255, 255)
RED = (255, 0, 0)
PURPLE = (255, 0, 255)
class TraCIVehicle(KernelVehicle):
"""Flow kernel for the TraCI API.
Extends flow.core.kernel.vehicle.base.KernelVehicle
"""
def __init__(self,
master_kernel,
sim_params):
"""See parent class."""
KernelVehicle.__init__(self, master_kernel, sim_params)
self.__ids = [] # ids of all vehicles
self.__human_ids = [] # ids of human-driven vehicles
self.__controlled_ids = [] # ids of flow-controlled vehicles
self.__controlled_lc_ids = [] # ids of flow lc-controlled vehicles
self.__rl_ids = [] # ids of rl-controlled vehicles
self.__observed_ids = [] # ids of the observed vehicles
# vehicles: Key = Vehicle ID, Value = Dictionary describing the vehicle
# Ordered dictionary used to keep neural net inputs in order
self.__vehicles = collections.OrderedDict()
# create a sumo_observations variable that will carry all information
# on the state of the vehicles for a given time step
self.__sumo_obs = {}
# total number of vehicles in the network
self.num_vehicles = 0
# number of steps into rollout
self.time_counter = 0
# number of RL vehicles in the network
self.num_rl_vehicles = 0
# contains the parameters associated with each type of vehicle
self.type_parameters = {}
# contain the minGap attribute of each type of vehicle
self.minGap = {}
# list of vehicle ids located in each edge in the network
self._ids_by_edge = dict()
# number of vehicles that entered the network for every time-step
self._num_departed = []
self._departed_ids = []
# number of vehicles to exit the network for every time-step
self._num_arrived = []
self._arrived_ids = []
# whether or not to automatically color vehicles
try:
self._color_vehicles = sim_params.color_vehicles
except AttributeError:
self._color_vehicles = False
def initialize(self, vehicles):
"""Initialize vehicle state information.
This is responsible for collecting vehicle type information from the
VehicleParams object and placing them within the Vehicles kernel.
Parameters
----------
vehicles : flow.core.params.VehicleParams
initial vehicle parameter information, including the types of
individual vehicles and their initial speeds
"""
self.type_parameters = vehicles.type_parameters
self.minGap = vehicles.minGap
self.num_vehicles = len(self.__ids)
self.num_rl_vehicles = len(self.__rl_ids)
self.__vehicles.clear()
for typ in vehicles.initial:
for i in range(typ['num_vehicles']):
veh_id = '{}_{}'.format(typ['veh_id'], i)
self.__vehicles[veh_id] = dict()
self.__vehicles[veh_id]['type'] = typ['veh_id']
self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed']
self.num_vehicles += 1
if typ['acceleration_controller'][0] == RLController:
if veh_id not in self.__rl_ids:
self.__rl_ids.append(veh_id)
self.num_rl_vehicles += 1
else:
if veh_id not in self.__human_ids:
self.__human_ids.append(veh_id)
if typ['acceleration_controller'][0] != SimCarFollowingController:
self.__controlled_ids.append(veh_id)
if typ['lane_change_controller'][0] != SimLaneChangeController:
self.__controlled_lc_ids.append(veh_id)
def update(self, reset):
"""See parent class.
The following actions are performed:
* The state of all vehicles is modified to match their state at the
current time step. This includes states specified by SUMO, and states
explicitly defined by Flow, e.g., "num_arrived".
* If vehicles exit the network, they are removed from the vehicles
class, and newly departed vehicles are introduced to the class.
"""
self.time_counter += 1
vehicle_obs = {}
for veh_id in self.__ids:
vehicle_obs[veh_id] = \
self.kernel_api.vehicle.getSubscriptionResults(veh_id)
sim_obs = self.kernel_api.simulation.getSubscriptionResults()
# remove exiting vehicles from the vehicles class
for veh_id in sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]:
if veh_id not in sim_obs[tc.VAR_TELEPORT_STARTING_VEHICLES_IDS]:
self.remove(veh_id)
# remove exiting vehicles from the vehicle subscription if they
# haven't been removed already
if veh_id in vehicle_obs:
if vehicle_obs[veh_id] is None:
vehicle_obs.pop(veh_id, None)
else:
# this is meant to resolve the KeyError bug when there are
# collisions
vehicle_obs[veh_id] = self.__sumo_obs[veh_id]
# add entering vehicles into the vehicles class
for veh_id in sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]:
if veh_id in self.get_ids() and vehicle_obs[veh_id] is not None:
# this occurs when a vehicle is actively being removed and
# placed again in the network to ensure a constant number of
# total vehicles (e.g., TrafficLightGridEnv). In this case, the vehicle
# is already in the class; its state data just needs to be
# updated
pass
else:
veh_type = self.kernel_api.vehicle.getTypeID(veh_id)
obs = self._add_departed(veh_id, veh_type)
# add the subscription information of the new vehicle
vehicle_obs[veh_id] = obs
if reset:
self.time_counter = 0
# reset all necessary values
self.prev_last_lc = dict()
for veh_id in self.__rl_ids:
self.__vehicles[veh_id]["last_lc"] = -float("inf")
self.prev_last_lc[veh_id] = -float("inf")
self._num_departed.clear()
self._num_arrived.clear()
self._departed_ids.clear()
self._arrived_ids.clear()
# add vehicles from a network template, if applicable
if hasattr(self.master_kernel.network.network,
"template_vehicles"):
for veh_id in self.master_kernel.network.network.\
template_vehicles:
vals = deepcopy(self.master_kernel.network.network.
template_vehicles[veh_id])
# a step is executed during initialization, so add this sim
# step to the departure time of vehicles
vals['depart'] = str(
float(vals['depart']) + 2 * self.sim_step)
self.kernel_api.vehicle.addFull(
veh_id, 'route{}_0'.format(veh_id), **vals)
else:
# update the "last_lc" variable
for veh_id in self.__rl_ids:
prev_lane = self.get_lane(veh_id)
if vehicle_obs[veh_id][tc.VAR_LANE_INDEX] != prev_lane:
self.__vehicles[veh_id]["last_lc"] = self.time_counter
# updated the list of departed and arrived vehicles
self._num_departed.append(
len(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]))
self._num_arrived.append(len(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]))
self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS])
self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS])
# update the "headway", "leader", and "follower" variables
for veh_id in self.__ids:
if vehicle_obs.get(veh_id, {}) is None:
print("the missing vehicle in update() is:", veh_id)
continue
_position = vehicle_obs.get(veh_id, {}).get(
tc.VAR_POSITION, -1001)
_angle = vehicle_obs.get(veh_id, {}).get(tc.VAR_ANGLE, -1001)
_time_step = sim_obs[tc.VAR_TIME_STEP]
_time_delta = sim_obs[tc.VAR_DELTA_T]
#self.__vehicles[veh_id]["orientation"] = list(_position) + [_angle]
self.__vehicles[veh_id]["orientation"] = [_position] + [_angle]
self.__vehicles[veh_id]["timestep"] = _time_step
self.__vehicles[veh_id]["timedelta"] = _time_delta
headway = vehicle_obs.get(veh_id, {}).get(tc.VAR_LEADER, None)
# check for a collided vehicle or a vehicle with no leader
if headway is None:
self.__vehicles[veh_id]["leader"] = None
self.__vehicles[veh_id]["follower"] = None
self.__vehicles[veh_id]["headway"] = 1e+3
self.__vehicles[veh_id]["follower_headway"] = 1e+3
else:
min_gap = self.minGap[self.get_type(veh_id)]
self.__vehicles[veh_id]["headway"] = headway[1] + min_gap
self.__vehicles[veh_id]["leader"] = headway[0]
if headway[0] in self.__vehicles:
leader = self.__vehicles[headway[0]]
# if veh_id is closer from leader than another follower
# (in case followers are in different converging edges)
if ("follower_headway" not in leader or
headway[1] + min_gap < leader["follower_headway"]):
leader["follower"] = veh_id
leader["follower_headway"] = headway[1] + min_gap
# update the SUMO observations variable
self.__sumo_obs = vehicle_obs.copy()
# update the lane leaders data for each vehicle
self._multi_lane_headways()
# make sure the RL vehicle list is still sorted
self.__rl_ids.sort()
def _add_departed(self, veh_id, veh_type):
"""Add a vehicle that entered the network from an inflow or reset.
Parameters
----------
veh_id: str
name of the vehicle
veh_type: str
type of vehicle, as specified to sumo
Returns
-------
dict
subscription results from the new vehicle
"""
if veh_type not in self.type_parameters:
raise KeyError("Entering vehicle is not a valid type.")
if veh_id not in self.__ids:
self.__ids.append(veh_id)
if veh_id not in self.__vehicles:
self.num_vehicles += 1
self.__vehicles[veh_id] = dict()
# specify the type
self.__vehicles[veh_id]["type"] = veh_type
car_following_params = \
self.type_parameters[veh_type]["car_following_params"]
# specify the acceleration controller class
accel_controller = \
self.type_parameters[veh_type]["acceleration_controller"]
self.__vehicles[veh_id]["acc_controller"] = \
accel_controller[0](veh_id,
car_following_params=car_following_params,
**accel_controller[1])
# specify the lane-changing controller class
lc_controller = \
self.type_parameters[veh_type]["lane_change_controller"]
self.__vehicles[veh_id]["lane_changer"] = \
lc_controller[0](veh_id=veh_id, **lc_controller[1])
# specify the routing controller class
rt_controller = self.type_parameters[veh_type]["routing_controller"]
if rt_controller is not None:
self.__vehicles[veh_id]["router"] = \
rt_controller[0](veh_id=veh_id, router_params=rt_controller[1])
else:
self.__vehicles[veh_id]["router"] = None
# add the vehicle's id to the list of vehicle ids
if accel_controller[0] == RLController:
if veh_id not in self.__rl_ids:
self.__rl_ids.append(veh_id)
self.num_rl_vehicles += 1
else:
if veh_id not in self.__human_ids:
self.__human_ids.append(veh_id)
if accel_controller[0] != SimCarFollowingController:
self.__controlled_ids.append(veh_id)
if lc_controller[0] != SimLaneChangeController:
self.__controlled_lc_ids.append(veh_id)
# subscribe the new vehicle
self.kernel_api.vehicle.subscribe(veh_id, [
tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, tc.VAR_ROAD_ID,
tc.VAR_SPEED, tc.VAR_EDGES, tc.VAR_POSITION, tc.VAR_ANGLE,
tc.VAR_SPEED_WITHOUT_TRACI
])
self.kernel_api.vehicle.subscribeLeader(veh_id, 2000)
# some constant vehicle parameters to the vehicles class
self.__vehicles[veh_id]["length"] = self.kernel_api.vehicle.getLength(
veh_id)
# set the "last_lc" parameter of the vehicle
self.__vehicles[veh_id]["last_lc"] = -float("inf")
# specify the initial speed
self.__vehicles[veh_id]["initial_speed"] = \
self.type_parameters[veh_type]["initial_speed"]
# specify the time step that the vehicle departed
self.__vehicles[veh_id]["timestep_departed"] = self.time_counter * self.sim_step
# set the speed mode for the vehicle
speed_mode = self.type_parameters[veh_type][
"car_following_params"].speed_mode
self.kernel_api.vehicle.setSpeedMode(veh_id, speed_mode)
# set the lane changing mode for the vehicle
lc_mode = self.type_parameters[veh_type][
"lane_change_params"].lane_change_mode
self.kernel_api.vehicle.setLaneChangeMode(veh_id, lc_mode)
# get initial state info
self.__sumo_obs[veh_id] = dict()
self.__sumo_obs[veh_id][tc.VAR_ROAD_ID] = \
self.kernel_api.vehicle.getRoadID(veh_id)
self.__sumo_obs[veh_id][tc.VAR_LANEPOSITION] = \
self.kernel_api.vehicle.getLanePosition(veh_id)
self.__sumo_obs[veh_id][tc.VAR_LANE_INDEX] = \
self.kernel_api.vehicle.getLaneIndex(veh_id)
self.__sumo_obs[veh_id][tc.VAR_SPEED] = \
self.kernel_api.vehicle.getSpeed(veh_id)
# make sure that the order of rl_ids is kept sorted
self.__rl_ids.sort()
# get the subscription results from the new vehicle
new_obs = self.kernel_api.vehicle.getSubscriptionResults(veh_id)
return new_obs
def remove(self, veh_id):
"""See parent class."""
# remove from sumo
if veh_id in self.kernel_api.vehicle.getIDList():
self.kernel_api.vehicle.unsubscribe(veh_id)
self.kernel_api.vehicle.remove(veh_id)
if veh_id in self.__ids:
self.__ids.remove(veh_id)
# remove from the vehicles kernel
if veh_id in self.__vehicles:
del self.__vehicles[veh_id]
if veh_id in self.__sumo_obs:
del self.__sumo_obs[veh_id]
# remove it from all other id lists (if it is there)
if veh_id in self.__human_ids:
self.__human_ids.remove(veh_id)
if veh_id in self.__controlled_ids:
self.__controlled_ids.remove(veh_id)
if veh_id in self.__controlled_lc_ids:
self.__controlled_lc_ids.remove(veh_id)
elif veh_id in self.__rl_ids:
self.__rl_ids.remove(veh_id)
# make sure that the rl ids remain sorted
self.__rl_ids.sort()
# modify the number of vehicles and RL vehicles
self.num_vehicles = len(self.get_ids())
self.num_rl_vehicles = len(self.get_rl_ids())
def test_set_speed(self, veh_id, speed):
"""Set the speed of the specified vehicle."""
self.__sumo_obs[veh_id][tc.VAR_SPEED] = speed
def test_set_edge(self, veh_id, edge):
"""Set the speed of the specified vehicle."""
self.__sumo_obs[veh_id][tc.VAR_ROAD_ID] = edge
def set_follower(self, veh_id, follower):
"""Set the follower of the specified vehicle."""
self.__vehicles[veh_id]["follower"] = follower
def set_headway(self, veh_id, headway):
"""Set the headway of the specified vehicle."""
self.__vehicles[veh_id]["headway"] = headway
def get_orientation(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["orientation"]
def get_timestep(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["timestep"]
def get_timedelta(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["timedelta"]
def get_type(self, veh_id):
"""Return the type of the vehicle of veh_id."""
return self.__vehicles[veh_id]["type"]
def get_initial_speed(self, veh_id):
"""Return the initial speed of the vehicle of veh_id."""
return self.__vehicles[veh_id]["initial_speed"]
def get_timestep_departed(self, veh_id):
"""Return the departure timestep of vehicle(s) veh_id."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_timestep_departed(v_id) for v_id in veh_id]
return self.__vehicles[veh_id]["timestep_departed"]
def get_ids(self):
"""See parent class."""
return self.__ids
def get_human_ids(self):
"""See parent class."""
return self.__human_ids
def get_controlled_ids(self):
"""See parent class."""
return self.__controlled_ids
def get_controlled_lc_ids(self):
"""See parent class."""
return self.__controlled_lc_ids
def get_rl_ids(self):
"""See parent class."""
return self.__rl_ids
def set_observed(self, veh_id):
"""See parent class."""
if veh_id not in self.__observed_ids:
self.__observed_ids.append(veh_id)
def remove_observed(self, veh_id):
"""See parent class."""
if veh_id in self.__observed_ids:
self.__observed_ids.remove(veh_id)
def get_observed_ids(self):
"""See parent class."""
return self.__observed_ids
def get_ids_by_edge(self, edges):
"""See parent class."""
if isinstance(edges, (list, np.ndarray)):
return sum([self.get_ids_by_edge(edge) for edge in edges], [])
return self._ids_by_edge.get(edges, []) or []
def get_inflow_rate(self, time_span):
"""See parent class."""
if len(self._num_departed) == 0:
return 0
num_inflow = self._num_departed[-int(time_span / self.sim_step):]
return 3600 * sum(num_inflow) / (len(num_inflow) * self.sim_step)
def get_outflow_rate(self, time_span):
"""See parent class."""
if len(self._num_arrived) == 0:
return 0
num_outflow = self._num_arrived[-int(time_span / self.sim_step):]
return 3600 * sum(num_outflow) / (len(num_outflow) * self.sim_step)
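# Worked example of the rate arithmetic above (hypothetical numbers): with
# sim_step = 0.1 s and time_span = 60 s, the last 600 per-step counts are
# used; if 20 vehicles left the network in that window, the outflow is
# 3600 * 20 / (600 * 0.1) = 1200 veh/hour. get_inflow_rate is analogous.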
def get_num_arrived(self):
"""See parent class."""
if len(self._num_arrived) > 0:
return self._num_arrived[-1]
else:
return 0
def get_arrived_ids(self):
"""See parent class."""
if len(self._arrived_ids) > 0:
return self._arrived_ids[-1]
else:
return 0
def get_departed_ids(self):
"""See parent class."""
if len(self._departed_ids) > 0:
return self._departed_ids[-1]
else:
return 0
def get_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_speed(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_SPEED, error)
def get_default_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_default_speed(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_SPEED_WITHOUT_TRACI,
error)
def get_position(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_position(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_LANEPOSITION, error)
def get_edge(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_edge(vehID, error) for vehID in veh_id]
if self.__sumo_obs.get(veh_id, {}) is None:
print("the missing vehicle in get_edge is:", veh_id)
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_ROAD_ID, error)
def get_lane(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_LANE_INDEX, error)
def get_route(self, veh_id, error=[]):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_route(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_EDGES, error)
def get_length(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_length(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("length", error)
def get_leader(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_leader(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("leader", error)
def get_follower(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_follower(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("follower", error)
def get_headway(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_headway(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("headway", error)
def get_follower_headway(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_follower_headway(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("follower_headway", error)
def get_last_lc(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_last_lc(vehID, error) for vehID in veh_id]
if veh_id not in self.__rl_ids:
warnings.warn('Vehicle {} is not RL vehicle, "last_lc" term set to'
' {}.'.format(veh_id, error))
return error
else:
return self.__vehicles.get(veh_id, {}).get("headway", error)
def get_acc_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_acc_controller(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("acc_controller", error)
def get_lane_changing_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [
self.get_lane_changing_controller(vehID, error)
for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get("lane_changer", error)
def get_routing_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [
self.get_routing_controller(vehID, error) for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get("router", error)
def set_lane_headways(self, veh_id, lane_headways):
"""Set the lane headways of the specified vehicle."""
self.__vehicles[veh_id]["lane_headways"] = lane_headways
def get_lane_headways(self, veh_id, error=[]):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_headways(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_headways", error)
def get_lane_leaders_speed(self, veh_id, error=[]):
"""See parent class."""
lane_leaders = self.get_lane_leaders(veh_id)
return [0 if lane_leader == '' else self.get_speed(lane_leader)
for lane_leader in lane_leaders]
def get_lane_followers_speed(self, veh_id, error=[]):
"""See parent class."""
lane_followers = self.get_lane_followers(veh_id)
return [0 if lane_follower == '' else self.get_speed(lane_follower)
for lane_follower in lane_followers]
def set_lane_leaders(self, veh_id, lane_leaders):
"""Set the lane leaders of the specified vehicle."""
self.__vehicles[veh_id]["lane_leaders"] = lane_leaders
def get_lane_leaders(self, veh_id, error=[]):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_leaders(vehID, error) for vehID in veh_id]
return self.__vehicles[veh_id]["lane_leaders"]
def set_lane_tailways(self, veh_id, lane_tailways):
"""Set the lane tailways of the specified vehicle."""
self.__vehicles[veh_id]["lane_tailways"] = lane_tailways
def get_lane_tailways(self, veh_id, error=[]):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_tailways(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_tailways", error)
def set_lane_followers(self, veh_id, lane_followers):
"""Set the lane followers of the specified vehicle."""
self.__vehicles[veh_id]["lane_followers"] = lane_followers
def get_lane_followers(self, veh_id, error=[]):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_followers(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_followers", error)
def _multi_lane_headways(self):
"""Compute multi-lane data for all vehicles.
This includes the lane leaders/followers/headways/tailways/
leader velocity/follower velocity for all
vehicles in the network.
"""
edge_list = self.master_kernel.network.get_edge_list()
junction_list = self.master_kernel.network.get_junction_list()
tot_list = edge_list + junction_list
num_edges = (len(self.master_kernel.network.get_edge_list()) + len(
self.master_kernel.network.get_junction_list()))
# maximum number of lanes in the network
max_lanes = max([self.master_kernel.network.num_lanes(edge_id)
for edge_id in tot_list])
# Key = edge id
# Element = list, with the ith element containing tuples with the name
# and position of all vehicles in lane i
edge_dict = dict.fromkeys(tot_list)
# add the vehicles to the edge_dict element
for veh_id in self.get_ids():
if self.__sumo_obs.get(veh_id, {}) is None:
continue
edge = self.get_edge(veh_id)
lane = self.get_lane(veh_id)
pos = self.get_position(veh_id)
if edge:
if edge_dict[edge] is None:
edge_dict[edge] = [[] for _ in range(max_lanes)]
edge_dict[edge][lane].append((veh_id, pos))
# sort all lanes in each edge by position
for edge in tot_list:
if edge_dict[edge] is None:
del edge_dict[edge]
else:
for lane in range(max_lanes):
edge_dict[edge][lane].sort(key=lambda x: x[1])
for veh_id in self.get_rl_ids():
# collect the lane leaders, followers, headways, and tailways for
# each vehicle
edge = self.get_edge(veh_id)
if edge:
headways, tailways, leaders, followers = \
self._multi_lane_headways_util(veh_id, edge_dict,
num_edges)
# add the above values to the vehicles class
self.set_lane_headways(veh_id, headways)
self.set_lane_tailways(veh_id, tailways)
self.set_lane_leaders(veh_id, leaders)
self.set_lane_followers(veh_id, followers)
self._ids_by_edge = dict().fromkeys(edge_list)
for edge_id in edge_dict:
edges = list(itertools.chain.from_iterable(edge_dict[edge_id]))
# check for edges with no vehicles
if len(edges) > 0:
edges, _ = zip(*edges)
self._ids_by_edge[edge_id] = list(edges)
else:
self._ids_by_edge[edge_id] = []
def _multi_lane_headways_util(self, veh_id, edge_dict, num_edges):
"""Compute multi-lane data for the specified vehicle.
Parameters
----------
veh_id : str
name of the vehicle
edge_dict : dict < list<tuple> >
Key = Edge name
Index = lane index
Element = list sorted by position of (vehicle id, position)
num_edges : int
number of edges in the network, including junctions
Returns
-------
headway : list<float>
Index = lane index
Element = headway at this lane
tailway : list<float>
Index = lane index
Element = tailway at this lane
leader : list<str>
Index = lane index
Element = leader at this lane
follower : list<str>
Index = lane index
Element = follower at this lane
"""
this_pos = self.get_position(veh_id)
this_edge = self.get_edge(veh_id)
this_lane = self.get_lane(veh_id)
num_lanes = self.master_kernel.network.num_lanes(this_edge)
# set default values for all output values
headway = [1000] * num_lanes
tailway = [1000] * num_lanes
leader = [""] * num_lanes
follower = [""] * num_lanes
for lane in range(num_lanes):
# check the vehicle's current edge for lane leaders and followers
if len(edge_dict[this_edge][lane]) > 0:
ids, positions = zip(*edge_dict[this_edge][lane])
ids = list(ids)
positions = list(positions)
index = bisect_left(positions, this_pos)
# if you are at the end or the front of the edge, the lane
# leader is in the edges in front of you
if (lane == this_lane and index < len(positions) - 1) \
or (lane != this_lane and index < len(positions)):
# check if the index does not correspond to the current
# vehicle
if ids[index] == veh_id:
leader[lane] = ids[index + 1]
headway[lane] = (positions[index + 1] - this_pos -
self.get_length(leader[lane]))
else:
leader[lane] = ids[index]
headway[lane] = (positions[index] - this_pos
- self.get_length(leader[lane]))
# you are in the back of the queue, the lane follower is in the
# edges behind you
if index > 0:
follower[lane] = ids[index - 1]
tailway[lane] = (this_pos - positions[index - 1]
- self.get_length(veh_id))
# if lane leader not found, check next edges
if leader[lane] == "":
headway[lane], leader[lane] = self._next_edge_leaders(
veh_id, edge_dict, lane, num_edges)
# if lane follower not found, check previous edges
if follower[lane] == "":
tailway[lane], follower[lane] = self._prev_edge_followers(
veh_id, edge_dict, lane, num_edges)
return headway, tailway, leader, follower
def _next_edge_leaders(self, veh_id, edge_dict, lane, num_edges):
"""Search for leaders in the next edge.
Looks to the edges/junctions in front of the vehicle's current edge
for potential leaders. This is currently done by only looking one
edge/junction forwards.
Returns
-------
headway : float
lane headway for the specified lane
leader : str
lane leader for the specified lane
"""
pos = self.get_position(veh_id)
edge = self.get_edge(veh_id)
headway = 1000 # env.network.length
leader = ""
add_length = 0 # length increment in headway
for _ in range(num_edges):
# break if there are no edge/lane pairs behind the current one
if len(self.master_kernel.network.next_edge(edge, lane)) == 0:
break
add_length += self.master_kernel.network.edge_length(edge)
edge, lane = self.master_kernel.network.next_edge(edge, lane)[0]
try:
if len(edge_dict[edge][lane]) > 0:
leader = edge_dict[edge][lane][0][0]
headway = edge_dict[edge][lane][0][1] - pos + add_length \
- self.get_length(leader)
except KeyError:
# current edge has no vehicles, so move on
continue
# stop if a lane follower is found
if leader != "":
break
return headway, leader
def _prev_edge_followers(self, veh_id, edge_dict, lane, num_edges):
"""Search for followers in the previous edge.
Looks to the edges/junctions behind the vehicle's current edge for
potential followers. This is currently done by only looking one
edge/junction backwards.
Returns
-------
tailway : float
lane tailway for the specified lane
follower : str
lane follower for the specified lane
"""
pos = self.get_position(veh_id)
edge = self.get_edge(veh_id)
tailway = 1000 # env.network.length
follower = ""
add_length = 0 # length increment in headway
for _ in range(num_edges):
# break if there are no edge/lane pairs behind the current one
if len(self.master_kernel.network.prev_edge(edge, lane)) == 0:
break
edge, lane = self.master_kernel.network.prev_edge(edge, lane)[0]
add_length += self.master_kernel.network.edge_length(edge)
try:
if len(edge_dict[edge][lane]) > 0:
tailway = pos - edge_dict[edge][lane][-1][1] + add_length \
- self.get_length(veh_id)
follower = edge_dict[edge][lane][-1][0]
except KeyError:
# current edge has no vehicles, so move on
continue
# stop if a lane follower is found
if follower != "":
break
return tailway, follower
def apply_acceleration(self, veh_ids, acc):
"""See parent class."""
# to handle the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
acc = [acc]
for i, vid in enumerate(veh_ids):
if acc[i] is not None and vid in self.get_ids():
this_vel = self.get_speed(vid)
next_vel = max([this_vel + acc[i] * self.sim_step, 0])
self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3)
def apply_lane_change(self, veh_ids, direction):
"""See parent class."""
# to handle the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
direction = [direction]
# if any of the directions are not -1, 0, or 1, raise a ValueError
if any(d not in [-1, 0, 1] for d in direction):
raise ValueError(
"Direction values for lane changes may only be: -1, 0, or 1.")
for i, veh_id in enumerate(veh_ids):
# check for no lane change
if direction[i] == 0:
continue
# compute the target lane, and clip it so vehicle don't try to lane
# change out of range
this_lane = self.get_lane(veh_id)
this_edge = self.get_edge(veh_id)
target_lane = min(
max(this_lane + direction[i], 0),
self.master_kernel.network.num_lanes(this_edge) - 1)
# perform the requested lane change action in TraCI
if target_lane != this_lane:
self.kernel_api.vehicle.changeLane(
veh_id, int(target_lane), 100000.0)
if veh_id in self.get_rl_ids():
self.prev_last_lc[veh_id] = \
self.__vehicles[veh_id]["last_lc"]
def choose_routes(self, veh_ids, route_choices):
"""See parent class."""
# to handle the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
route_choices = [route_choices]
for i, veh_id in enumerate(veh_ids):
if route_choices[i] is not None:
self.kernel_api.vehicle.setRoute(
vehID=veh_id, edgeList=route_choices[i])
def get_x_by_id(self, veh_id):
"""See parent class."""
if self.get_edge(veh_id) == '':
# occurs when a vehicle crashes or is teleported for some other reason
return 0.
return self.master_kernel.network.get_x(
self.get_edge(veh_id), self.get_position(veh_id))
def update_vehicle_colors(self):
"""See parent class.
The colors of all vehicles are updated as follows:
- red: autonomous (rl) vehicles
- white: unobserved human-driven vehicles
- cyan: observed human-driven vehicles
"""
for veh_id in self.get_rl_ids():
try:
# color rl vehicles red
self.set_color(veh_id=veh_id, color=RED)
except (FatalTraCIError, TraCIException) as e:
print('Error when updating rl vehicle colors:', e)
# color vehicles white if not observed and cyan if observed
for veh_id in self.get_human_ids():
if veh_id.find("emergency") != -1:
color = (0, 255, 0)
self.set_color(veh_id=veh_id, color=color)
elif veh_id.find("jordan") != -1:
self.set_color(veh_id=veh_id, color=PURPLE)
else:
try:
color = CYAN if veh_id in self.get_observed_ids() else WHITE
self.set_color(veh_id=veh_id, color=color)
except (FatalTraCIError, TraCIException) as e:
print('Error when updating human vehicle colors:', e)
"""
try:
color = CYAN if veh_id in self.get_observed_ids() else WHITE
self.set_color(veh_id=veh_id, color=color)
except (FatalTraCIError, TraCIException) as e:
print('Error when updating human vehicle colors:', e)"""
# clear the list of observed vehicles
for veh_id in self.get_observed_ids():
self.remove_observed(veh_id)
def get_color(self, veh_id):
"""See parent class.
This does not pass the last term (i.e. transparency).
"""
r, g, b, t = self.kernel_api.vehicle.getColor(veh_id)
return r, g, b
def set_color(self, veh_id, color):
"""See parent class.
The last term for sumo (transparency) is set to 255.
"""
if self._color_vehicles:
r, g, b = color
self.kernel_api.vehicle.setColor(
vehID=veh_id, color=(r, g, b, 255))
def add(self, veh_id, type_id, edge, pos, lane, speed):
"""See parent class."""
if veh_id in self.master_kernel.network.rts:
# If the vehicle has its own route, use that route. This is used in
# the case of network templates.
route_id = 'route{}_0'.format(veh_id)
else:
num_routes = len(self.master_kernel.network.rts[edge])
frac = [val[1] for val in self.master_kernel.network.rts[edge]]
route_id = 'route{}_{}'.format(edge, np.random.choice(
[i for i in range(num_routes)], size=1, p=frac)[0])
self.kernel_api.vehicle.addFull(
veh_id,
route_id,
typeID=str(type_id),
departLane=str(lane),
departPos=str(pos),
departSpeed=str(speed))
def get_max_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_max_speed(vehID, error) for vehID in veh_id]
return self.kernel_api.vehicle.getMaxSpeed(veh_id)
def set_max_speed(self, veh_id, max_speed):
"""See parent class."""
self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed)
|
the-stack_106_13323
|
import os
import json
import requests
def w3c_validator(document, output='json'):
'''
Programmatic checking of modern HTML documents using the API provided by
the W3C HTML Checker.
Parameters
----------
* out: str {gnu, json, xhtml, xml, text}
'''
w3c_validator = 'https://validator.w3.org/nu/?out={}'.format(output)
headers = {'Content-Type': 'text/html; charset=utf-8'}
response = requests.post(w3c_validator, data=document, headers=headers)
response.raise_for_status()
w3c_result = response.content.decode()
if output == 'json':
results = json.loads(w3c_result)['messages']
if results:
return results
else:
return True
else:
return w3c_result
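# Minimal usage sketch (the HTML snippet is made up for illustration and a
# network connection to validator.w3.org is required):
#
#   doc = '<!DOCTYPE html><html lang="en"><head><title>t</title></head><body></body></html>'
#   result = w3c_validator(doc)          # True if valid, else a list of messages
#   report = w3c_validator(doc, 'text')  # raw text report instead of parsed JSON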
|
the-stack_106_13327
|
t = int(input())
while t>0:
a = int(input())
sa = set(int(i) for i in input().split())
b = int(input())
sb = set(int(i) for i in input().split())
flag = 0
for i in sa:
if i in sb:
continue
else:
flag = 1
if flag == 1:
print("False")
else:
print("True")
t -= 1
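# Note: the flag loop above answers "is set sa a subset of set sb?"; the same
# result could be printed more directly with print(sa.issubset(sb)), which also
# stops scanning as soon as the answer is known.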
|
the-stack_106_13328
|
import subprocess
from colors import bcolors
from tempfile import NamedTemporaryFile
def paint_red(s):
return f"{bcolors.FAIL}{s}{bcolors.ENDC}"
def paint_green(s):
return f"{bcolors.OKGREEN}{s}{bcolors.ENDC}"
def check_next_line(proc, cur_line):
# Read next line
next_line = proc.stdout.readline()
next_line_str = bytes.decode(next_line)
next_line_str = next_line_str.rstrip("\n")
inp_add = ""
out_add = ""
if not next_line or next_line_str[0] == ' ':
if cur_line[0] == '+':
inp_add += paint_green(cur_line[1:])
out_add += "-"*len(cur_line[1:])
if cur_line[0] == '-':
out_add += paint_red(cur_line[1:])
inp_add += "-"*len(cur_line[1:])
if next_line:
inp_add += next_line_str[1:]
out_add += next_line_str[1:]
elif next_line_str[0] == '+':
if cur_line[0] == '-':
str_diff = len(cur_line[1:]) - len(next_line_str[1:])
if str_diff > 0:
out_add += paint_red(cur_line[1:])
inp_add += paint_green(next_line_str[1:] +
'-'*str_diff)
else:
out_add += paint_red(cur_line[1:] + '-'*(-str_diff))
inp_add += paint_green(next_line_str[1:])
return inp_add, out_add
def check_solution(input_text: str, solution: str) -> None:
with NamedTemporaryFile("w+") as f_inp, NamedTemporaryFile("w+") as f_sol:
f_inp.write(input_text)
f_inp.flush()
f_sol.write(solution)
f_sol.flush()
check_solution_files(f_inp.name, f_sol.name)
def check_solution_files(input_file, sol_file):
cmd_string = (
"git diff --no-index --word-diff=porcelain --word-diff-regex=. "
f"{input_file} {sol_file}"
" | tail -n +6 | head -n -1") # Cut the first 6 and last 1 lines
# print(cmd_string)
proc = subprocess.Popen(
cmd_string, stdout=subprocess.PIPE, shell=True)
inp_form = ""
out_form = ""
while True:
line = proc.stdout.readline()
if not line:
break
# the real code does filtering here
# print ("test:", line.rstrip())
# print(type(line))
line_str = bytes.decode(line)
line_str = line_str.rstrip("\n")
# print(line_str)
if line_str[0] in ['+', '-']:
# inp_add, out_add = check_next_line(proc, line_str, inp_form, out_form)
inp_add, out_add = check_next_line(proc, line_str)
inp_form += inp_add
out_form += out_add
else:
inp_form += line_str[1:]
out_form += line_str[1:]
print(f"Input: {out_form}")
print(f"Correct: {inp_form}")
if __name__ == "__main__":
check_solution("AAA", "AABA")
# main()
|
the-stack_106_13331
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aws_direct_connect_connection
short_description: Creates, deletes, modifies a DirectConnect connection
description:
- Create, update, or delete a Direct Connect connection between a network and a specific AWS Direct Connect location.
Upon creation the connection may be added to a link aggregation group or established as a standalone connection.
The connection may later be associated or disassociated with a link aggregation group.
version_added: "2.4"
author: "Sloane Hertel (@s-hertel)"
requirements:
- boto3
- botocore
options:
state:
description:
- The state of the Direct Connect connection.
choices:
- present
- absent
name:
description:
- The name of the Direct Connect connection. This is required to create a
new connection. To recreate or delete a connection I(name) or I(connection_id)
is required.
connection_id:
description:
- The ID of the Direct Connect connection. I(name) or I(connection_id) is
required to recreate or delete a connection. Modifying attributes of a
connection with I(force_update) will result in a new Direct Connect connection ID.
location:
description:
- Where the Direct Connect connection is located. Required when I(state=present).
bandwidth:
description:
- The bandwidth of the Direct Connect connection. Required when I(state=present).
choices:
- 1Gbps
- 10Gbps
link_aggregation_group:
description:
- The ID of the link aggregation group you want to associate with the connection.
This is optional in case a stand-alone connection is desired.
force_update:
description:
- To modify bandwidth or location the connection will need to be deleted and recreated.
By default this will not happen - this option must be set to True.
"""
EXAMPLES = """
# create a Direct Connect connection
aws_direct_connect_connection:
name: ansible-test-connection
state: present
location: EqDC2
link_aggregation_group: dxlag-xxxxxxxx
bandwidth: 1Gbps
register: dc
# disassociate the LAG from the connection
aws_direct_connect_connection:
state: present
connection_id: dc.connection.connection_id
location: EqDC2
bandwidth: 1Gbps
# replace the connection with one with more bandwidth
aws_direct_connect_connection:
state: present
name: ansible-test-connection
location: EqDC2
bandwidth: 10Gbps
force_update: True
# delete the connection
aws_direct_connect_connection:
state: absent
name: ansible-test-connection
"""
RETURN = """
connection:
description:
- The attributes of the Direct Connect connection
type: complex
returned: I(state=present)
contains:
aws_device:
description: The endpoint which the physical connection terminates on.
bandwidth:
description: The bandwidth of the connection.
connection_id:
description: ID of the Direct Connect connection.
connection_state:
description: The state of the connection.
location:
description: Where the connection is located.
owner_account:
description: The owner of the connection.
region:
description: The region in which the connection exists.
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, ec2_argument_spec, HAS_BOTO3,
get_aws_connection_info, boto3_conn, AWSRetry)
from ansible.module_utils.aws.direct_connect import (DirectConnectError, delete_connection,
associate_connection_and_lag, disassociate_connection_and_lag)
try:
import botocore
except ImportError:
pass
# handled by imported HAS_BOTO3
retry_params = {"tries": 10, "delay": 5, "backoff": 1.2}
def connection_status(client, connection_id):
return connection_exists(client, connection_id=connection_id, connection_name=None, verify=False)
@AWSRetry.backoff(**retry_params)
def connection_exists(client, connection_id=None, connection_name=None, verify=True):
try:
if connection_id:
response = client.describe_connections(connectionId=connection_id)
else:
response = client.describe_connections()
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to describe DirectConnect ID {0}".format(connection_id),
last_traceback=traceback.format_exc(),
response=e.response)
match = []
connection = []
# look for matching connections
if len(response.get('connections', [])) == 1 and connection_id:
if response['connections'][0]['connectionState'] != 'deleted':
match.append(response['connections'][0]['connectionId'])
connection.extend(response['connections'])
for conn in response.get('connections', []):
if connection_name == conn['connectionName'] and conn['connectionState'] != 'deleted':
match.append(conn['connectionId'])
connection.append(conn)
    # verifying if the connection exists; if true, return connection identifier, otherwise return False
if verify and len(match) == 1:
return match[0]
elif verify:
return False
# not verifying if the connection exists; just return current connection info
elif len(connection) == 1:
return {'connection': connection[0]}
return {'connection': {}}
@AWSRetry.backoff(**retry_params)
def create_connection(client, location, bandwidth, name, lag_id):
if not name:
raise DirectConnectError(msg="Failed to create a Direct Connect connection: name required.")
try:
if lag_id:
connection = client.create_connection(location=location,
bandwidth=bandwidth,
connectionName=name,
lagId=lag_id)
else:
connection = client.create_connection(location=location,
bandwidth=bandwidth,
connectionName=name)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to create DirectConnect connection {0}".format(name),
last_traceback=traceback.format_exc(),
response=e.response)
return connection['connectionId']
def changed_properties(current_status, location, bandwidth):
current_bandwidth = current_status['bandwidth']
current_location = current_status['location']
return current_bandwidth != bandwidth or current_location != location
@AWSRetry.backoff(**retry_params)
def update_associations(client, latest_state, connection_id, lag_id):
changed = False
if 'lagId' in latest_state and lag_id != latest_state['lagId']:
disassociate_connection_and_lag(client, connection_id, lag_id=latest_state['lagId'])
changed = True
if (changed and lag_id) or (lag_id and 'lagId' not in latest_state):
associate_connection_and_lag(client, connection_id, lag_id)
changed = True
return changed
def ensure_present(client, connection_id, connection_name, location, bandwidth, lag_id, forced_update):
# the connection is found; get the latest state and see if it needs to be updated
if connection_id:
latest_state = connection_status(client, connection_id=connection_id)['connection']
if changed_properties(latest_state, location, bandwidth) and forced_update:
ensure_absent(client, connection_id)
return ensure_present(client=client,
connection_id=None,
connection_name=connection_name,
location=location,
bandwidth=bandwidth,
lag_id=lag_id,
forced_update=forced_update)
elif update_associations(client, latest_state, connection_id, lag_id):
return True, connection_id
# no connection found; create a new one
else:
return True, create_connection(client, location, bandwidth, connection_name, lag_id)
return False, connection_id
@AWSRetry.backoff(**retry_params)
def ensure_absent(client, connection_id):
changed = False
if connection_id:
delete_connection(client, connection_id)
changed = True
return changed
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(),
location=dict(),
bandwidth=dict(choices=['1Gbps', '10Gbps']),
link_aggregation_group=dict(),
connection_id=dict(),
forced_update=dict(type='bool', default=False)
))
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[('connection_id', 'name')],
required_if=[('state', 'present', ('location', 'bandwidth'))])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
connection = boto3_conn(module, conn_type='client',
resource='directconnect', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
connection_id = connection_exists(connection,
connection_id=module.params.get('connection_id'),
connection_name=module.params.get('name'))
if not connection_id and module.params.get('connection_id'):
module.fail_json(msg="The Direct Connect connection {0} does not exist.".format(module.params.get('connection_id')))
state = module.params.get('state')
try:
if state == 'present':
changed, connection_id = ensure_present(connection,
connection_id=connection_id,
connection_name=module.params.get('name'),
location=module.params.get('location'),
bandwidth=module.params.get('bandwidth'),
lag_id=module.params.get('link_aggregation_group'),
forced_update=module.params.get('forced_update'))
response = connection_status(connection, connection_id)
elif state == 'absent':
changed = ensure_absent(connection, connection_id)
response = {}
except DirectConnectError as e:
if e.response:
module.fail_json(msg=e.msg, exception=e.last_traceback, **e.response)
elif e.last_traceback:
module.fail_json(msg=e.msg, exception=e.last_traceback)
else:
module.fail_json(msg=e.msg)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
if __name__ == '__main__':
main()
|
the-stack_106_13333
|
import pytest
import io
from ecostake.util.ints import int8, uint8, int16, uint16, int32, uint32, int64, uint64, uint128, int512
class TestStructStream:
def _test_impl(self, cls, upper_boundary, lower_boundary):
with pytest.raises(ValueError):
t = cls(upper_boundary + 1)
with pytest.raises(ValueError):
t = cls(lower_boundary - 1)
t = cls(upper_boundary)
assert t == upper_boundary
t = cls(lower_boundary)
assert t == lower_boundary
t = cls(0)
assert t == 0
def test_int512(self):
# int512 is special. it uses 65 bytes to allow positive and negative
# "uint512"
self._test_impl(
int512,
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, # noqa: E501
-0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, # noqa: E501
)
def test_uint128(self):
self._test_impl(uint128, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, 0)
def test_uint64(self):
self._test_impl(uint64, 0xFFFFFFFFFFFFFFFF, 0)
def test_int64(self):
self._test_impl(int64, 0x7FFFFFFFFFFFFFFF, -0x8000000000000000)
def test_uint32(self):
self._test_impl(uint32, 0xFFFFFFFF, 0)
def test_int32(self):
self._test_impl(int32, 0x7FFFFFFF, -0x80000000)
def test_uint16(self):
self._test_impl(uint16, 0xFFFF, 0)
def test_int16(self):
self._test_impl(int16, 0x7FFF, -0x8000)
def test_uint8(self):
self._test_impl(uint8, 0xFF, 0)
def test_int8(self):
self._test_impl(int8, 0x7F, -0x80)
def test_roundtrip(self):
def roundtrip(v):
s = io.BytesIO()
v.stream(s)
s.seek(0)
cls = type(v)
v2 = cls.parse(s)
assert v2 == v
# int512 is special. it uses 65 bytes to allow positive and negative
# "uint512"
roundtrip(
int512(
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF # noqa: E501
)
)
roundtrip(
int512(
-0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF # noqa: E501
)
)
roundtrip(uint128(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF))
roundtrip(uint128(0))
roundtrip(uint64(0xFFFFFFFFFFFFFFFF))
roundtrip(uint64(0))
roundtrip(int64(0x7FFFFFFFFFFFFFFF))
roundtrip(int64(-0x8000000000000000))
roundtrip(uint32(0xFFFFFFFF))
roundtrip(uint32(0))
roundtrip(int32(0x7FFFFFFF))
roundtrip(int32(-0x80000000))
roundtrip(uint16(0xFFFF))
roundtrip(uint16(0))
roundtrip(int16(0x7FFF))
roundtrip(int16(-0x8000))
roundtrip(uint8(0xFF))
roundtrip(uint8(0))
roundtrip(int8(0x7F))
roundtrip(int8(-0x80))
|
the-stack_106_13334
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 11 08:52:32 2019
@author: ruan
"""
import numpy as np
from pandas import Series as series
import os
import time
import jieba
from opencc import OpenCC
from gensim.corpora import WikiCorpus
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
from gensim.models import Doc2Vec
from gensim.models.word2vec import LineSentence
from gensim.models.doc2vec import TaggedDocument
import multiprocessing
import logging
logging.basicConfig(level=logging.WARNING,format="[%(asctime)s] %(message)s",datefmt="%Y-%m-%d %H:%M:%S",)
import codecs
path_nlp = r'E:\\MachineLearning\\data\\nlp\\'
flag_test = False
'''
Wikipedia corpus download URL:
https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2
'''
# word2vec parameters
w2v_dim = 100
w2v_window = 10
# If min_count is set too high, training fails with: you must first build vocabulary before training the model
w2v_min_count = 3
w2v_iter = 10
# The batch_words parameter has a huge influence on the results, for reasons unknown; the default is 10000.
# I once tried setting it to 1000000 and the output of most_similar etc. was very poor.
w2v_batch_words = 1000
# Skip-gram takes roughly 3-5x as long as CBOW. Most examples online use sg; I do not know why.
w2v_sg = 0
# doc2vec parameters
d2v_dim = 100
d2v_window = 10
d2v_min_count = 3
d2v_epoch = 5
d2v_dm = 0
# Compute the cosine similarity of two vectors
def simlarityCalu(vector1, vector2):
vector1Mod = np.sqrt(vector1.dot(vector1))
vector2Mod = np.sqrt(vector2.dot(vector2))
if vector2Mod != 0 and vector1Mod != 0:
simlarity = (vector1.dot(vector2)) / (vector1Mod * vector2Mod)
else:
simlarity = 0
return simlarity
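# A minimal usage sketch for simlarityCalu (hypothetical vectors; both arguments
# are expected to be equal-length numpy arrays):
#   v1 = np.array([1.0, 0.0, 1.0])
#   v2 = np.array([1.0, 1.0, 1.0])
#   simlarityCalu(v1, v2)  # ~0.816 == 2 / (sqrt(2) * sqrt(3))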
# Read and preprocess the Wikipedia corpus
def load_wiki_corpus(path_data_in=None, path_data_out=None, word2vec=True):
if path_data_in==None:
corpus_path = path_nlp+r'zhwiki-latest-pages-articles.xml.bz2'
else:
corpus_path = path_data_in
if path_data_out==None:
if word2vec==True:
corpus_processed_path = path_nlp+'corpus_word2vec.txt'
else:
corpus_processed_path = path_nlp+'corpus_doc2vec.txt'
else:
corpus_processed_path = path_data_out
cc = OpenCC('t2s')
count = 0
with open(corpus_processed_path, 'w', encoding='utf-8') as corpus_processed:
corpus=WikiCorpus(corpus_path, lemmatize=False, dictionary={})
if word2vec==True:
for doc in corpus.get_texts():
doc_new = series(doc).apply(lambda x : ' '.join(jieba.cut(cc.convert(x), cut_all=False)))
corpus_processed.write(' '.join(doc_new)+"\n")
count+=1
if (count%100 == 0):
logging.warning('Saved '+str(count)+' articles')
if ((flag_test==True) and (count==1000)):
return
else:
corpus.metadata = True
for doc,(page_id,title) in corpus.get_texts():
doc_new = TaggedDocument(words=[word for sentence in doc for word in jieba.cut(cc.convert(sentence))], tags=[cc.convert(title)])
corpus_processed.write(' '.join(doc_new[0])+'\t'+'\t'.join(doc_new[1])+"\n")
count+=1
if (count%100 == 0):
logging.warning('Saved '+str(count)+' articles')
if ((flag_test==True) and (count==1000)):
return
return
# Train a text-vectorisation model (word2vec or doc2vec)
def generate_text2vec_model(path_data=None, dim=None, word2vec=True):
if word2vec==True:
fun = generate_word2vec_model
else:
fun = generate_doc2vec_model
if dim==None:
return fun(path_data)
else:
return fun(path_data,dim)
# Train word vectors
def generate_word2vec_model(path_data=None, word2vec_dim=w2v_dim):
if path_data==None:
corpus_path = path_nlp+r'corpus_word2vec.txt'
else:
corpus_path = path_data
    # Train the model
logging.warning('begin word2vec')
model = Word2Vec(LineSentence(corpus_path), sg=w2v_sg, size=word2vec_dim,\
window=w2v_window, min_count=w2v_min_count,\
batch_words=w2v_batch_words, iter=w2v_iter,\
seed=int(time.time()), workers=multiprocessing.cpu_count())
logging.warning('end word2vec')
    # Save the model
model.save(path_nlp+r'wiki_zh_w2v_model_{}'.format(word2vec_dim))
model.wv.save_word2vec_format(path_nlp+r'wiki_zh_w2v_vector_{}'.format(word2vec_dim), binary=False)
logging.warning('saved word2vec model')
return model
# Train document (sentence) vectors
def generate_doc2vec_model(path_data=None, doc2vec_dim=d2v_dim):
if path_data==None:
corpus_path = path_nlp+r'corpus_doc2vec.txt'
else:
corpus_path = path_data
    # Inner iterator that yields TaggedDocument objects from the corpus file
class LineSentence_doc2vec():
def __iter__(self):
for doc in open(corpus_path,'r',encoding='utf-8').readlines():
if doc.strip()!='':
words,tags = doc.split('\t',maxsplit=1)
words = words.split(' ')
tags = [tag.strip() for tag in tags.split('\t')]
yield TaggedDocument(words=words, tags=tags)
    # Train the model
logging.warning('begin doc2vec')
model = Doc2Vec(LineSentence_doc2vec(), dm=d2v_dm, vector_size=d2v_dim,\
window=d2v_window, min_count=d2v_min_count,\
dbow_words=2, epochs=d2v_epoch,\
seed=int(time.time()), workers=multiprocessing.cpu_count())
logging.warning('end doc2vec')
    # Save the model
model.save(path_nlp+r'wiki_zh_d2v_model_{}'.format(doc2vec_dim))
logging.warning('saved doc2vec model')
return model
# Load a pre-trained word-vector file
def load_w2v_vector(path_data=None):
if path_data==None:
w2v_path = path_nlp+r'Tencent_AILab_ChineseEmbedding.txt'
if not os.path.exists(w2v_path):
w2v_path = path_nlp+r'wiki_zh_vector'
else:
w2v_path = path_data
w2v_vector = KeyedVectors.load_word2vec_format(w2v_path,binary=False)
return w2v_vector
# Convert a new article into a doc2vec vector
def doc2vec(file_name):
start_alpha = 0.01
infer_epoch = 1000
model = Doc2Vec.load(r'E:\MachineLearning\data\nlp\wiki_zh_d2v_model_100')
doc = [w for x in codecs.open(file_name, 'r', 'utf-8').readlines() for w in jieba.cut(x.strip())]
doc_vec_all = model.infer_vector(doc, alpha=start_alpha, steps=infer_epoch)
return doc_vec_all
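# Usage sketch (hypothetical input path). infer_vector() is stochastic, so
# repeated calls may return slightly different vectors for the same text:
#   vec = doc2vec(r'E:\MachineLearning\data\nlp\some_article.txt')
#   print(vec.shape)  # (100,), matching d2v_dim at training time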
# word2vec demo / quick sanity checks
def w2v_demo(w2v_model):
w2v_model.vector_size
w2v_model.index2word
w2v_model.get_vector('数学')
w2v_model.most_similar(u"数学")
w2v_model.most_similar(positive=[ u"皇上",u"女人"],negative=u"男人")
w2v_model.doesnt_match(u'数学 物理 微积分 几何 代数 数论'.split())
w2v_model.similarity(u'书籍',u'书本')
w2v_model.similarity(u'逛街',u'书本')
if __name__=='__main__':
flag_test = True
# load_wiki_corpus()
# generate_text2vec_model()
# load_wiki_corpus(word2vec=False)
# generate_text2vec_model(word2vec=False)
from gensim.models import Word2Vec
sents = []
for i in range(100):
sents.append(list('我爱机器学习我爱机器学习我爱机器学习我爱机器学习我爱机器学习我爱机器学习'))
model = Word2Vec(sents, size=100, window=5, min_count=5, sg=1)
|
the-stack_106_13335
|
import sys
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.operations.freeze import freeze
from pip._internal.utils.compat import stdlib_pkgs
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'}
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import List
class FreezeCommand(Command):
"""
Output installed packages in requirements format.
packages are listed in a case-insensitive sorted order.
"""
usage = """
%prog [options]"""
log_streams = ("ext://sys.stderr", "ext://sys.stderr")
def add_options(self):
# type: () -> None
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help="Use the order in the given requirements file and its "
"comments when generating output. This option can be "
"used multiple times.")
self.cmd_opts.add_option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='URL',
help='URL for finding packages, which will be added to the '
'output.')
self.cmd_opts.add_option(
'-l', '--local',
dest='local',
action='store_true',
default=False,
help='If in a virtualenv that has global access, do not output '
'globally-installed packages.')
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
self.cmd_opts.add_option(cmdoptions.list_path())
self.cmd_opts.add_option(
'--all',
dest='freeze_all',
action='store_true',
help='Do not skip these packages in the output:'
' {}'.format(', '.join(DEV_PKGS)))
self.cmd_opts.add_option(
'--exclude-editable',
dest='exclude_editable',
action='store_true',
help='Exclude editable package from output.')
self.cmd_opts.add_option(cmdoptions.list_exclude())
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
# type: (Values, List[str]) -> int
skip = set(stdlib_pkgs)
if not options.freeze_all:
skip.update(DEV_PKGS)
if options.excludes:
skip.update(options.excludes)
cmdoptions.check_list_path_option(options)
if options.find_links:
deprecated(
"--find-links option in pip freeze is deprecated.",
replacement=None,
gone_in="21.2",
issue=9069,
)
for line in freeze(
requirement=options.requirements,
find_links=options.find_links,
local_only=options.local,
user_only=options.user,
paths=options.path,
isolated=options.isolated_mode,
skip=skip,
exclude_editable=options.exclude_editable,
):
sys.stdout.write(line + '\n')
return SUCCESS
|
the-stack_106_13339
|
from copy import deepcopy
import os
import hashlib
import base64
import autode.wrappers.keywords as kws
import autode.exceptions as ex
from autode.point_charges import PointCharge
from autode.solvent.solvents import get_available_solvent_names
from autode.solvent.solvents import get_solvent
from autode.config import Config
from autode.solvent.solvents import Solvent
from autode.log import logger
output_exts = ('.out', '.hess', '.xyz', '.inp', '.com', '.log', '.nw',
'.pc', '.grad')
def execute_calc(calc):
""" Top level function that can be hashed"""
return calc.execute_calculation()
def get_solvent_name(molecule, method):
"""
    Set the solvent keyword to use in the calculation given a QM method
Arguments:
molecule (autode.species.Species)
method (autode.wrappers.base.ElectronicStructureMethod):
"""
if molecule.solvent is None:
logger.info('Calculation is in the gas phase')
return None
if type(molecule.solvent) is str:
# Solvent could be a string from e.g. cgbind
solvent = get_solvent(solvent_name=molecule.solvent)
elif isinstance(molecule.solvent, Solvent):
# Otherwise expecting a autode.solvents.solvent.Solvent
solvent = molecule.solvent
else:
raise ex.SolventUnavailable('Expecting either a str or Solvent')
try:
# Get the name of the solvent for this method
return getattr(solvent, method.name)
except AttributeError:
raise ex.SolventUnavailable(f'Available solvents for {method.name} are'
f' {get_available_solvent_names(method)}')
class Calculation:
def __str__(self):
"""Create a unique string(/hash) of the calculation"""
string = (f'{self.name}{self.method.name}{str(self.input.keywords)}'
f'{str(self.molecule)}{self.method.implicit_solvation_type}'
f'{str(self.molecule.constraints)}')
if self.input.temp is not None:
string += str(self.input.temp)
hasher = hashlib.sha1(string.encode()).digest()
return base64.urlsafe_b64encode(hasher).decode()
def _check_molecule(self):
"""Ensure the molecule has the required attributes"""
assert hasattr(self.molecule, 'n_atoms')
assert hasattr(self.molecule, 'atoms')
assert hasattr(self.molecule, 'mult')
assert hasattr(self.molecule, 'charge')
assert hasattr(self.molecule, 'solvent')
# The molecule must have > 0 atoms
if self.molecule.atoms is None or self.molecule.n_atoms == 0:
logger.error('Have no atoms. Can\'t form a calculation')
raise ex.NoInputError
def _get_energy(self, e=False, h=False, g=False, force=False):
"""
Get the energy from a completed calculation
Keyword Arguments:
e (bool): Return the potential energy (E)
h (bool): Return the enthalpy (H) at 298 K
g (bool): Return the Gibbs free energy (G) at 298 K
force (bool): Return the energy even if the calculation errored
Returns:
(float): Energy in Hartrees, or None
"""
logger.info(f'Getting energy from {self.output.filename}')
if self.terminated_normally() or force:
if h:
return self.method.get_enthalpy(self)
if g:
return self.method.get_free_energy(self)
if e:
return self.method.get_energy(self)
logger.error('Calculation did not terminate normally. Energy = None')
return None
def _fix_unique(self, register_name='.autode_calculations'):
"""
If a calculation has already been run for this molecule then it
shouldn't be run again, unless the input keywords have changed, in
which case it should be run while retaining the previous data. This
function fixes this problem by checking .autode_calculations and adding
a number to the end of self.name if the calculation input is different
"""
def append_register():
with open(register_name, 'a') as register_file:
print(self.name, str(self), file=register_file)
def exists():
return any(reg_name == self.name for reg_name in register.keys())
def is_identical():
return any(reg_id == str(self) for reg_id in register.values())
# If there is no register yet in this folder then create it
if not os.path.exists(register_name):
logger.info('No calculations have been performed here yet')
append_register()
return
# Populate a register of calculation names and their unique identifiers
register = {}
for line in open(register_name, 'r'):
if len(line.split()) == 2: # Expecting: name id
calc_name, identifier = line.split()
register[calc_name] = identifier
if is_identical():
logger.info('Calculation has already been run')
return
# If this calculation doesn't yet appear in the register add it
if not exists():
logger.info('This calculation has not yet been run')
append_register()
return
        # If we're here then this calculation - with this input - has not yet
# been run. Therefore, add an integer to the calculation name until
# either the calculation has been run before and is the same or it's
# not been run
logger.info('Calculation with this name has been run before but '
'with different input')
name, n = self.name, 0
while True:
self.name = f'{name}{n}'
logger.info(f'New calculation name is: {self.name}')
if is_identical():
return
if not exists():
append_register()
return
n += 1
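    # Note on the register format used by _fix_unique(): the file (by default
    # ".autode_calculations") holds one "<calculation name> <unique id>" pair
    # per line, where the id is the base64-encoded SHA1 hash returned by
    # __str__ above, so identical re-runs are detected by comparing those ids.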
def _add_to_comp_methods(self):
"""Add the methods used in this calculation to the used methods list"""
from autode.log.methods import methods
methods.add(f'Calculations were performed using {self.method.name} v. '
f'{self.method.get_version(self)} '
f'({self.method.doi_str()}).')
# Type of calculation ----
if isinstance(self.input.keywords, kws.SinglePointKeywords):
string = 'Single point '
elif isinstance(self.input.keywords, kws.OptKeywords):
string = 'Optimisation '
else:
logger.warning('Not adding gradient or hessian to methods section '
'anticipating that they will be the same as opt')
# and have been already added to the methods section
return
# Level of theory ----
string += (f'calculations performed at the '
f'{self.input.keywords.method_string()} level')
basis = self.input.keywords.basis_set
if basis is not None:
string += (f' in combination with the {str(basis)} '
f'({basis.doi_str()}) basis set')
if self.input.solvent is not None:
solv_type = self.method.implicit_solvation_type
doi = solv_type.doi_str() if hasattr(solv_type, 'doi_str') else '?'
string += (f' and {solv_type.upper()} ({doi}) '
f'solvation, with parameters appropriate for '
f'{self.input.solvent}')
methods.add(f'{string}.\n')
return None
def get_energy(self):
return self._get_energy(e=True)
def get_enthalpy(self):
return self._get_energy(h=True)
def get_free_energy(self):
return self._get_energy(g=True)
def optimisation_converged(self):
"""Check whether a calculation has has converged to within the theshold
on energies and graidents specified in the input
Returns:
(bool)
"""
logger.info('Checking to see if the geometry converged')
if not self.output.exists():
return False
return self.method.optimisation_converged(self)
def optimisation_nearly_converged(self):
"""Check whether a calculation has nearly converged and may just need
more geometry optimisation steps to complete successfully
Returns:
(bool)
"""
logger.info('Checking to see if the geometry nearly converged')
if not self.output.exists():
return False
return self.method.optimisation_nearly_converged(self)
def get_imaginary_freqs(self):
"""Get the imaginary frequencies from a calculation output note that
they are returned as negative to conform with standard QM codes
Returns:
(list(float)): List of negative frequencies in wavenumbers (cm-1)
"""
logger.info(f'Getting imaginary frequencies from {self.name}')
return self.method.get_imaginary_freqs(self)
def get_normal_mode_displacements(self, mode_number):
"""Get the displacements along a mode for each of the n_atoms in the
structure will return a list of length n_atoms each with 3 components
(x, y, z)
Arguments:
mode_number (int): Normal mode number. 6 will be the first
vibrational mode as 0->2 are translation and
3->5 rotation
Returns:
(np.ndarray): Displacement vectors for each atom (Å)
modes.shape = (n_atoms, 3)
"""
modes = self.method.get_normal_mode_displacements(self, mode_number)
if len(modes) != self.molecule.n_atoms:
raise ex.NoNormalModesFound
return modes
def get_final_atoms(self):
"""
Get the atoms from the final step of a geometry optimisation
Returns:
(list(autode.atoms.Atom)):
"""
logger.info(f'Getting final atoms from {self.output.filename}')
if not self.output.exists():
logger.error('No calculation output. Could not get atoms')
raise ex.AtomsNotFound
# Extract the atoms from the output file, which is method dependent
atoms = self.method.get_final_atoms(self)
if len(atoms) != self.molecule.n_atoms:
logger.error(f'Failed to get atoms from {self.output.filename}')
raise ex.AtomsNotFound
return atoms
def get_atomic_charges(self):
"""
Get the partial atomic charges from a calculation. The method used to
calculate them depends on the QM method and are implemented in their
respective wrappers
Returns:
(list(float)): Atomic charges in units of e
"""
if not self.output.exists():
logger.error('No calculation output. Could not get final charges')
raise ex.CouldNotGetProperty(name='atomic charges')
logger.info(f'Getting atomic charges from {self.output.filename}')
charges = self.method.get_atomic_charges(self)
if len(charges) != self.molecule.n_atoms:
raise ex.CouldNotGetProperty(name='atomic charges')
return charges
def get_gradients(self):
"""
Get the gradient (dE/dr) with respect to atomic displacement from a
calculation
Returns:
(np.ndarray): Gradient vectors for each atom (Ha Å^-1)
gradients.shape = (n_atoms, 3)
"""
logger.info(f'Getting gradients from {self.output.filename}')
gradients = self.method.get_gradients(self)
if len(gradients) != self.molecule.n_atoms:
raise ex.CouldNotGetProperty(name='gradients')
return gradients
def terminated_normally(self):
"""Determine if the calculation terminated without error"""
logger.info(f'Checking for {self.output.filename} normal termination')
if not self.output.exists():
logger.warning('Calculation did not generate any output')
return False
return self.method.calculation_terminated_normally(self)
def clean_up(self, force=False, everything=False):
"""Clean up input files, if Config.keep_input_files is False"""
if Config.keep_input_files and not force:
logger.info('Keeping input files')
return
filenames = self.input.get_input_filenames()
if everything:
filenames.append(self.output.filename)
logger.info(f'Deleting {filenames}')
# Delete the files that exist
for filename in filenames:
if not os.path.exists(filename):
logger.warning(f'Could not delete {filename} it did not exist')
continue
os.remove(filename)
return None
def generate_input(self):
"""Generate the required input"""
logger.info(f'Generating input file(s) for {self.name}')
# Can switch off uniqueness testing with e.g.
# export AUTODE_FIXUNIQUE=False used for testing
if os.getenv('AUTODE_FIXUNIQUE', True) != 'False':
self._fix_unique()
self.input.filename = self.method.get_input_filename(self)
# Check that if the keyword is a autode.wrappers.keywords.Keyword then
# it has the required name in the method used for this calculation
for keyword in self.input.keywords:
# Allow keywords as strings
if not isinstance(keyword, kws.Keyword):
continue
# Allow for the unambiguous setting of a keyword with only a name
if keyword.has_only_name():
# set e.g. keyword.orca = 'b3lyp'
setattr(keyword, self.method.name, keyword.name)
continue
# For a keyword e.g. Keyword(name='pbe', orca='PBE') then the
# definition in this method is not obvious, so raise an exception
if not hasattr(keyword, self.method.name):
err_str = (f'Keyword: {keyword} is not supported set '
f'{keyword}.{self.method.name} as a string')
raise ex.UnsuppportedCalculationInput(err_str)
self.method.generate_input(self, self.molecule)
return None
def execute_calculation(self):
"""Execute a calculation if it has not been run or finish correctly"""
logger.info(f'Running {self.input.filename} using {self.method.name}')
if not self.input.exists():
raise ex.NoInputError('Input did not exist')
# If the output file already exists set the output lines
if self.output.filename is not None and os.path.exists(self.output.filename):
self.output.set_lines()
if self.output.exists() and self.terminated_normally():
logger.info('Calculation already terminated normally. Skipping')
return None
# Check that the method used to execute the calculation is available
if not self.method.available:
raise ex.MethodUnavailable
self.method.execute(self)
self.output.set_lines()
return None
def run(self):
"""Run the calculation using the EST method """
logger.info(f'Running calculation {self.name}')
# Set an input filename and generate the input
self.generate_input()
# Set the output filename, run the calculation and clean up the files
self.output.filename = self.method.get_output_filename(self)
self.execute_calculation()
self.clean_up()
self._add_to_comp_methods()
return None
def __init__(self, name, molecule, method, keywords, n_cores=1,
bond_ids_to_add=None,
other_input_block=None,
distance_constraints=None,
cartesian_constraints=None,
point_charges=None,
temp=None):
"""
Arguments:
name (str):
molecule (autode.species.Species): Molecule to be calculated
method (autode.wrappers.base.ElectronicStructureMethod):
keywords (autode.wrappers.keywords.Keywords):
Keyword Arguments:
n_cores (int): Number of cores available (default: {1})
bond_ids_to_add (list(tuples)): List of bonds to add to internal
coordinates (default: {None})
other_input_block (str): Other input block to add (default: {None})
distance_constraints (dict): keys = tuple of atom ids for a bond to
be kept at fixed length, value = dist
to be fixed at (default: {None})
cartesian_constraints (list(int)): List of atom ids to fix at their
cartesian coordinates
(default: {None})
point_charges (list(autode.point_charges.PointCharge)): List of
float of point charges, x, y, z
coordinates for each point charge
temp (float): Temperature to perform the calculation at in K, or
None
"""
# Calculation names that start with "-" can break EST methods
self.name = (f'{name}_{method.name}' if not name.startswith('-')
else f'_{name}_{method.name}')
# ------------------- System specific parameters ----------------------
self.molecule = deepcopy(molecule)
self.molecule.constraints = Constraints(distance=distance_constraints,
cartesian=cartesian_constraints)
self._check_molecule()
# --------------------- Calculation parameters ------------------------
self.method = method
self.n_cores = int(n_cores)
# ------------------- Calculation input/output ------------------------
self.input = CalculationInput(keywords=deepcopy(keywords),
solvent=get_solvent_name(molecule, method),
additional_input=other_input_block,
added_internals=bond_ids_to_add,
point_charges=point_charges,
temp=temp)
self.output = CalculationOutput()
class CalculationOutput:
def set_lines(self):
"""
Set the output files lines. This may be slow for large files but should
not become a bottleneck when running standard DFT/WF calculations
Returns:
(None)
"""
logger.info('Setting output file lines')
if not os.path.exists(self.filename):
raise ex.NoCalculationOutput
self.file_lines = open(self.filename, 'r', encoding="utf-8").readlines()
return None
def exists(self):
"""Does the calculation output exist?"""
return self.filename is not None and self.file_lines is not None
def __init__(self):
self.filename = None
self.file_lines = None
class CalculationInput:
def _check(self):
"""Check that the input parameters have the expected format"""
if self.keywords is not None:
assert isinstance(self.keywords, kws.Keywords)
assert self.solvent is None or type(self.solvent) is str
assert self.other_block is None or type(self.other_block) is str
# Ensure the point charges are given as a list of PointCharge objects
if self.point_charges is not None:
assert type(self.point_charges) is list
assert all(type(pc) is PointCharge for pc in self.point_charges)
if self.added_internals is not None:
assert type(self.added_internals) is list
assert all(len(idxs) == 2 for idxs in self.added_internals)
def exists(self):
"""Does the input (files) exist?"""
if self.filename is None:
return False
return all(os.path.exists(fn) for fn in self.get_input_filenames())
def get_input_filenames(self):
"""Return a list of all the input files"""
assert self.filename is not None
return [self.filename] + self.additional_filenames
def __init__(self, keywords, solvent, additional_input,
added_internals, point_charges, temp):
"""
Arguments:
keywords (autode.wrappers.keywords.Keywords):
solvent (str or None): Name of the solvent for this QM method
additional_input (str or None): Any additional input string to add
to the input file, or None
added_internals (list(tuple(int)) or None): Atom indexes to add to
the internal coordinates
point_charges (list(autode.point_charges.PointCharge) or None):
list of float of point charges, x, y, z coordinates
for each point charge
temp (float): Temperature to perform the calculation at in K, or
None
"""
self.keywords = keywords
self.solvent = solvent
self.temp = temp
self.other_block = additional_input
self.added_internals = added_internals
self.point_charges = point_charges
self.filename = None
self.additional_filenames = []
self._check()
class Constraints:
def __str__(self):
"""String of constraints"""
string = ''
if self.cartesian is not None:
string += str(self.cartesian)
if self.distance is not None:
string += str({key: round(val, 3)
for key, val in self.distance.items()})
return f'Constraints({string})'
def _check(self):
""" Check the constraints have the expected format"""
if self.distance is not None:
assert type(self.distance) is dict
assert all(len(key) == 2 for key in self.distance.keys())
if self.cartesian is not None:
assert type(self.cartesian) is list
assert all(type(item) is int for item in self.cartesian)
def any(self):
"""Are there any constraints?"""
return self.distance is not None or self.cartesian is not None
def __init__(self, distance, cartesian):
"""
Arguments:
distance (any): Keys of: tuple(int) for two atom indexes and
values of the distance in Å or None
cartesian (any): List of atom indexes or None
"""
self.distance = distance
self.cartesian = cartesian
self._check()
|
the-stack_106_13342
|
"""Test the event bus."""
import pytest
from camacq import event as event_mod
# All test coroutines will be treated as marked.
pytestmark = pytest.mark.asyncio # pylint: disable=invalid-name
async def test_event_bus(center):
"""Test register handler, fire event and remove handler."""
event = event_mod.Event({"test": 2})
bus = center.bus
async def handler(center, event):
"""Handle event."""
if "test" not in center.data:
center.data["test"] = 0
center.data["test"] += event.data["test"]
assert event_mod.BASE_EVENT not in bus.event_types
remove = bus.register(event_mod.BASE_EVENT, handler)
assert event_mod.BASE_EVENT in bus.event_types
assert not center.data
await bus.notify(event)
await center.wait_for()
assert center.data.get("test") == 2
remove()
await bus.notify(event)
await center.wait_for()
assert center.data.get("test") == 2
|
the-stack_106_13343
|
from random import shuffle, randrange
from time import sleep
from threading import Thread
import fantome
import inspector
latence = 0.01
permanents, deux, avant, apres = {'rose'}, {'rouge', 'gris', 'bleu'}, {'violet', 'marron'}, {'noir', 'blanc'}
couleurs = avant | permanents | apres | deux
passages = [{1, 4}, {0, 2}, {1, 3}, {2, 7}, {0, 5, 8}, {4, 6}, {5, 7}, {3, 6, 9}, {4, 9}, {7, 8}]
pass_ext = [{1, 4}, {0, 2, 5, 7}, {1, 3, 6}, {2, 7}, {0, 5, 8, 9}, {4, 6, 1, 8}, {5, 7, 2, 9}, {3, 6, 9, 1}, {4, 9, 5},
{7, 8, 4, 6}]
def message(texte, jos):
for j in jos:
f = open("./" + str(j.numero) + "/infos.txt", "a")
f.write(texte + "\n")
f.close()
def informer(texte):
message(texte, joueurs)
def demander(q, j):
informer("QUESTION : " + q)
f = open("./" + str(j.numero) + "/questions" + ".txt", "w")
f.write(q)
f.close()
sleep(latence)
f = open("./" + str(j.numero) + "/reponses" + ".txt", "r")
# r = f.read()
r = f.readline()
f.close()
# informer("REPONSE DONNEE : "+r)
informer("REPONSE DONNEE : " + str(r))
return r
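# Note on the inter-process protocol used above: message()/informer() append to
# ./<n>/infos.txt, demander() writes the question to ./<n>/questions.txt, sleeps
# for `latence` seconds, then reads the player's reply from ./<n>/reponses.txt.
# The fantome.py / inspector.py player scripts are assumed to poll these files
# and write their answers within that delay.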
class personnage:
def __init__(self, couleur):
self.couleur, self.suspect, self.position, self.pouvoir = couleur, True, 0, True
def __repr__(self):
susp = "-suspect" if self.suspect else "-clean"
return self.couleur + "-" + str(self.position) + susp
class joueur:
def __init__(self, n):
self.numero = n
self.role = "l'inspecteur" if n == 0 else "le fantome"
def jouer(self, party):
informer("****\n Tour de " + self.role)
p = self.selectionner(party.tuiles_actives)
avec = self.activer_pouvoir(p, party, avant | deux)
self.bouger(p, avec, party.bloque)
self.activer_pouvoir(p, party, apres | deux)
def selectionner(self, t):
w = demander("Tuiles disponibles : " + str(t) + " choisir entre 0 et " + str(len(t) - 1), self)
i = int(w) if w.isnumeric() and int(w) in range(len(t)) else 0
p = t[i]
informer("REPONSE INTERPRETEE : " + str(p))
informer(self.role + " joue " + p.couleur)
del t[i]
return p
def activer_pouvoir(self, p, party, activables):
if p.pouvoir and p.couleur in activables:
a = demander("Voulez-vous activer le pouvoir (0/1) ?", self) == "1"
informer("REPONSE INTERPRETEE : " + str(a == 1))
if a:
informer("Pouvoir de " + p.couleur + " activé")
p.pouvoir = False
if p.couleur == "rouge":
draw = party.cartes[0]
informer(str(draw) + " a été tiré")
if draw == "fantome":
party.start += -1 if self.numero == 0 else 1
elif self.numero == 0:
draw.suspect = False
del party.cartes[0]
if p.couleur == "noir":
for q in party.personnages:
if q.position in {x for x in passages[p.position] if
x not in party.bloque or q.position not in party.bloque}:
q.position = p.position
informer("NOUVEAU PLACEMENT : " + str(q))
if p.couleur == "blanc":
for q in party.personnages:
if q.position == p.position and p != q:
dispo = {x for x in passages[p.position] if
x not in party.bloque or q.position not in party.bloque}
w = demander(str(q) + ", positions disponibles : " + str(dispo) + ", choisir la valeur",
self)
x = int(w) if w.isnumeric() and int(w) in dispo else dispo.pop()
informer("REPONSE INTERPRETEE : " + str(x))
q.position = x
informer("NOUVEAU PLACEMENT : " + str(q))
if p.couleur == "violet":
informer("Rappel des positions :\n" + str(party))
co = demander("Avec quelle couleur échanger (pas violet!) ?", self)
if co not in couleurs:
co = "rose"
informer("REPONSE INTERPRETEE : " + co)
q = [x for x in party.personnages if x.couleur == co][0]
p.position, q.position = q.position, p.position
informer("NOUVEAU PLACEMENT : " + str(p))
informer("NOUVEAU PLACEMENT : " + str(q))
if p.couleur == "marron":
return [q for q in party.personnages if p.position == q.position]
if p.couleur == "gris":
w = demander("Quelle salle obscurcir ? (0-9)", self)
party.shadow = int(w) if w.isnumeric() and int(w) in range(10) else 0
informer("REPONSE INTERPRETEE : " + str(party.shadow))
if p.couleur == "bleu":
w = demander("Quelle salle bloquer ? (0-9)", self)
x = int(w) if w.isnumeric() and int(w) in range(10) else 0
w = demander("Quelle sortie ? Chosir parmi : " + str(passages[x]), self)
y = int(w) if w.isnumeric() and int(w) in passages[x] else passages[x].copy().pop()
informer("REPONSE INTERPRETEE : " + str({x, y}))
party.bloque = {x, y}
return [p]
def bouger(self, p, avec, bloque):
pass_act = pass_ext if p.couleur == 'rose' else passages
if p.couleur != 'violet' or p.pouvoir:
disp = {x for x in pass_act[p.position] if p.position not in bloque or x not in bloque}
w = demander("positions disponibles : " + str(disp) + ", choisir la valeur", self)
x = int(w) if w.isnumeric() and int(w) in disp else disp.pop()
informer("REPONSE INTERPRETEE : " + str(x))
for q in avec:
q.position = x
informer("NOUVEAU PLACEMENT : " + str(q))
class partie:
def __init__(self, joueurs):
for i in [0, 1]:
f = open("./" + str(i) + "/infos.txt", "w")
f.close()
f = open("./" + str(i) + "/questions.txt", "w")
f.close()
f = open("./" + str(i) + "/reponses.txt", "w")
f.close()
self.joueurs = joueurs
self.start, self.end, self.num_tour, self.shadow, x = 4, 22, 1, randrange(10), randrange(10)
self.bloque = {x, passages[x].copy().pop()}
self.personnages = {personnage(c) for c in couleurs}
self.tuiles = [p for p in self.personnages]
self.cartes = self.tuiles[:]
self.fantome = self.cartes[randrange(8)]
message("!!! Le fantôme est : " + self.fantome.couleur, [self.joueurs[1]])
self.cartes.remove(self.fantome)
self.cartes += ['fantome'] * 3
shuffle(self.tuiles)
shuffle(self.cartes)
for i, p in enumerate(self.tuiles):
p.position = i
def actions(self):
joueur_actif = self.num_tour % 2
if joueur_actif == 1:
shuffle(self.tuiles)
self.tuiles_actives = self.tuiles[:4]
else:
self.tuiles_actives = self.tuiles[4:]
for i in [joueur_actif, 1 - joueur_actif, 1 - joueur_actif, joueur_actif]:
self.joueurs[i].jouer(self)
def lumiere(self):
partition = [{p for p in self.personnages if p.position == i} for i in range(10)]
if len(partition[self.fantome.position]) == 1 or self.fantome.position == self.shadow:
informer("le fantome frappe")
self.start += 1
for piece, gens in enumerate(partition):
if len(gens) > 1 and piece != self.shadow:
for p in gens:
p.suspect = False
else:
informer("pas de cri")
for piece, gens in enumerate(partition):
if len(gens) == 1 or piece == self.shadow:
for p in gens:
p.suspect = False
self.start += len([p for p in self.personnages if p.suspect])
def tour(self):
informer("**************************\n" + str(self))
self.actions()
self.lumiere()
for p in self.personnages:
p.pouvoir = True
self.num_tour += 1
def lancer(self):
while self.start < self.end and len([p for p in self.personnages if p.suspect]) > 1:
self.tour()
informer(
"L'enquêteur a trouvé - c'était " + str(self.fantome) if self.start < self.end else "Le fantôme a gagné")
informer("Score final : " + str(self.end - self.start))
def __repr__(self):
return "Tour:" + str(self.num_tour) + ", Score:" + str(self.start) + "/" + str(self.end) + ", Ombre:" + str(
self.shadow) + ", Bloque:" + str(self.bloque) + "\n" + " ".join([str(p) for p in self.personnages])
fantome_qtable = []
inspector_qtable = []
NB_GAMES = 2
joueurs = [joueur(0), joueur(1)]
for i in range(0, NB_GAMES):
print('*' * 10)
Thread(target=lambda: inspector.lancer(inspector_qtable)).start()
Thread(target=lambda: fantome.lancer(fantome_qtable)).start()
partie(joueurs).lancer()
|
the-stack_106_13344
|
#!/usr/bin/env python
import os
import sys
import argparse
import binascii
number_of_columns = 16
copyright = """/***************************************************************************//**
* \\file {}
*
* \\brief
* Cortex-M0+ prebuilt application image.
*
********************************************************************************
* \\copyright
* Copyright (c) 2018-2019 Cypress Semiconductor Corporation
* SPDX-License-Identifier: LicenseRef-PBL
*
* Licensed under the Permissive Binary License
*******************************************************************************/
"""
header = """
#if defined(__APPLE__) && defined(__clang__)
__attribute__ ((__section__("__CY_M0P_IMAGE,__cy_m0p_image"), used))
#elif defined(__GNUC__) || defined(__ARMCC_VERSION)
__attribute__ ((__section__(".cy_m0p_image"), used))
#elif defined(__ICCARM__)
#pragma location=".cy_m0p_image"
#else
#error "An unsupported toolchain"
#endif
const uint8_t cy_m0p_image[] = {
"""
c_list = []
# Get component name from the command line
parser = argparse.ArgumentParser()
parser.add_argument("bin_path", help="Specify path the input binary file to be converted to a C array.")
parser.add_argument("c_path", help="Specify path the output C file with the C array.")
parser.add_argument("c_macro", help="Specify the C preprocessor macro to wrap the variable.")
args = parser.parse_args()
bin_path = args.bin_path
c_path = args.c_path
c_macro = args.c_macro
c_file=os.path.basename(c_path)
f = open(bin_path, "rb")
data = list(f.read())
with open(c_path, "w") as c_fd:
# Clear content of the file
c_fd.seek(0)
c_fd.truncate()
# Write copyright
c_fd.write(copyright.format(c_file))
# Include headers
c_fd.write("#include <stdint.h>\n")
c_fd.write("#include \"cy_device_headers.h\"\n")
c_fd.write("\n")
# Open conditional block
if c_macro:
c_fd.write("#if defined(" + c_macro + ")\n")
# Write template
c_fd.write(header)
# Generate list of the data bytes
for n in data:
c_list.append(format(n, '#04x'))
for i in range(int(len(c_list) / number_of_columns) + 1):
line_list = c_list[i * number_of_columns: (i + 1) * number_of_columns]
c_fd.write(" ")
for item in line_list:
c_fd.write(" %su," % item)
c_fd.write("\n")
c_fd.write("};\n")
# Close conditional block
if c_macro:
c_fd.write("#endif /* defined(" + c_macro + ") */")
c_fd.write("\n")
f.close()
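# Example invocation (hypothetical file names; the positional arguments mirror
# the argparse definitions above):
#   python bin2c.py cm0p_sleep.bin cy_m0p_image.c CY_DEVICE_PSOC6ABLE2
# This would emit cy_m0p_image.c containing the cy_m0p_image[] array wrapped in
# "#if defined(CY_DEVICE_PSOC6ABLE2) ... #endif".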
|
the-stack_106_13345
|
"""Subset rows using column values
See source https://github.com/tidyverse/dplyr/blob/master/R/filter.R
"""
from typing import Iterable
import numpy
from pandas import DataFrame, RangeIndex
from pipda import register_verb
from pipda.expression import Expression
from pipda.utils import CallingEnvs
from ..core.contexts import Context
from ..core.grouped import DataFrameGroupBy, DataFrameRowwise
from ..core.types import is_scalar, is_null, BoolOrIter
from ..core.utils import copy_attrs, reconstruct_tibble, Array
from .group_data import group_data, group_vars
@register_verb(DataFrame, context=Context.EVAL)
def filter(
_data: DataFrame,
*conditions: Iterable[bool],
_preserve: bool = False,
_drop_index: bool = None,
) -> DataFrame:
"""Subset a data frame, retaining all rows that satisfy your conditions
Args:
*conditions: Expressions that return logical values
_preserve: Relevant when the .data input is grouped.
If _preserve = FALSE (the default), the grouping structure
is recalculated based on the resulting data, otherwise
the grouping is kept as is.
_drop_index: Whether drop the index or not.
When it is None and the index of _data is a RangeIndex, then
the index is dropped.
Returns:
The subset dataframe
"""
if _data.shape[0] == 0 or not conditions:
return _data
condition = None
for cond in conditions:
cond = _sanitize_condition(cond, _data.shape[0])
condition = (
cond if condition is None else numpy.logical_and(condition, cond)
)
out = _data.loc[condition, :]
if _drop_index is None:
_drop_index = isinstance(_data.index, RangeIndex)
if _drop_index:
out = out.reset_index(drop=True)
copy_attrs(out, _data)
return out
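# A minimal usage sketch (hypothetical data; assumes the usual datar imports,
# e.g. `from datar.all import f, tibble, filter`). Multiple conditions are
# combined with a logical AND, as implemented above:
#   df = tibble(x=[1, 2, 3], y=["a", "b", "c"])
#   filter(df, f.x > 1, f.y != "c")   # keeps only the row where x == 2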
@filter.register(DataFrameGroupBy, context=Context.PENDING)
def _(
_data: DataFrameGroupBy,
*conditions: Expression,
_preserve: bool = False,
_drop_index: bool = None, # TODO?
) -> DataFrameGroupBy:
"""Filter on DataFrameGroupBy object"""
if _data.shape[0] > 0:
out = _data._datar_apply(
filter,
*conditions,
_drop_index=False,
__calling_env=CallingEnvs.REGULAR,
).sort_index()
else:
out = _data.copy()
out = reconstruct_tibble(_data, out)
gdata = _filter_groups(out, _data)
if not _preserve and _data.attrs["_group_drop"]:
out.attrs["_group_data"] = gdata[
gdata["_rows"].map(len) > 0
].reset_index(drop=True)
return out
@filter.register(DataFrameRowwise, context=Context.EVAL)
def _(
_data: DataFrameRowwise,
*conditions: Expression,
_preserve: bool = False,
_drop_index: bool = None,
) -> DataFrameGroupBy:
"""Filter on DataFrameGroupBy object"""
out = filter.dispatch(DataFrame)(
_data, *conditions, _preserve=_preserve, _drop_index=_drop_index
)
return reconstruct_tibble(_data, out, keep_rowwise=True)
def _filter_groups(
new: DataFrameGroupBy,
old: DataFrameGroupBy,
sort_rows: bool = True,
) -> DataFrame:
"""Filter non-existing rows in groupdata"""
gdata = (
group_data(new, __calling_env=CallingEnvs.REGULAR)
.set_index(group_vars(new, __calling_env=CallingEnvs.REGULAR))["_rows"]
.to_dict()
)
new_gdata = group_data(old, __calling_env=CallingEnvs.REGULAR).copy()
for row in new_gdata.iterrows():
ser = row[1]
key = tuple(ser[:-1])
if len(key) == 1:
key = key[0]
ser[-1] = gdata.get(key, [])
new_gdata.loc[row[0], :] = ser
if sort_rows:
# GH69
# The order changes when top row number filtered
new_gdata = new_gdata.sort_values(
["_rows"],
# keep empty rows at last
key=lambda rowss: Array(
[new.shape[0] if len(rows) == 0 else rows[0] for rows in rowss],
dtype=int,
),
).reset_index(drop=True)
new.attrs["_group_data"] = new_gdata
return new_gdata
def _sanitize_condition(cond: BoolOrIter, length: int) -> numpy.ndarray:
"""Handle single condition"""
if is_scalar(cond):
out = Array([cond] * length)
elif isinstance(cond, numpy.ndarray):
out = cond
else:
out = Array(cond)
out[is_null(out)] = False
return out.astype(bool)
|
the-stack_106_13347
|
# Copyright 2006-2017,2020 by Peter Cock. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
#
# This module is for reading and writing FASTA format files as SeqRecord
# objects. The code is partly inspired by earlier Biopython modules,
# Bio.Fasta.* and the now deprecated Bio.SeqIO.FASTA
"""Bio.SeqIO support for the "fasta" (aka FastA or Pearson) file format.
You are expected to use this module via the Bio.SeqIO functions.
"""
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from .Interfaces import SequenceIterator, SequenceWriter
from .Interfaces import _clean, _get_seq_string
def SimpleFastaParser(handle):
"""Iterate over Fasta records as string tuples.
Arguments:
- handle - input stream opened in text mode
For each record a tuple of two strings is returned, the FASTA title
line (without the leading '>' character), and the sequence (with any
whitespace removed). The title line is not divided up into an
identifier (the first word) and comment or description.
>>> with open("Fasta/dups.fasta") as handle:
... for values in SimpleFastaParser(handle):
... print(values)
...
('alpha', 'ACGTA')
('beta', 'CGTC')
('gamma', 'CCGCC')
('alpha (again - this is a duplicate entry to test the indexing code)', 'ACGTA')
('delta', 'CGCGC')
"""
# Skip any text before the first record (e.g. blank lines, comments)
for line in handle:
if line[0] == ">":
title = line[1:].rstrip()
break
else:
# no break encountered - probably an empty file
return
# Main logic
# Note, remove trailing whitespace, and any internal spaces
# (and any embedded \r which are possible in mangled files
# when not opened in universal read lines mode)
lines = []
for line in handle:
if line[0] == ">":
yield title, "".join(lines).replace(" ", "").replace("\r", "")
lines = []
title = line[1:].rstrip()
continue
lines.append(line.rstrip())
yield title, "".join(lines).replace(" ", "").replace("\r", "")
def FastaTwoLineParser(handle):
"""Iterate over no-wrapping Fasta records as string tuples.
Arguments:
- handle - input stream opened in text mode
Functionally the same as SimpleFastaParser but with a strict
interpretation of the FASTA format as exactly two lines per
record, the greater-than-sign identifier with description,
and the sequence with no line wrapping.
Any line wrapping will raise an exception, as will excess blank
lines (other than the special case of a zero-length sequence
as the second line of a record).
Examples
--------
This file uses two lines per FASTA record:
>>> with open("Fasta/aster_no_wrap.pro") as handle:
... for title, seq in FastaTwoLineParser(handle):
... print("%s = %s..." % (title, seq[:3]))
...
gi|3298468|dbj|BAA31520.1| SAMIPF = GGH...
This equivalent file uses line wrapping:
>>> with open("Fasta/aster.pro") as handle:
... for title, seq in FastaTwoLineParser(handle):
... print("%s = %s..." % (title, seq[:3]))
...
Traceback (most recent call last):
...
ValueError: Expected FASTA record starting with '>' character. Perhaps this file is using FASTA line wrapping? Got: 'MTFGLVYTVYATAIDPKKGSLGTIAPIAIGFIVGANI'
"""
idx = -1 # for empty file
for idx, line in enumerate(handle):
if idx % 2 == 0: # title line
if line[0] != ">":
raise ValueError(
"Expected FASTA record starting with '>' character. "
"Perhaps this file is using FASTA line wrapping? "
f"Got: '{line}'"
)
title = line[1:].rstrip()
else: # sequence line
if line[0] == ">":
raise ValueError(
"Two '>' FASTA lines in a row. Missing sequence line "
"if this is strict two-line-per-record FASTA format. "
f"Have '>{title}' and '{line}'"
)
yield title, line.strip()
if idx == -1:
pass # empty file
elif idx % 2 == 0: # on a title line
raise ValueError(
"Missing sequence line at end of file if this is strict "
f"two-line-per-record FASTA format. Have title line '{line}'"
)
else:
assert line[0] != ">", "line[0] == '>' ; this should be impossible!"
class FastaIterator(SequenceIterator):
"""Parser for Fasta files."""
def __init__(self, source, alphabet=None, title2ids=None):
"""Iterate over Fasta records as SeqRecord objects.
Arguments:
- source - input stream opened in text mode, or a path to a file
- alphabet - optional alphabet, not used. Leave as None.
- title2ids - A function that, when given the title of the FASTA
file (without the beginning >), will return the id, name and
description (in that order) for the record as a tuple of strings.
If this is not given, then the entire title line will be used
as the description, and the first word as the id and name.
By default this will act like calling Bio.SeqIO.parse(handle, "fasta")
with no custom handling of the title lines:
>>> with open("Fasta/dups.fasta") as handle:
... for record in FastaIterator(handle):
... print(record.id)
...
alpha
beta
gamma
alpha
delta
However, you can supply a title2ids function to alter this:
>>> def take_upper(title):
... return title.split(None, 1)[0].upper(), "", title
>>> with open("Fasta/dups.fasta") as handle:
... for record in FastaIterator(handle, title2ids=take_upper):
... print(record.id)
...
ALPHA
BETA
GAMMA
ALPHA
DELTA
"""
if alphabet is not None:
raise ValueError("The alphabet argument is no longer supported")
self.title2ids = title2ids
super().__init__(source, mode="t", fmt="Fasta")
def parse(self, handle):
"""Start parsing the file, and return a SeqRecord generator."""
records = self.iterate(handle)
return records
def iterate(self, handle):
"""Parse the file and generate SeqRecord objects."""
title2ids = self.title2ids
if title2ids:
for title, sequence in SimpleFastaParser(handle):
id, name, descr = title2ids(title)
yield SeqRecord(Seq(sequence), id=id, name=name, description=descr)
else:
for title, sequence in SimpleFastaParser(handle):
try:
first_word = title.split(None, 1)[0]
except IndexError:
assert not title, repr(title)
# Should we use SeqRecord default for no ID?
first_word = ""
yield SeqRecord(
Seq(sequence), id=first_word, name=first_word, description=title,
)
class FastaTwoLineIterator(SequenceIterator):
"""Parser for Fasta files with exactly two lines per record."""
def __init__(self, source):
"""Iterate over two-line Fasta records (as SeqRecord objects).
Arguments:
- source - input stream opened in text mode, or a path to a file
        This uses a strict interpretation of the FASTA format, requiring
        exactly two lines per record (no line wrapping).
        Only the default title-to-ID/name/description parsing used by the
        relaxed FASTA parser is offered.
"""
super().__init__(source, mode="t", fmt="FASTA")
def parse(self, handle):
"""Start parsing the file, and return a SeqRecord generator."""
records = self.iterate(handle)
return records
def iterate(self, handle):
"""Parse the file and generate SeqRecord objects."""
for title, sequence in FastaTwoLineParser(handle):
try:
first_word = title.split(None, 1)[0]
except IndexError:
assert not title, repr(title)
# Should we use SeqRecord default for no ID?
first_word = ""
yield SeqRecord(
Seq(sequence), id=first_word, name=first_word, description=title,
)
class FastaWriter(SequenceWriter):
"""Class to write Fasta format files (OBSOLETE).
    Please use the ``as_fasta`` function instead, or the top level
    ``Bio.SeqIO.write()`` function with ``format="fasta"``.
"""
def __init__(self, target, wrap=60, record2title=None):
"""Create a Fasta writer (OBSOLETE).
Arguments:
- target - Output stream opened in text mode, or a path to a file.
- wrap - Optional line length used to wrap sequence lines.
Defaults to wrapping the sequence at 60 characters
Use zero (or None) for no wrapping, giving a single
long line for the sequence.
- record2title - Optional function to return the text to be
used for the title line of each record. By default
a combination of the record.id and record.description
is used. If the record.description starts with the
record.id, then just the record.description is used.
You can either use::
handle = open(filename, "w")
writer = FastaWriter(handle)
writer.write_file(myRecords)
handle.close()
Or, follow the sequential file writer system, for example::
handle = open(filename, "w")
writer = FastaWriter(handle)
writer.write_header() # does nothing for Fasta files
...
Multiple writer.write_record() and/or writer.write_records() calls
...
writer.write_footer() # does nothing for Fasta files
handle.close()
"""
super().__init__(target)
if wrap:
if wrap < 1:
                raise ValueError("wrap should be None, zero, or a positive integer")
self.wrap = wrap
self.record2title = record2title
def write_record(self, record):
"""Write a single Fasta record to the file."""
if self.record2title:
title = self.clean(self.record2title(record))
else:
id = self.clean(record.id)
description = self.clean(record.description)
if description and description.split(None, 1)[0] == id:
# The description includes the id at the start
title = description
elif description:
title = "%s %s" % (id, description)
else:
title = id
assert "\n" not in title
assert "\r" not in title
self.handle.write(">%s\n" % title)
data = self._get_seq_string(record) # Catches sequence being None
assert "\n" not in data
assert "\r" not in data
if self.wrap:
for i in range(0, len(data), self.wrap):
self.handle.write(data[i : i + self.wrap] + "\n")
else:
self.handle.write(data + "\n")
class FastaTwoLineWriter(FastaWriter):
"""Class to write 2-line per record Fasta format files (OBSOLETE).
This means we write the sequence information without line
wrapping, and will always write a blank line for an empty
sequence.
    Please use the ``as_fasta_2line`` function instead, or the top level
    ``Bio.SeqIO.write()`` function with ``format="fasta-2line"``.
"""
def __init__(self, handle, record2title=None):
"""Create a 2-line per record Fasta writer (OBSOLETE).
Arguments:
- handle - Handle to an output file, e.g. as returned
by open(filename, "w")
- record2title - Optional function to return the text to be
used for the title line of each record. By default
a combination of the record.id and record.description
is used. If the record.description starts with the
record.id, then just the record.description is used.
You can either use::
handle = open(filename, "w")
            writer = FastaTwoLineWriter(handle)
writer.write_file(myRecords)
handle.close()
Or, follow the sequential file writer system, for example::
handle = open(filename, "w")
            writer = FastaTwoLineWriter(handle)
writer.write_header() # does nothing for Fasta files
...
Multiple writer.write_record() and/or writer.write_records() calls
...
writer.write_footer() # does nothing for Fasta files
handle.close()
"""
super().__init__(handle, wrap=None, record2title=record2title)
def as_fasta(record):
"""Turn a SeqRecord into a FASTA formatted string.
This is used internally by the SeqRecord's .format("fasta")
method and by the SeqIO.write(..., ..., "fasta") function.
"""
id = _clean(record.id)
description = _clean(record.description)
if description and description.split(None, 1)[0] == id:
# The description includes the id at the start
title = description
elif description:
title = "%s %s" % (id, description)
else:
title = id
assert "\n" not in title
assert "\r" not in title
lines = [">%s\n" % title]
data = _get_seq_string(record) # Catches sequence being None
assert "\n" not in data
assert "\r" not in data
for i in range(0, len(data), 60):
lines.append(data[i : i + 60] + "\n")
return "".join(lines)
def as_fasta_2line(record):
"""Turn a SeqRecord into a two-line FASTA formatted string.
This is used internally by the SeqRecord's .format("fasta-2line")
method and by the SeqIO.write(..., ..., "fasta-2line") function.
"""
id = _clean(record.id)
description = _clean(record.description)
if description and description.split(None, 1)[0] == id:
# The description includes the id at the start
title = description
elif description:
title = "%s %s" % (id, description)
else:
title = id
assert "\n" not in title
assert "\r" not in title
data = _get_seq_string(record) # Catches sequence being None
assert "\n" not in data
assert "\r" not in data
return ">%s\n%s\n" % (title, data)
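%s">


# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Compares the two formatting helpers above on a small record, assuming the
# Seq and SeqRecord imports this module already relies on; the record contents
# are made up for demonstration purposes.
def _demo_fasta_formatting():
    record = SeqRecord(Seq("ACGT" * 20), id="demo", description="demo record")
    wrapped = as_fasta(record)  # sequence wrapped at 60 characters per line
    two_line = as_fasta_2line(record)  # one title line plus one sequence line
    assert wrapped.startswith(">demo record\n")
    assert two_line.count("\n") == 2
    return wrapped, two_line
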
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest(verbose=0)
|
the-stack_106_13350
|
""" Common utility code """
import configparser
import os.path
COLOR_CERULEAN = "#377eb8" # outliers
COLOR_ORANGE = "#ff7f00" # inliers
COLOR_GREEN = "#b6d7a8" # inliers
COLOR_RED = "#ff0000" # outliers
# Index constants for the average-delay file "avg_yyyy.csv"
AVG_FILE_AEP_CODE_IDX = 0
AVG_FILE_FL_DATE_IDX = 1
AVG_FILE_AVG_DELAY_IDX = 2
AVG_FILE_NBR_FLIGHTS_IDX = 3
ENV_CFG_SECTION = "EnvironmentSection"
ENV_CFG_PROP_RUN_MODE = "env.run_mode"
ENV_CFG_PROP_S3_BUCKET = "env.s3_data_bucket"
ENV_CFG_PROP_DEF_REGION = "env.default_region"
ENV_CFG_VAL_RUN_MODE_CLOUD = "cloud"
ENV_CFG_VAL_RUN_MODE_STAND_ALONE = "stand-alone"
FILE_CFG_SECTION = "FileSection"
FILE_CFG_PROP_DATA_DIR = "file.data_dir"
DATA_CFG_SECTION = "DataEngineeringSection"
DATA_CFG_PROP_ANOMALY_ALGORITHM = "data.anomaly_algorithm"
DATA_CFG_PROP_RANDOM_SEED = "data.random_seed"
DATA_CFG_PROP_CONTAMINATION = "data.contamination"
CONFIG_FILE_PATH = "conf/config.ini"
CREDENTIALS_FILE_PATH = "conf/credentials"
CRED_FILE_DEFAULT_SECTION = "default"
CRED_FILE_PROP_ACCESS_KEY_ID = "aws_access_key_id"
CRED_FILE_PROP_SECRET_ACCESS_KEY = "aws_secret_access_key"
CRED_FILE_PROP_SESSION_TOKEN = "aws_session_token"
def get_data_dir(cfg):
"""Get data directory"""
data_dir = cfg.get(FILE_CFG_SECTION, FILE_CFG_PROP_DATA_DIR)
return data_dir
def load_credentials():
"""Load credentials file"""
return load_properties_file(CREDENTIALS_FILE_PATH)
def load_config():
"""Load configuration file"""
return load_properties_file(CONFIG_FILE_PATH)
def load_properties_file(file_path):
"""Load properties file"""
print(f"### Loading {file_path} file ###")
cfg_file_name = file_path
print(f"File name: {cfg_file_name}")
file_found = os.path.isfile(cfg_file_name)
print(f"File found: {file_found}")
if file_found:
config = configparser.RawConfigParser()
config.read(cfg_file_name)
print(f"Using file {cfg_file_name}")
return config
else:
raise Exception(f"Could not find file {cfg_file_name}")
|
the-stack_106_13352
|
from string import ascii_letters
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def diag_corr(df, title):
    ''' Diagonal correlation heatmap of a DataFrame, masking the upper triangle
    '''
sns.set(style="white")
# Compute the correlation matrix
corr = df.corr()
# Generate a mask for the upper triangle
    mask = np.zeros_like(corr, dtype=bool)  # plain bool; np.bool was removed from NumPy
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
ax = sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
ax.set_title(title)
sns.set(style="white", context="talk")
def bars(x, y, ylabel, title):
    ''' Plot a nice barplot
'''
f, ax = plt.subplots(figsize=(10, 5))
sns.barplot(x=x, y=y, palette="rocket", ax=ax)
ax.axhline(0, color="k", clip_on=False)
ax.set_ylabel(ylabel)
ax.set_title(title)
# Finalize the plot
sns.despine(bottom=True)
# plt.setp(f.axes, yticks=[])
plt.tight_layout(h_pad=2)
ax.set_xticklabels(ax.get_xticklabels(), rotation = 90)
plt.show()
def lines(x, y, hue, title):
''' Line plot with custom line widths
'''
sns.set(style="white", context="talk")
f, ax = plt.subplots(figsize=(10, 6))
ax = sns.pointplot(x=x,
y=y,
hue=hue,
scale=.6,
errwidth=0.6)
sns.despine(bottom=True)
plt.title(title)
ax.legend(bbox_to_anchor=(1.15, 1.05))
plt.show()
def joint(x, y, title):
sns.set(style="white", context="talk")
f, ax = plt.subplots(figsize=(10, 6))
ax = sns.scatterplot(x=x,
y=y)
sns.despine(bottom=True)
plt.title(title)
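

# --- Hedged usage sketch (illustrative only; column names are made up) ---
# Exercises the plotting helpers above on a small random DataFrame.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo = pd.DataFrame(rng.normal(size=(100, 4)), columns=list("abcd"))
    diag_corr(demo, "Demo correlation matrix")
    bars(x=list(demo.columns), y=demo.mean().values, ylabel="mean", title="Column means")
    plt.show()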
|
the-stack_106_13353
|
from flushed import log
import cv2
import time
def wait_for_format(fourcc, width, height, fps, port=0):
while True:
log(f'camera> connecting to port {port}: {fourcc} {width} x {height} @ {fps}fps')
camera = cv2.VideoCapture(port)
camera.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc))
camera.set(cv2.CAP_PROP_FRAME_WIDTH, width)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
camera.set(cv2.CAP_PROP_FPS, fps)
camera.set(cv2.CAP_PROP_AUTOFOCUS, 0)
camera.set(cv2.CAP_PROP_FOCUS, 0)
if not camera.isOpened():
log('camera> port not opening')
else:
log('camera> port open')
status, img = camera.read()
if status:
                current_width = camera.get(cv2.CAP_PROP_FRAME_WIDTH)
                current_height = camera.get(cv2.CAP_PROP_FRAME_HEIGHT)
log(f'camera> reading at {current_width} x {current_height}')
if current_width >= width and current_height >= height:
log('camera> resolution met')
return camera
else:
log('camera> resolution not met')
else:
log('camera> not reading')
log('camera> releasing camera')
camera.release()
log('camera> wait_for_format sleeping')
time.sleep(10)
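

# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Once wait_for_format returns a configured camera, frames are read in the
# usual OpenCV way; the frame count below is an arbitrary example value.
def grab_frames(camera, n_frames=10):
    frames = []
    for _ in range(n_frames):
        status, img = camera.read()
        if status:
            frames.append(img)
        else:
            log('camera> dropped a frame while grabbing')
    return frames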
if __name__ == '__main__':
print('waiting for 4k')
camera = wait_for_format('MJPG', 3840, 2160, 5)
print('got 4k')
|
the-stack_106_13354
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
class ActorCriticNetwork(nn.Module):
    def __init__(self, alpha, input_dims, fc1_dims, fc2_dims, n_actions, device):
        super(ActorCriticNetwork, self).__init__()
self.input_dims = input_dims
self.fc1_dims = fc1_dims
self.fc2_dims = fc2_dims
self.n_actions = n_actions
self.fc1 = nn.Linear(*self.input_dims, self.fc1_dims)
self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
self.actor_policy_net = nn.Linear(self.fc2_dims, n_actions)
self.critic_value_net = nn.Linear(self.fc2_dims, 1)
self.optimizer = optim.Adam(self.parameters(), lr=alpha)
self.device = torch.device(device if torch.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, observation):
state = torch.Tensor(observation).to(self.device)
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
policy = self.actor_policy_net(x)
value = self.critic_value_net(x)
return policy, value
class ACAgent():
def __init__(self, alpha, input_dims, gamma=0.999,
layer1_size=600, layer2_size=600, n_actions=8):
self.gamma = gamma
        self.actor_critic = ActorCriticNetwork(alpha, input_dims, layer1_size, layer2_size, n_actions, device='cuda:3')
self.log_probs = None
def choose_action(self, observation):
policy, _ = self.actor_critic.forward(observation)
        policy = F.softmax(policy, dim=-1)
action_probs = torch.distributions.Categorical(policy)
action = action_probs.sample()
self.log_probs = action_probs.log_prob(action)
return action.item()
def learn(self, state, reward, state_, done):
self.actor_critic.optimizer.zero_grad()
_, critic_value = self.actor_critic.forward(state)
_, critic_value_ = self.actor_critic.forward(state_)
reward = torch.tensor(reward, dtype=torch.float).to(self.actor_critic.device)
delta = reward + self.gamma * critic_value_ * (1-int(done)) - critic_value
actor_loss = -self.log_probs * delta
critic_loss = delta**2
(actor_loss + critic_loss).backward()
self.actor_critic.optimizer.step()
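

# --- Hedged usage sketch (illustrative, not part of the original script) ---
# Shows the intended choose_action/learn loop against any environment exposing
# reset() and step(); the Maze environment used below follows this interface.
def run_episode(agent, env, max_steps=100):
    observation = env.reset()
    total_reward = 0.0
    for _ in range(max_steps):
        action = agent.choose_action(observation)
        observation_, reward, done, _ = env.step(action)
        agent.learn(observation, reward, observation_, done)
        observation = observation_
        total_reward += reward
        if done:
            break
    return total_reward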
from maze_env import Maze
import os
import time
import matplotlib.pyplot as plt
import pickle
np.random.seed(42)
if __name__ == '__main__':
env = Maze(height=21, width=21, detection_range=1, obstacle_occupancy_prob=0.5)
agent = ACAgent(alpha=0.00001, gamma=0.999, input_dims=[529], n_actions=8, layer1_size=2048, layer2_size=512)
score_history = []
avg_score_history = []
    episodes = 5000000
max_steps = 23 * 23
start_time = str(time.time())
plt.figure(figsize=(10, 8))
    for e in range(episodes):
score = 0
done = False
observation = env.reset()
# while not done:
for i in range(max_steps):
action = agent.choose_action(observation)
observation_, reward, done, success = env.step(action)
agent.learn(observation, reward, observation_, done)
observation = observation_
score += reward
if done:
break
score_history.append(score)
avg_score = np.mean(score_history[-100:])
avg_score_history.append(avg_score)
        print('Episode : ', e, ' | Score : %.2f' % score, ' | Average Score : %.2f' % avg_score)
if e == 0:
            if not os.path.exists('./' + start_time):
print('Creating save directory')
os.mkdir('./' + start_time)
plt.plot([i for i in range(len(score_history))], score_history, 'bo-')
plt.plot([i for i in range(len(avg_score_history))], avg_score_history, 'r-', linewidth=4)
plt.title('RL Random Maze Training [Actor-Critic]')
plt.xlabel('Episodes')
plt.ylabel('Reward')
plt.tight_layout()
plt.savefig('./' + start_time + '/Training_Result.png')
plt.cla()
with open('./' + start_time + '/score_history.txt', 'wb') as reward_list_file:
pickle.dump(score_history, reward_list_file)
if success:
torch.save({'epoch' : e,
'ActorCritic_model_state_dict' : agent.actor_critic.state_dict(),
'optimizer' : agent.actor_critic.optimizer.state_dict(),
'reward' : score_history}, './' + start_time + '/Random Maze Agent.pth')
|
the-stack_106_13356
|
from discord.ext import commands
import util.math
import async_cse
import discord
import psutil
import arrow
class Useful(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.d = bot.d
self.google_client = async_cse.Search(bot.k.google)
self.db = bot.get_cog('Database')
@commands.group(name='help')
async def help(self, ctx):
if ctx.invoked_subcommand is None:
cmd = ctx.message.content.replace(f'{ctx.prefix}help ', '')
if cmd != '':
cmd_true = self.bot.get_command(cmd.lower())
if cmd_true is not None:
all_help = {
**ctx.l.help.econ,
**ctx.l.help.mc,
**ctx.l.help.util,
**ctx.l.help.fun,
**ctx.l.help.mod
}
help_text = all_help.get(str(cmd_true))
if help_text is None:
await self.bot.send(ctx, ctx.l.help.main.nodoc)
return
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=ctx.l.help.n.cmd, icon_url=self.d.splash_logo)
embed.set_footer(text=ctx.l.misc.petus)
embed.description = help_text.format(ctx.prefix)
if len(cmd_true.aliases) > 0:
embed.description += '\n\n' + ctx.l.help.main.aliases.format('`, `'.join(cmd_true.aliases))
await ctx.send(embed=embed)
return
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=ctx.l.help.n.title, icon_url=self.d.splash_logo)
embed.description = ctx.l.help.main.desc.format(self.d.support, self.d.topgg)
p = ctx.prefix
embed.add_field(name=(self.d.emojis.emerald_spinn + ctx.l.help.n.economy), value=f'`{p}help econ`')
embed.add_field(name=(self.d.emojis.bounce + ' ' + ctx.l.help.n.minecraft), value=f'`{p}help mc`')
embed.add_field(name=(self.d.emojis.anichest + ctx.l.help.n.utility), value=f'`{p}help util`')
embed.add_field(name=(self.d.emojis.rainbow_shep + ctx.l.help.n.fun), value=f'`{p}help fun`')
embed.add_field(name=(self.d.emojis.netherite_sword + ctx.l.help.n.admin), value=f'`{p}help admin`')
embed.add_field(name=(self.d.emojis.heart_spin + ctx.l.help.main.support), value=f'[**{ctx.l.help.main.clickme}**]({self.d.support})')
embed.set_footer(text=ctx.l.misc.petus)
await ctx.send(embed=embed)
@help.command(name='economy', aliases=['econ'])
async def help_economy(self, ctx):
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=f'{ctx.l.help.n.title} [{ctx.l.help.n.economy}]', icon_url=self.d.splash_logo)
embed.set_footer(text=ctx.l.misc.petus)
commands_formatted = '`, `'.join(list(ctx.l.help.econ))
embed.description = f'`{commands_formatted}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}'
await ctx.send(embed=embed)
@help.command(name='minecraft', aliases=['mc'])
async def help_minecraft(self, ctx):
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=f'{ctx.l.help.n.title} [{ctx.l.help.n.minecraft}]', icon_url=self.d.splash_logo)
embed.set_footer(text=ctx.l.misc.petus)
commands_formatted = '`, `'.join(list(ctx.l.help.mc))
embed.description = f'`{commands_formatted}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}'
await ctx.send(embed=embed)
@help.command(name='utility', aliases=['util', 'useful'])
async def help_utility(self, ctx):
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=f'{ctx.l.help.n.title} [{ctx.l.help.n.utility}]', icon_url=self.d.splash_logo)
embed.set_footer(text=ctx.l.misc.petus)
commands_formatted = '`, `'.join(list(ctx.l.help.util))
embed.description = f'`{commands_formatted}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}'
await ctx.send(embed=embed)
@help.command(name='fun')
async def help_fun(self, ctx):
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=f'{ctx.l.help.n.title} [{ctx.l.help.n.fun}]', icon_url=self.d.splash_logo)
embed.set_footer(text=ctx.l.misc.petus)
commands_formatted = '`, `'.join(list(ctx.l.help.fun))
embed.description = f'`{commands_formatted}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}'
await ctx.send(embed=embed)
@help.command(name='administrator', aliases=['mod', 'moderation', 'administrative', 'admin'])
async def help_administrative(self, ctx):
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=f'{ctx.l.help.n.title} [{ctx.l.help.n.admin}]', icon_url=self.d.splash_logo)
embed.set_footer(text=ctx.l.misc.petus)
commands_formatted = '`, `'.join(list(ctx.l.help.mod))
embed.description = f'`{commands_formatted}`\n\n{ctx.l.help.main.howto.format(ctx.prefix)}'
await ctx.send(embed=embed)
@commands.command(name='ping', aliases=['pong', 'ding', 'dong', 'shing', 'shling', 'schlong'])
async def ping_pong(self, ctx):
content = ctx.message.content.lower()
if 'ping' in content:
pp = 'Pong'
elif 'pong' in content:
pp = 'Ping'
elif 'ding' in content:
pp = 'Dong'
elif 'dong' in content:
pp = 'Ding'
elif 'shing' in content or 'shling' in content:
pp = 'Schlong'
elif 'schlong' in content:
await self.bot.send(ctx, f'{self.d.emojis.aniheart} Magnum Dong! \uFEFF `69.00 ms`')
return
await self.bot.send(ctx, f'{self.d.emojis.aniheart} {pp} \uFEFF `{round(self.bot.latency*1000, 2)} ms`')
@commands.command(name='vote', aliases=['votelink', 'votelinks'])
async def votelinks(self, ctx):
embed = discord.Embed(color=self.d.cc)
embed.set_author(name='Vote for Villager Bot!', icon_url=self.d.splash_logo)
embed.description = f'**[{ctx.l.useful.vote.click_1}]({self.d.topgg + "/vote"})**'
await ctx.send(embed=embed)
@commands.command(name='links', aliases=['invite', 'support', 'usefullinks', 'website', 'source'])
async def useful_links(self, ctx):
embed = discord.Embed(color=self.d.cc)
embed.set_author(name='Useful Links', icon_url=self.d.splash_logo)
embed.description = f'**[{ctx.l.useful.links.support}]({self.d.support})\n' \
f'\n[{ctx.l.useful.links.invite}]({self.d.invite})\n' \
f'\n[{ctx.l.useful.links.topgg}]({self.d.topgg})\n' \
f'\n[{ctx.l.useful.links.website}]({self.d.website})\n' \
f'\n[{ctx.l.useful.links.source}]({self.d.github})**'
await ctx.send(embed=embed)
@commands.command(name='stats', aliases=['bs'])
async def stats(self, ctx):
await ctx.trigger_typing()
uptime_seconds = (arrow.utcnow() - self.d.start_time).total_seconds()
uptime = arrow.utcnow().shift(seconds=uptime_seconds).humanize(locale=ctx.l.lang, only_distance=True)
proc = psutil.Process()
with proc.oneshot():
mem_usage = proc.memory_full_info().uss
threads = proc.num_threads()
proc.cpu_percent(interval=.1)
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=ctx.l.useful.stats.stats, icon_url=self.d.splash_logo)
embed.set_footer(text=ctx.l.misc.petus)
col_1 = f'{ctx.l.useful.stats.servers}: `{len(self.bot.guilds)}`\n' \
f'{ctx.l.useful.stats.dms}: `{len(self.bot.private_channels)}/128`\n' \
f'{ctx.l.useful.stats.users}: `{len(self.bot.users)}`\n' \
f'{ctx.l.useful.stats.msgs}: `{self.d.msg_count}`\n' \
f'{ctx.l.useful.stats.cmds}: `{self.d.cmd_count}` `({round((self.d.cmd_count / (self.d.msg_count + .000001)) * 100, 2)}%)`\n' \
f'{ctx.l.useful.stats.cmds_sec}: `{round(self.d.cmd_count / uptime_seconds, 2)}`\n' \
f'{ctx.l.useful.stats.votes}: `{self.d.votes_topgg}`\n' \
f'{ctx.l.useful.stats.topgg}: `{round((self.d.votes_topgg / uptime_seconds) * 3600, 2)}`\n'
col_2 = f'{ctx.l.useful.stats.mem}: `{round(mem_usage / 1000000, 2)} MB`\n' \
f'{ctx.l.useful.stats.cpu}: `{round(proc.cpu_percent() / psutil.cpu_count(), 2)}%`\n' \
f'{ctx.l.useful.stats.threads}: `{threads}`\n' \
f'{ctx.l.useful.stats.ping}: `{round(self.bot.latency * 1000, 2)} ms`\n' \
f'{ctx.l.useful.stats.shards}: `{self.bot.shard_count}`\n' \
f'{ctx.l.useful.stats.uptime}: `{uptime}`\n'
col_2 += '\n' + ctx.l.useful.stats.more.format(self.d.statcord)
embed.add_field(name='\uFEFF', value=col_1+'\uFEFF')
embed.add_field(name='\uFEFF', value=col_2+'\uFEFF')
await ctx.send(embed=embed)
@commands.command(name='serverinfo', aliases=['server', 'guild'])
@commands.guild_only()
async def server_info(self, ctx, gid: int = None):
if gid is None:
guild = ctx.guild
else:
guild = self.bot.get_guild(gid)
db_guild = await self.db.fetch_guild(guild.id)
time = arrow.get(discord.utils.snowflake_time(guild.id))
time = time.format('MMM D, YYYY', locale=ctx.l.lang) + ', ' + time.humanize(locale=ctx.l.lang)
embed = discord.Embed(color=self.d.cc)
embed.set_author(name=f'{guild.name} {ctx.l.useful.ginf.info}', icon_url=guild.icon_url)
embed.description = f'{ctx.l.useful.ginf.age}: `{time}`'
general = f'{ctx.l.useful.ginf.owner}: {guild.owner.mention}\n' \
f'{ctx.l.useful.ginf.members}: `{guild.member_count}`\n' \
f'{ctx.l.useful.ginf.channels}: `{len(guild.channels)}`\n ' \
f'{ctx.l.useful.ginf.roles}: `{len(guild.roles)}`\n' \
f'{ctx.l.useful.ginf.emojis}: `{len(guild.emojis)}`\n' \
f'{ctx.l.useful.ginf.bans}: `{len(await guild.bans())}`\n'
villager = f'{ctx.l.useful.ginf.cmd_prefix}: `{self.d.prefix_cache.get(guild.id, self.d.default_prefix)}`\n' \
f'{ctx.l.useful.ginf.lang}: `{ctx.l.name}`\n' \
f'{ctx.l.useful.ginf.diff}: `{db_guild["difficulty"]}`\n'
embed.add_field(name='General', value=general, inline=True)
embed.add_field(name='Villager Bot', value=villager, inline=True)
embed.set_thumbnail(url=guild.icon_url)
await ctx.send(embed=embed)
@commands.command(name='info', aliases=['i'])
@commands.is_owner()
@commands.cooldown(1, 2, commands.BucketType.user)
async def info(self, ctx, *, thing):
await ctx.send('execute')
type_ = None
try:
snowflake = int(thing)
type_ = 'id'
except Exception:
user = discord.utils.find((lambda u: u.name == thing), ctx.guild.members)
if user is None:
user = discord.utils.find((lambda u: u.name == thing), self.bot.users)
if user is not None:
type_ = 'user'
else:
guild = discord.utils.find((lambda g: g.name == thing), self.bot.guilds)
if guild is not None:
type_ = 'guild'
if type_ == 'id':
user = self.bot.get_user(snowflake)
if user is None:
try:
await ctx.send('api for user snowflake')
user = await self.bot.fetch_user(snowflake)
except Exception:
pass
if user is not None:
type_ = 'user'
else:
guild = self.bot.get_guild(snowflake)
if guild is not None:
type_ = 'guild'
if type_ == 'guild':
await self.server_info(ctx, guild.id)
elif type_ == 'user':
await ctx.send('user')
else:
await ctx.send(type_)
await ctx.send(snowflake)
@commands.command(name='math', aliases=['solve', 'meth'])
async def math(self, ctx, *, problem):
try:
await self.bot.send(ctx, f'```{util.math.parse(problem)}```')
except Exception:
await self.bot.send(ctx, ctx.l.useful.meth.oops)
@commands.command(name='google', aliases=['thegoogle'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def google_search(self, ctx, *, query):
safesearch = True
if isinstance(ctx.channel, discord.TextChannel):
safesearch = not ctx.channel.is_nsfw()
try:
with ctx.typing():
res = await self.google_client.search(query, safesearch=safesearch)
except async_cse.search.NoResults:
await self.bot.send(ctx, ctx.l.useful.search.nope)
return
except async_cse.search.APIError:
await self.bot.send(ctx, ctx.l.useful.search.error)
return
if len(res) == 0:
await self.bot.send(ctx, ctx.l.useful.search.nope)
return
res = res[0]
embed = discord.Embed(color=self.d.cc, title=res.title, description=res.description, url=res.url)
await ctx.send(embed=embed)
@commands.command(name='youtube', aliases=['ytsearch', 'yt'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def youtube_search(self, ctx, *, query):
safesearch = True
if isinstance(ctx.channel, discord.TextChannel):
safesearch = not ctx.channel.is_nsfw()
try:
with ctx.typing():
res = await self.google_client.search(query, safesearch=safesearch)
except async_cse.search.NoResults:
await self.bot.send(ctx, ctx.l.useful.search.nope)
return
except async_cse.search.APIError:
await self.bot.send(ctx, ctx.l.useful.search.error)
return
res = (*filter((lambda r: 'youtube.com/watch' in r.url), res),)
if len(res) == 0:
await self.bot.send(ctx, ctx.l.useful.search.nope)
return
res = res[0]
await ctx.send(res.url)
@commands.command(name='image', aliases=['imagesearch', 'img'])
@commands.cooldown(1, 2, commands.BucketType.user)
async def image_search(self, ctx, *, query):
safesearch = True
if isinstance(ctx.channel, discord.TextChannel):
safesearch = not ctx.channel.is_nsfw()
try:
with ctx.typing():
res = await self.google_client.search(query, safesearch=safesearch, image_search=True)
except async_cse.search.NoResults:
await self.bot.send(ctx, ctx.l.useful.search.nope)
return
except async_cse.search.APIError:
await self.bot.send(ctx, ctx.l.useful.search.error)
return
if len(res) == 0:
await self.bot.send(ctx, ctx.l.useful.search.nope)
return
res = res[0]
await ctx.send(res.image_url)
def setup(bot):
bot.add_cog(Useful(bot))
|
the-stack_106_13358
|
"""
Definition of urls for Django App.
"""
from django.conf.urls import url
import django.contrib.auth.views
from . import views
# Uncomment the next lines to enable the admin:
# from django.conf.urls import include
# from django.contrib import admin
# admin.autodiscover()
site = [
# Examples:
url(r'^$', views.home, name='research-home'),
url(r'^index$', views.home, name='research-home'),
url(r'^chart$', views.chart, name='research-chart'),
url(r'^barometric$', views.barometric, name='research-barometric'),
url(r'^hchart$', views.hchart, name='hchart'),
url(r'^hchart/$', views.hchart_para, name='hchart'),
url(regex='^bar$', view=views.BarView.as_view(), name='bar'),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
|
the-stack_106_13359
|
from operator import itemgetter
from urllib.parse import urlparse
from fman import \
DirectoryPaneCommand, NO, QuicksearchItem, YES, load_json, show_alert, \
show_prompt, show_quicksearch
from fman.url import splitscheme
from .filesystems import is_ftp
class OpenFtpLocation(DirectoryPaneCommand):
def __call__(self):
text, ok = show_prompt(
'Please enter the URL',
default='ftp[s]://[user[:password]@]ftp.host[:port][/path/to/dir]')
if text and ok:
self.pane.set_path(text)
return
class OpenFtpBookmark(DirectoryPaneCommand):
def __call__(self):
result = show_quicksearch(self._get_items)
if result and result[1]:
# Fetch bookmarks to connect to the default path
bookmarks = \
load_json('FTP Bookmarks.json', default={})
bookmark = bookmarks[result[1]]
url = urlparse(result[1])._replace(path=bookmark[1]).geturl()
self.pane.set_path(url)
def _get_items(self, query):
bookmarks = \
load_json('FTP Bookmarks.json', default={})
for item in sorted(bookmarks.keys()):
try:
index = item.lower().index(query)
except ValueError:
continue
else:
highlight = range(index, index + len(query))
yield QuicksearchItem(item, highlight=highlight)
class AddFtpBookmark(DirectoryPaneCommand):
def __call__(self):
url = self.pane.get_path()
if not is_ftp(url):
url = 'ftp[s]://user[:password]@other.host[:port]/some_dir'
url, ok = show_prompt(
'New FTP bookmark, please enter the URL', default=url)
if not (url and ok):
return
if not is_ftp(url):
show_alert(
'URL must include any of the following schemes: '
'ftp://, ftps://')
return
bookmarks = \
load_json('FTP Bookmarks.json', default={}, save_on_quit=True)
# XXX URL is split in `(base, path)` to allow setting a default path
u = urlparse(url)
base = alias = u._replace(path='').geturl()
path = u.path
if base in bookmarks:
# XXX if base URL points to an alias, resolve to an existing URL
base = bookmarks[base][0]
if path and path.strip('/'):
alias += '-'.join(path.split('/'))
alias, ok = show_prompt(
'Please enter an alias (will override aliases with the same name)',
default=alias)
if not (alias and ok):
return
if not is_ftp(alias):
# XXX alias must include the FTP scheme
scheme, _ = splitscheme(base)
alias = scheme + alias
if urlparse(alias).path:
show_alert('Aliases should not include path information')
return
bookmarks[alias] = (base, path)
class RemoveFtpBookmark(DirectoryPaneCommand):
def __call__(self):
result = show_quicksearch(self._get_items)
if result and result[1]:
choice = show_alert(
'Are you sure you want to delete "%s"' % (result[1],),
buttons=YES | NO,
default_button=NO
)
if choice == YES:
bookmarks = \
load_json('FTP Bookmarks.json', default={}, save_on_quit=True)
bookmarks.pop(result[1], None)
def _get_items(self, query):
bookmarks = \
load_json('FTP Bookmarks.json', default={})
for item in sorted(bookmarks.keys()):
try:
index = item.lower().index(query)
except ValueError:
continue
else:
highlight = range(index, index + len(query))
yield QuicksearchItem(item, highlight=highlight)
class OpenFtpHistory(DirectoryPaneCommand):
def __call__(self):
result = show_quicksearch(self._get_items)
if result and result[1]:
self.pane.set_path(result[1])
def _get_items(self, query):
bookmarks = \
load_json('FTP History.json', default={})
for item, _ in sorted(
bookmarks.items(), key=itemgetter(1), reverse=True):
try:
index = item.lower().index(query)
except ValueError:
continue
else:
highlight = range(index, index + len(query))
yield QuicksearchItem(item, highlight=highlight)
class RemoveFtpHistory(DirectoryPaneCommand):
def __call__(self):
choice = show_alert(
'Are you sure you want to delete the FTP connection history?',
buttons=YES | NO,
default_button=NO
)
if choice == YES:
history = \
load_json('FTP History.json', default={}, save_on_quit=True)
history.clear()
|
the-stack_106_13360
|
# Copyright 2019 Apex.AI, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import unittest
import ament_index_python
import launch
import launch.actions
import launch_testing
import launch_testing.util
import pytest
@pytest.mark.launch_test
@launch_testing.parametrize('arg_param', ['thing=On', 'thing=Off', 'flag1'])
def generate_test_description(arg_param, ready_fn):
terminating_process = launch.actions.ExecuteProcess(
cmd=[
sys.executable,
os.path.join(
ament_index_python.get_package_prefix('launch_testing'),
'lib/launch_testing',
'terminating_proc',
),
# Use the parameter passed to generate_test_description as an argument
# to the terminating_proc
'--{}'.format(arg_param),
]
)
return (
launch.LaunchDescription([
terminating_process,
launch_testing.util.KeepAliveProc(),
launch.actions.OpaqueFunction(function=lambda context: ready_fn())
]),
{'dut_process': terminating_process}
)
class TestProcessOutput(unittest.TestCase):
# Note that 'arg_param' is automatically given to the test case, even though it was not
# part of the test context.
def test_process_outputs_expected_value(self, proc_output, arg_param):
proc_output.assertWaitFor('--' + arg_param, timeout=10, stream='stdout')
|
the-stack_106_13362
|
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
import os
import sys
from pycompss.util.exceptions import PyCOMPSsException
def test_get_optional_module_warning():
from pycompss.util.warnings.modules import get_optional_module_warning
warning = get_optional_module_warning(
"UNITTEST_NAME", "UNITTEST_DESCRIPTION"
)
assert isinstance(
warning, str
), "Optional module warning does NOT return a string"
assert warning != "", "Optional module warning can not be empty"
assert (
"UNITTEST_NAME" in warning
), "Module name not in optional module warning"
assert (
"UNITTEST_DESCRIPTION" in warning
), "Module description not in optional module warning"
def test_show_optional_module_warning():
import pycompss.util.warnings.modules as warn
# Hack - Add non existing package
warn.OPTIONAL_MODULES["non_existing_package"] = "this is the description"
stdout_backup = sys.stdout
out_file = "warning.out"
fd = open(out_file, "w")
sys.stdout = fd
warn.show_optional_module_warnings()
# Cleanup
sys.stdout = stdout_backup
fd.close()
del warn.OPTIONAL_MODULES["non_existing_package"]
# Result check
if os.path.exists(out_file) and os.path.getsize(out_file) > 0:
# Non empty file exists - this is ok.
os.remove(out_file)
else:
raise PyCOMPSsException("The warning has not been shown")
|
the-stack_106_13363
|
import datetime
from google.cloud import bigtable
# configure the connection to bigtable
client = bigtable.Client(project='sensorray', admin=True)
instance = client.instance('instance')
table = instance.table('table')
# get the current time
timestamp=datetime.datetime.utcnow()
# read the data in from arduino
id = '0000'
data = {
'water':500,
'humidity':600,
'light':800,
}
# write the data
row_key = id.encode()
rows = []
for key, value in data.items():
row = table.row(row_key)
row.set_cell( 'sensor',
key,
int(value),
timestamp)
rows.append(row)
table.mutate_rows(rows)
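
# --- Hedged read-back sketch (illustrative only) ---
# Reads the row written above and prints the latest cell for each sensor
# column; integers written with set_cell are stored as 8-byte big-endian.
import struct

row_data = table.read_row(row_key)
if row_data is not None:
    for column, cells in row_data.cells['sensor'].items():
        latest = cells[0]
        print(column.decode(), struct.unpack('>q', latest.value)[0], latest.timestamp)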
|
the-stack_106_13365
|
import logging
import re
import requests
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
from anime_downloader import util
from subprocess import CalledProcessError
logger = logging.getLogger(__name__)
class Kwik(BaseExtractor):
'''Extracts video url from kwik pages, Kwik has some `security`
which allows to access kwik pages when only refered by something
and the kwik video stream when refered through the corresponding
kwik video page.
'''
def _get_data(self):
# Kwik servers don't have direct link access you need to be referred
# from somewhere, I will just use the url itself. We then
# have to rebuild the url. Hopefully kwik doesn't block this too
# Necessary
self.url = self.url.replace(".cx/e/", ".cx/f/")
self.headers.update({"referer": self.url})
cookies = util.get_hcaptcha_cookies(self.url)
if not cookies:
resp = util.bypass_hcaptcha(self.url)
else:
resp = requests.get(self.url, cookies=cookies)
title_re = re.compile(r'title>(.*)<')
kwik_text = resp.text
deobfuscated = None
loops = 0
while not deobfuscated and loops < 6:
try:
deobfuscated = helpers.soupify(util.deobfuscate_packed_js(re.search(r'<(script).*(var\s+_.*escape.*?)</\1>(?s)', kwik_text).group(2)))
except (AttributeError, CalledProcessError) as e:
if type(e) == AttributeError:
resp = util.bypass_hcaptcha(self.url)
kwik_text = resp.text
if type(e) == CalledProcessError:
resp = requests.get(self.url, cookies=cookies)
finally:
cookies = resp.cookies
title = title_re.search(kwik_text).group(1)
loops += 1
post_url = deobfuscated.form["action"]
token = deobfuscated.input["value"]
resp = helpers.post(post_url, headers=self.headers, params={"_token": token}, cookies=cookies, allow_redirects=False)
stream_url = resp.headers["Location"]
logger.debug('Stream URL: %s' % stream_url)
return {
'stream_url': stream_url,
'meta': {
'title': title,
'thumbnail': ''
},
'referer': None
}
|
the-stack_106_13366
|
##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
"""
Example for Caprese's module for NMPC.
"""
import random
from idaes.apps.caprese.dynamic_block import DynamicBlock
from idaes.apps.caprese.controller import ControllerBlock
from idaes.apps.caprese.util import apply_noise_with_bounds
from idaes.apps.caprese.categorize import (
categorize_dae_variables_and_constraints,
VariableCategory,
ConstraintCategory,
)
VC = VariableCategory
CC = ConstraintCategory
import pyomo.environ as pyo
from pyomo.dae.flatten import flatten_dae_components
from pyomo.dae.initialization import solve_consistent_initial_conditions
from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP
from pyomo.contrib.incidence_analysis.interface import IncidenceGraphInterface
from pyomo.core.expr.calculus.derivatives import reverse_ad
import idaes.logger as idaeslog
from idaes.apps.caprese.examples.cstr_model import make_model
import numpy as np
import scipy.sparse as sps
import pandas as pd
import matplotlib.pyplot as plt
__author__ = "Robert Parker"
# See if ipopt is available and set up solver
if pyo.SolverFactory('ipopt').available():
solver = pyo.SolverFactory('ipopt')
solver.options = {
'tol': 1e-6,
'bound_push': 1e-8,
'halt_on_ampl_error': 'yes',
'linear_solver': 'ma57',
}
else:
solver = None
class PlotData(object):
def __init__(self, group, location, name=None, t_switch=None):
# Would really like a PlotData class that is constructed based on an
# NMPCVar object that contains necessary setpoint/reference
# information, instead of having to access that in the NMPCVarGroup
time = group.index_set
        if t_switch is None:
t_switch = group.t0
self.name = name
var = group.varlist[location]
initial = group.reference[location]
setpoint = group.setpoint[location]
self.data_series = pd.Series(
[var[t].value for t in time],
index=[t for t in time])
self.setpoint_series = pd.Series(
[initial if t < t_switch else setpoint for t in time])
def plot(self):
# fig, ax can be formatted to the user's liking
fig, ax = plt.subplots()
if self.name is not None:
self.data_series.plot(label=self.name)
else:
self.data_series.plot()
return fig, ax
def main(plot_switch=False):
# This tests the same model constructed in the test_nmpc_constructor_1 file
m_controller = make_model(horizon=3, ntfe=30, ntcp=2, bounds=True)
sample_time = 0.5
m_plant = make_model(horizon=sample_time, ntfe=5, ntcp=2)
time_plant = m_plant.fs.time
solve_consistent_initial_conditions(m_plant, time_plant, solver)
#####
# Flatten and categorize controller model
#####
model = m_controller
time = model.fs.time
t0 = time.first()
t1 = time[2]
scalar_vars, dae_vars = flatten_dae_components(
model,
time,
pyo.Var,
)
scalar_cons, dae_cons = flatten_dae_components(
model,
time,
pyo.Constraint,
)
inputs = [
model.fs.mixer.S_inlet.flow_vol,
model.fs.mixer.E_inlet.flow_vol,
]
measurements = [
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
model.fs.cstr.outlet.temperature,
]
model.fs.cstr.control_volume.material_holdup[:,'aq','Solvent'].fix()
model.fs.cstr.total_flow_balance.deactivate()
var_partition, con_partition = categorize_dae_variables_and_constraints(
model,
dae_vars,
dae_cons,
time,
input_vars=inputs,
)
controller = ControllerBlock(
model=model,
time=time,
measurements=measurements,
category_dict={None: var_partition},
)
controller.construct()
solve_consistent_initial_conditions(m_controller, time, solver)
controller.initialize_to_initial_conditions()
m_controller._dummy_obj = pyo.Objective(expr=0)
nlp = PyomoNLP(m_controller)
igraph = IncidenceGraphInterface(nlp)
m_controller.del_component(m_controller._dummy_obj)
diff_vars = [var[t1] for var in var_partition[VC.DIFFERENTIAL]]
alg_vars = [var[t1] for var in var_partition[VC.ALGEBRAIC]]
deriv_vars = [var[t1] for var in var_partition[VC.DERIVATIVE]]
diff_eqns = [con[t1] for con in con_partition[CC.DIFFERENTIAL]]
alg_eqns = [con[t1] for con in con_partition[CC.ALGEBRAIC]]
# Assemble and factorize "derivative Jacobian"
dfdz = nlp.extract_submatrix_jacobian(diff_vars, diff_eqns)
dfdy = nlp.extract_submatrix_jacobian(alg_vars, diff_eqns)
dgdz = nlp.extract_submatrix_jacobian(diff_vars, alg_eqns)
dgdy = nlp.extract_submatrix_jacobian(alg_vars, alg_eqns)
dfdzdot = nlp.extract_submatrix_jacobian(deriv_vars, diff_eqns)
fact = sps.linalg.splu(dgdy.tocsc())
dydz = fact.solve(dgdz.toarray())
deriv_jac = dfdz - dfdy.dot(dydz)
fact = sps.linalg.splu(dfdzdot.tocsc())
dzdotdz = -fact.solve(deriv_jac)
# Use some heuristic on the eigenvalues of the derivative Jacobian
# to identify fast states.
w, V = np.linalg.eig(dzdotdz)
w_max = np.max(np.abs(w))
fast_modes, = np.where(np.abs(w) > w_max/2)
fast_states = []
for idx in fast_modes:
evec = V[:, idx]
_fast_states, _ = np.where(np.abs(evec) > 0.5)
fast_states.extend(_fast_states)
fast_states = set(fast_states)
# Store components necessary for model reduction in a model-
# independent form.
fast_state_derivs = [pyo.ComponentUID(
var_partition[VC.DERIVATIVE][idx].referent,
context=model)
for idx in fast_states
]
fast_state_diffs = [pyo.ComponentUID(
var_partition[VC.DIFFERENTIAL][idx].referent,
context=model)
for idx in fast_states
]
fast_state_discs = [pyo.ComponentUID(
con_partition[CC.DISCRETIZATION][idx].referent,
context=model)
for idx in fast_states
]
# Perform pseudo-steady state model reduction on the fast states
# and re-categorize
for cuid in fast_state_derivs:
var = cuid.find_component_on(m_controller)
var.fix(0.0)
for cuid in fast_state_diffs:
var = cuid.find_component_on(m_controller)
var[t0].unfix()
for cuid in fast_state_discs:
con = cuid.find_component_on(m_controller)
con.deactivate()
var_partition, con_partition = categorize_dae_variables_and_constraints(
model,
dae_vars,
dae_cons,
time,
input_vars=inputs,
)
controller.del_component(model)
# Re-construct controller block with new categorization
measurements = [
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
]
controller = ControllerBlock(
model=model,
time=time,
measurements=measurements,
category_dict={None: var_partition},
)
controller.construct()
#####
# Construct dynamic block for plant
#####
model = m_plant
time = model.fs.time
t0 = time.first()
t1 = time[2]
scalar_vars, dae_vars = flatten_dae_components(
model,
time,
pyo.Var,
)
scalar_cons, dae_cons = flatten_dae_components(
model,
time,
pyo.Constraint,
)
inputs = [
model.fs.mixer.S_inlet.flow_vol,
model.fs.mixer.E_inlet.flow_vol,
]
measurements = [
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'C']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'E']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'S']),
pyo.Reference(model.fs.cstr.outlet.conc_mol[:, 'P']),
]
model.fs.cstr.control_volume.material_holdup[:,'aq','Solvent'].fix()
model.fs.cstr.total_flow_balance.deactivate()
var_partition, con_partition = categorize_dae_variables_and_constraints(
model,
dae_vars,
dae_cons,
time,
input_vars=inputs,
)
plant = DynamicBlock(
model=model,
time=time,
measurements=measurements,
category_dict={None: var_partition},
)
plant.construct()
p_t0 = plant.time.first()
c_t0 = controller.time.first()
p_ts = plant.sample_points[1]
c_ts = controller.sample_points[1]
controller.set_sample_time(sample_time)
plant.set_sample_time(sample_time)
# We now perform the "RTO" calculation: Find the optimal steady state
# to achieve the following setpoint
setpoint = [
(controller.mod.fs.cstr.outlet.conc_mol[0, 'P'], 0.4),
#(controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.01),
(controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 0.1),
(controller.mod.fs.cstr.control_volume.energy_holdup[0, 'aq'], 300),
(controller.mod.fs.mixer.E_inlet.flow_vol[0], 0.1),
(controller.mod.fs.mixer.S_inlet.flow_vol[0], 2.0),
(controller.mod.fs.cstr.volume[0], 1.0),
]
setpoint_weights = [
(controller.mod.fs.cstr.outlet.conc_mol[0, 'P'], 1.),
(controller.mod.fs.cstr.outlet.conc_mol[0, 'S'], 1.),
(controller.mod.fs.cstr.control_volume.energy_holdup[0, 'aq'], 1.),
(controller.mod.fs.mixer.E_inlet.flow_vol[0], 1.),
(controller.mod.fs.mixer.S_inlet.flow_vol[0], 1.),
(controller.mod.fs.cstr.volume[0], 1.),
]
# Some of the "differential variables" that have been fixed in the
# model file are different from the measurements listed above. We
# unfix them here so the RTO solve is not overconstrained.
# (The RTO solve will only automatically unfix inputs and measurements.)
controller.mod.fs.cstr.control_volume.material_holdup[0,...].unfix()
controller.mod.fs.cstr.control_volume.energy_holdup[0,...].unfix()
#controller.mod.fs.cstr.volume[0].unfix()
controller.mod.fs.cstr.control_volume.material_holdup[0,'aq','Solvent'].fix()
controller.add_single_time_optimization_objective(setpoint, setpoint_weights)
controller.solve_setpoint(solver)
# Now we are ready to construct the tracking NMPC problem
tracking_weights = [
*((v, 1.) for v in controller.vectors.differential[:,0]),
*((v, 1.) for v in controller.vectors.input[:,0]),
]
controller.add_tracking_objective(tracking_weights)
controller.constrain_control_inputs_piecewise_constant()
controller.initialize_to_initial_conditions()
# Solve the first control problem
controller.vectors.input[...].unfix()
controller.vectors.input[:,0].fix()
solver.solve(controller, tee=True)
# For a proper NMPC simulation, we must have noise.
# We do this by treating inputs and measurements as Gaussian random
# variables with the following variances (and bounds).
cstr = controller.mod.fs.cstr
variance = [
(cstr.outlet.conc_mol[0.0, 'S'], 0.01),
(cstr.outlet.conc_mol[0.0, 'E'], 0.005),
(cstr.outlet.conc_mol[0.0, 'C'], 0.01),
(cstr.outlet.conc_mol[0.0, 'P'], 0.005),
(cstr.outlet.temperature[0.0], 1.),
(cstr.volume[0.0], 0.05),
]
controller.set_variance(variance)
measurement_variance = [
v.variance for v in controller.MEASUREMENT_BLOCK[:].var
]
measurement_noise_bounds = [
(0.0, var[c_t0].ub) for var in controller.MEASUREMENT_BLOCK[:].var
]
mx = plant.mod.fs.mixer
variance = [
(mx.S_inlet_state[0.0].flow_vol, 0.02),
(mx.E_inlet_state[0.0].flow_vol, 0.001),
]
plant.set_variance(variance)
input_variance = [v.variance for v in plant.INPUT_BLOCK[:].var]
input_noise_bounds = [
(0.0, var[p_t0].ub) for var in plant.INPUT_BLOCK[:].var
]
random.seed(100)
# Extract inputs from controller and inject them into plant
inputs = controller.generate_inputs_at_time(c_ts)
plant.inject_inputs(inputs)
# This "initialization" really simulates the plant with the new inputs.
plant.vectors.input[:, :].fix()
plant.initialize_by_solving_elements(solver)
plant.vectors.input[:, :].fix()
solver.solve(plant, tee=True)
for i in range(1,11):
print('\nENTERING NMPC LOOP ITERATION %s\n' % i)
measured = plant.generate_measurements_at_time(p_ts)
plant.advance_one_sample()
plant.initialize_to_initial_conditions()
measured = apply_noise_with_bounds(
measured,
measurement_variance,
random.gauss,
measurement_noise_bounds,
)
controller.advance_one_sample()
controller.load_measurements(measured)
solver.solve(controller, tee=True)
inputs = controller.generate_inputs_at_time(c_ts)
inputs = apply_noise_with_bounds(
inputs,
input_variance,
random.gauss,
input_noise_bounds,
)
plant.inject_inputs(inputs)
plant.initialize_by_solving_elements(solver)
solver.solve(plant)
import pdb; pdb.set_trace()
if __name__ == '__main__':
main()
|
the-stack_106_13367
|
import numpy as np
import itertools
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
from random import *
class Scenario(BaseScenario):
def __init__(self):
self.one_hot_array = []
self.colours = []
self.obstacle_count = 0
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 5
num_landmarks = 5
num_obstacles = 12
# generate one-hot encoding for unique hidden goals
self.one_hot_array = list(itertools.product([0, 1], repeat=num_landmarks))
# generate colours for goal identification
for _ in range(num_landmarks):
self.colours.append(np.random.uniform(-1, +1, 3))
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.10
agent.color = self.colours[i]
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.color = self.colours[i]
landmark.id = self.one_hot_array[2**i]
# add obstacles
world.obstacles = [Landmark() for i in range(num_obstacles)]
for i, obstacle in enumerate(world.obstacles):
obstacle.name = 'obstacle %d' % i
obstacle.collide = True
obstacle.movable = False
obstacle.size = 0.40
obstacle.boundary = False
obstacle.color = np.array([0.25, 0.25, 0.25])
# self.create_wall(world, obstacle, 10, -0.2, -1, -0.2, -0.2)
# make initial conditions
self.reset_world(world)
return world
def assign_goals(self, i, agent):
# assign each agent to a unique set of goals in one-hot encoding
agent.hidden_goals = self.one_hot_array[2**i]
def reset_world(self, world):
# properties for agents
for i, agent in enumerate(world.agents):
pass
# properties for landmarks
for i, agent in enumerate(world.agents):
pass
# properties for obstacles
for i, obstacle in enumerate(world.obstacles):
pass
# set initial states
starts = [[0.00, -0.70], [0.00, 0.70], [-0.70, 0.00], [0.70, 0.00],
[0.00, -0.60], [0.00, 0.60], [-0.60, 0.00], [0.60, 0.00]]
for i, agent in enumerate(world.agents):
r = randint(0,len(starts)-1)
agent.state.p_pos = np.array(starts[r])
starts.remove(starts[r])
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
self.assign_goals(i, agent)
starts = [[0.00, -0.70], [0.00, 0.70], [-0.70, 0.00], [0.70, 0.00],
[0.00, -0.60], [0.00, 0.60], [-0.60, 0.00], [0.60, 0.00]]
for i, landmark in enumerate(world.landmarks):
r = randint(0,len(starts)-1)
landmark.state.p_pos = np.array(starts[r])
starts.remove(starts[r])
landmark.state.p_vel = np.zeros(world.dim_p)
for i, obstacle in enumerate(world.obstacles):
if i > 3:
obstacle.size = 0.2
if i > 7:
obstacle.size = 0.1
positions = [[-0.50, -0.50], [-0.50, 0.50], [0.50, -0.50], [0.50, 0.50],
[-0.30, -0.30], [-0.30, 0.30], [0.30, -0.30], [0.30, 0.30],
[-0.20, -0.20], [-0.20, 0.20], [0.20, -0.20], [0.20, 0.20]]
obstacle.state.p_pos = np.array(positions[i])
obstacle.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
dists = []
        for l in world.landmarks:
            if l.id == agent.hidden_goals:
                dist = np.sqrt(np.sum(np.square(agent.state.p_pos - l.state.p_pos)))
                dists.append(dist)
                rew -= dist
                if dist < 0.1:
                    occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 7
collisions += 1
for o in world.obstacles:
if self.is_collision(o, agent):
rew -= 0
return (rew, collisions, min_dists, occupied_landmarks)
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
return True if dist < dist_min else False
def reward(self, agent, world):
# Agents are rewarded based on minimum agent distance to each relevant landmark, penalized for collisions
rew = 0
dists = []
for l in world.landmarks:
if l.id == agent.hidden_goals:
rew -= np.sqrt(np.sum(np.square(agent.state.p_pos - l.state.p_pos)))
if self.is_collision(l, agent):
rew += 0
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 7
for o in world.obstacles:
if self.is_collision(o, agent):
rew -= 0
# agents are penalized for exiting the screen, so that they can converge faster
def bound(x):
if x < 0.9:
return 0
if x < 1.0:
return (x - 0.9) * 10
return min(np.exp(2 * x - 2), 10)
for p in range(world.dim_p):
x = abs(agent.state.p_pos[p])
rew -= bound(x)
return rew
def observation(self, agent, world):
# get positions of all entities in this agent's reference frame
entity_pos = []
for entity in world.landmarks: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
for entity in world.obstacles: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# entity colors
entity_color = []
for entity in world.landmarks: # world.entities:
entity_color.append(entity.color)
for entity in world.obstacles: # world.entities:
entity_color.append(entity.color)
# communication of all other agents
comm = []
other_pos = []
for other in world.agents:
if other is agent: continue
comm.append(other.state.c)
other_pos.append(other.state.p_pos - agent.state.p_pos)
return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + comm)
|
the-stack_106_13369
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import inspect
import types
import decorator
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions.internal import slicing
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import name_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util import tf_inspect # pylint: disable=g-direct-tensorflow-import
__all__ = [
'Distribution',
]
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
'batch_shape',
'batch_shape_tensor',
'cdf',
'covariance',
'cross_entropy',
'entropy',
'event_shape',
'event_shape_tensor',
'kl_divergence',
'log_cdf',
'log_prob',
'log_survival_function',
'mean',
'mode',
'prob',
'sample',
'stddev',
'survival_function',
'variance',
]
_ALWAYS_COPY_PUBLIC_METHOD_WRAPPERS = ['kl_divergence', 'cross_entropy']
UNSET_VALUE = object()
JAX_MODE = False # Overwritten by rewrite script.
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(tf.Module):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError('fn is not callable: {}'.format(fn))
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the 'Args:' section."""
old_str = old_str or ''
old_str_lines = old_str.split('\n')
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = '\n'.join(' %s' % line for line in append_str.split('\n'))
# Step 1: Find mention of 'Args':
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == 'args:']
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ('\n'.join(old_str_lines[:final_args_ix])
+ '\n\n' + append_str + '\n\n'
+ '\n'.join(old_str_lines[final_args_ix:]))
else:
return old_str + '\n\n' + append_str
def _convert_to_tensor(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a (structure of) `Tensor`.
This function converts Python objects of various types to a (structure of)
`Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and
  Python scalars.
Args:
    value: An object whose structure matches that of `dtype` and/or
`dtype_hint` and for which each leaf has a registered `Tensor` conversion
function.
dtype: Optional (structure of) element type for the returned tensor. If
missing, the type is inferred from the type of `value`.
dtype_hint: Optional (structure of) element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a dtype in
mind when converting to a tensor, so dtype_hint can be used as a soft
preference. If the conversion to `dtype_hint` is not possible, this
argument has no effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
tensor: A (structure of) `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
if (tf.nest.is_nested(dtype) or
tf.nest.is_nested(dtype_hint)):
if dtype is None:
fn = lambda v, dh: tf.convert_to_tensor(v, dtype_hint=dh, name=name)
return nest.map_structure_up_to(dtype_hint, fn, value, dtype_hint,
# Allow list<->tuple conflation.
check_types=False)
elif dtype_hint is None:
fn = lambda v, d: tf.convert_to_tensor(v, dtype=d, name=name)
return nest.map_structure_up_to(dtype, fn, value, dtype,
check_types=False)
else:
fn = lambda v, d, dh: tf.convert_to_tensor( # pylint: disable=g-long-lambda
v, dtype=d, dtype_hint=dh, name=name)
return nest.map_structure_up_to(dtype, fn, value, dtype, dtype_hint,
check_types=False, expand_composites=True)
return tf.convert_to_tensor(
value, dtype=dtype, dtype_hint=dtype_hint, name=name)
def _remove_dict_keys_with_value(dict_, val):
"""Removes `dict` keys which have have `self` as value."""
return {k: v for k, v in dict_.items() if v is not val}
class _DistributionMeta(abc.ABCMeta):
"""Helper metaclass for tfp.Distribution."""
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError('Expected non-empty baseclass. Does Distribution '
'not subclass _BaseDistribution?')
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0] if which_base else None
if base is None or base == _BaseDistribution:
# Nothing to be done for Distribution or unrelated subclass.
return super(_DistributionMeta, mcs).__new__(
mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError('First parent class declared for {} must be '
'Distribution, but saw "{}"'.format(
classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
if attr in attrs:
# The method is being overridden, do not update its docstring.
continue
special_attr = '_{}'.format(attr)
class_attr_value = attrs.get(attr, None)
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
'Internal error: expected base class "{}" to '
'implement method "{}"'.format(base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
class_special_attr_docstring = (
None if class_special_attr_value is None else
tf_inspect.getdoc(class_special_attr_value))
if (class_special_attr_docstring or
attr in _ALWAYS_COPY_PUBLIC_METHOD_WRAPPERS):
class_attr_value = _copy_fn(base_attr_value)
attrs[attr] = class_attr_value
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
'Expected base class fn to contain a docstring: {}.{}'.format(
base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
'Additional documentation from `{}`:\n\n{}'.format(
classname, class_special_attr_docstring))
# Now we'll intercept the default __init__ if it exists.
default_init = attrs.get('__init__', None)
if default_init is None:
# The class has no __init__ because its abstract. (And we won't add one.)
return super(_DistributionMeta, mcs).__new__(
mcs, classname, baseclasses, attrs)
# pylint: disable=protected-access
# For a comparison of different methods for wrapping functions, see:
# https://hynek.me/articles/decorators/
@decorator.decorator
def wrapped_init(wrapped, self_, *args, **kwargs):
"""A 'master `__init__`' which is always called."""
# We can't use `wrapped` because it results in a self reference which
# confounds `tf.function`.
del wrapped
# Note: if we ever want to have things set in `self` before `__init__` is
# called, here is the place to do it.
self_._parameters = None
default_init(self_, *args, **kwargs)
# Note: if we ever want to override things set in `self` by subclass
# `__init__`, here is the place to do it.
if self_._parameters is None:
# We prefer subclasses will set `parameters = dict(locals())` because
# this has nearly zero overhead. However, failing to do this, we will
# resolve the input arguments dynamically and only when needed.
dummy_self = tuple()
self_._parameters = self_._no_dependency(lambda: ( # pylint: disable=g-long-lambda
_remove_dict_keys_with_value(
inspect.getcallargs(default_init, dummy_self, *args, **kwargs),
dummy_self)))
elif hasattr(self_._parameters, 'pop'):
self_._parameters = self_._no_dependency(
_remove_dict_keys_with_value(self_._parameters, self_))
# pylint: enable=protected-access
attrs['__init__'] = wrapped_init(default_init) # pylint: disable=no-value-for-parameter,assignment-from-no-return
return super(_DistributionMeta, mcs).__new__(
mcs, classname, baseclasses, attrs)
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name='...'`. For example, to enable `log_prob(value,
name='log_prob')` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring('Some other details.')
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample(n)`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Shapes
There are three important concepts associated with TensorFlow Distributions
shapes:
- Event shape describes the shape of a single draw from the distribution;
it may be dependent across dimensions. For scalar distributions, the event
shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is
`[5]`.
- Batch shape describes independent, not identically distributed draws, aka a
"collection" or "bunch" of distributions.
- Sample shape describes independent, identically distributed draws of batches
from the distribution family.
The event shape and the batch shape are properties of a Distribution object,
whereas the sample shape is associated with a specific call to `sample` or
`log_prob`.
For detailed usage examples of TensorFlow Distributions shapes, see
[this tutorial](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb)
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `tfd.FULLY_REPARAMETERIZED`, then samples from the distribution are
fully reparameterized, and straight-through gradients are supported.
If `tfd.NOT_REPARAMETERIZED`, then samples from the distribution are not
fully reparameterized, and straight-through gradients are either
partially unsupported or are not supported at all.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
if not name:
name = type(self).__name__
name = name_util.camel_to_lower_snake(name)
name = name_util.get_name_scope_name(name)
name = name_util.strip_invalid_chars(name)
super(Distribution, self).__init__(name=name)
self._name = name
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tf.is_tensor(t):
raise ValueError('Graph parent item %d is not a Tensor; %s.' % (i, t))
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = self._no_dependency(parameters)
self._parameters_sanitized = False
self._graph_parents = graph_parents
self._initial_parameter_control_dependencies = tuple(
d for d in self._parameter_control_dependencies(is_init=True)
if d is not None)
if self._initial_parameter_control_dependencies:
self._initial_parameter_control_dependencies = (
tf.group(*self._initial_parameter_control_dependencies),)
@classmethod
def param_shapes(cls, sample_shape, name='DistributionParamShapes'):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with tf.name_scope(name):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tf.TensorShape):
if not tensorshape_util.is_fully_defined(sample_shape):
raise ValueError('TensorShape sample_shape must be fully defined')
sample_shape = tensorshape_util.as_list(sample_shape)
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tf.get_static_value(shape)
if static_shape is None:
raise ValueError(
'sample_shape must be a fully-defined TensorShape or list/tuple')
static_params[name] = tf.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError('_param_shapes not implemented')
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name if hasattr(self, '_name') else None
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype if hasattr(self, '_dtype') else None
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove 'self', '__class__', or other special variables. These can appear
# if the subclass used: `parameters = dict(locals())`.
if (not hasattr(self, '_parameters_sanitized') or
not self._parameters_sanitized):
p = self._parameters() if callable(self._parameters) else self._parameters
self._parameters = self._no_dependency({
k: v for k, v in p.items()
if not k.startswith('__') and v is not self})
self._parameters_sanitized = True
return dict(self._parameters)
@classmethod
def _params_event_ndims(cls):
"""Returns a dict mapping constructor argument names to per-event rank.
Distributions may implement this method to provide support for slicing
(`__getitem__`) on the batch axes.
Examples: Normal has scalar parameters, so would return
`{'loc': 0, 'scale': 0}`. On the other hand, MultivariateNormalTriL has
vector loc and matrix scale, so returns `{'loc': 1, 'scale_tril': 2}`. When
a distribution accepts multiple parameterizations, either all possible
parameters may be specified by the dict, e.g. Bernoulli returns
`{'logits': 0, 'probs': 0}`, or if convenient only the parameters relevant
to this instance may be specified.
Parameter dtypes are inferred from Tensor attributes on the distribution
    where available, e.g. `bernoulli.probs`, `mvn.scale_tril`, falling back with
a warning to the dtype of the distribution.
Returns:
params_event_ndims: Per-event parameter ranks, a `str->int dict`.
"""
raise NotImplementedError(
'{} does not support batch slicing; must implement '
'_params_event_ndims.'.format(cls))
def __getitem__(self, slices):
"""Slices the batch axes of this distribution, returning a new instance.
```python
b = tfd.Bernoulli(logits=tf.zeros([3, 5, 7, 9]))
b.batch_shape # => [3, 5, 7, 9]
b2 = b[:, tf.newaxis, ..., -2:, 1::2]
b2.batch_shape # => [3, 1, 5, 2, 4]
x = tf.random.normal([5, 3, 2, 2])
cov = tf.matmul(x, x, transpose_b=True)
chol = tf.cholesky(cov)
loc = tf.random.normal([4, 1, 3, 1])
mvn = tfd.MultivariateNormalTriL(loc, chol)
mvn.batch_shape # => [4, 5, 3]
mvn.event_shape # => [2]
mvn2 = mvn[:, 3:, ..., ::-1, tf.newaxis]
mvn2.batch_shape # => [4, 2, 3, 1]
mvn2.event_shape # => [2]
```
Args:
slices: slices from the [] operator
Returns:
dist: A new `tfd.Distribution` instance with sliced parameters.
"""
return slicing.batch_slice(self, self._params_event_ndims(), {}, slices)
def __iter__(self):
raise TypeError('{!r} object is not iterable'.format(type(self).__name__))
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`tfd.FULLY_REPARAMETERIZED` or `tfd.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
try:
      # We want to track provenance from origin variables, so we use batch_slice
# if this distribution supports slicing. See the comment on
# PROVENANCE_ATTR in slicing.py
return slicing.batch_slice(self, self._params_event_ndims(),
override_parameters_kwargs, Ellipsis)
except NotImplementedError:
parameters = dict(self.parameters, **override_parameters_kwargs)
d = type(self)(**parameters)
# pylint: disable=protected-access
d._parameters = parameters
d._parameters_sanitized = True
# pylint: enable=protected-access
return d
def _batch_shape_tensor(self):
raise NotImplementedError(
'batch_shape_tensor is not implemented: {}'.format(type(self).__name__))
def batch_shape_tensor(self, name='batch_shape_tensor'):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_and_control_scope(name):
if tensorshape_util.is_fully_defined(self.batch_shape):
v = tensorshape_util.as_list(self.batch_shape)
else:
v = self._batch_shape_tensor()
return tf.identity(tf.convert_to_tensor(v, dtype_hint=tf.int32),
name='batch_shape')
def _batch_shape(self):
return None
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return tf.TensorShape(self._batch_shape())
def _event_shape_tensor(self):
raise NotImplementedError(
'event_shape_tensor is not implemented: {}'.format(type(self).__name__))
def event_shape_tensor(self, name='event_shape_tensor'):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_and_control_scope(name):
if tensorshape_util.is_fully_defined(self.event_shape):
v = tensorshape_util.as_list(self.event_shape)
else:
v = self._event_shape_tensor()
return tf.identity(tf.convert_to_tensor(v, dtype_hint=tf.int32),
name='event_shape')
def _event_shape(self):
return None
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return tf.TensorShape(self._event_shape())
def is_scalar_event(self, name='is_scalar_event'):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_and_control_scope(name):
return tf.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name='is_scalar_event')
def is_scalar_batch(self, name='is_scalar_batch'):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_and_control_scope(name):
return tf.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name='is_scalar_batch')
def _sample_n(self, n, seed=None, **kwargs):
raise NotImplementedError('sample_n is not implemented: {}'.format(
type(self).__name__))
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
"""Wrapper around _sample_n."""
with self._name_and_control_scope(name):
if JAX_MODE and seed is None:
raise ValueError('Must provide JAX PRNGKey as `dist.sample(seed=.)`')
sample_shape = tf.cast(sample_shape, tf.int32, name='sample_shape')
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, 'sample_shape')
samples = self._sample_n(
n, seed=seed() if callable(seed) else seed, **kwargs)
batch_event_shape = tf.shape(samples)[1:]
final_shape = tf.concat([sample_shape, batch_event_shape], 0)
samples = tf.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name='sample', **kwargs):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer or `tfp.util.SeedStream` instance, for seeding PRNG.
name: name to give to the op.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name, **kwargs)
def _call_log_prob(self, value, name, **kwargs):
"""Wrapper around _log_prob."""
value = _convert_to_tensor(value, name='value', dtype_hint=self.dtype)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_log_prob'):
return self._log_prob(value, **kwargs)
if hasattr(self, '_prob'):
return tf.math.log(self._prob(value, **kwargs))
raise NotImplementedError('log_prob is not implemented: {}'.format(
type(self).__name__))
def log_prob(self, value, name='log_prob', **kwargs):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name, **kwargs)
def _call_prob(self, value, name, **kwargs):
"""Wrapper around _prob."""
value = _convert_to_tensor(value, name='value', dtype_hint=self.dtype)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_prob'):
return self._prob(value, **kwargs)
if hasattr(self, '_log_prob'):
return tf.exp(self._log_prob(value, **kwargs))
raise NotImplementedError('prob is not implemented: {}'.format(
type(self).__name__))
def prob(self, value, name='prob', **kwargs):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name, **kwargs)
def _call_log_cdf(self, value, name, **kwargs):
"""Wrapper around _log_cdf."""
value = _convert_to_tensor(value, name='value', dtype_hint=self.dtype)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_log_cdf'):
return self._log_cdf(value, **kwargs)
if hasattr(self, '_cdf'):
return tf.math.log(self._cdf(value, **kwargs))
raise NotImplementedError('log_cdf is not implemented: {}'.format(
type(self).__name__))
def log_cdf(self, value, name='log_cdf', **kwargs):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name, **kwargs)
def _call_cdf(self, value, name, **kwargs):
"""Wrapper around _cdf."""
value = _convert_to_tensor(value, name='value', dtype_hint=self.dtype)
with self._name_and_control_scope(name, value, kwargs):
if hasattr(self, '_cdf'):
return self._cdf(value, **kwargs)
if hasattr(self, '_log_cdf'):
return tf.exp(self._log_cdf(value, **kwargs))
raise NotImplementedError('cdf is not implemented: {}'.format(
type(self).__name__))
def cdf(self, value, name='cdf', **kwargs):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name, **kwargs)
def _log_survival_function(self, value, **kwargs):
raise NotImplementedError(
'log_survival_function is not implemented: {}'.format(
type(self).__name__))
def _call_log_survival_function(self, value, name, **kwargs):
"""Wrapper around _log_survival_function."""
value = _convert_to_tensor(value, name='value', dtype_hint=self.dtype)
with self._name_and_control_scope(name, value, kwargs):
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return tf.math.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name='log_survival_function',
**kwargs):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name, **kwargs)
def _survival_function(self, value, **kwargs):
raise NotImplementedError('survival_function is not implemented: {}'.format(
type(self).__name__))
def _call_survival_function(self, value, name, **kwargs):
"""Wrapper around _survival_function."""
value = _convert_to_tensor(value, name='value', dtype_hint=self.dtype)
with self._name_and_control_scope(name, value, kwargs):
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name='survival_function', **kwargs):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name, **kwargs)
def _entropy(self, **kwargs):
raise NotImplementedError('entropy is not implemented: {}'.format(
type(self).__name__))
def entropy(self, name='entropy', **kwargs):
"""Shannon entropy in nats."""
with self._name_and_control_scope(name):
return self._entropy(**kwargs)
def _mean(self, **kwargs):
raise NotImplementedError('mean is not implemented: {}'.format(
type(self).__name__))
def mean(self, name='mean', **kwargs):
"""Mean."""
with self._name_and_control_scope(name):
return self._mean(**kwargs)
def _quantile(self, value, **kwargs):
raise NotImplementedError('quantile is not implemented: {}'.format(
type(self).__name__))
def _call_quantile(self, value, name, **kwargs):
with self._name_and_control_scope(name):
dtype = tf.float32 if tf.nest.is_nested(self.dtype) else self.dtype
value = tf.convert_to_tensor(value, name='value', dtype_hint=dtype)
if self.validate_args:
value = distribution_util.with_dependencies([
assert_util.assert_less_equal(value, tf.cast(1, value.dtype),
message='`value` must be <= 1'),
assert_util.assert_greater_equal(value, tf.cast(0, value.dtype),
message='`value` must be >= 0')
], value)
return self._quantile(value, **kwargs)
def quantile(self, value, name='quantile', **kwargs):
"""Quantile function. Aka 'inverse cdf' or 'percent point function'.
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name, **kwargs)
def _variance(self, **kwargs):
raise NotImplementedError('variance is not implemented: {}'.format(
type(self).__name__))
def variance(self, name='variance', **kwargs):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_and_control_scope(name):
try:
return self._variance(**kwargs)
except NotImplementedError as original_exception:
try:
return tf.square(self._stddev(**kwargs))
except NotImplementedError:
raise original_exception
def _stddev(self, **kwargs):
raise NotImplementedError('stddev is not implemented: {}'.format(
type(self).__name__))
def stddev(self, name='stddev', **kwargs):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_and_control_scope(name):
try:
return self._stddev(**kwargs)
except NotImplementedError as original_exception:
try:
return tf.sqrt(self._variance(**kwargs))
except NotImplementedError:
raise original_exception
def _covariance(self, **kwargs):
raise NotImplementedError('covariance is not implemented: {}'.format(
type(self).__name__))
def covariance(self, name='covariance', **kwargs):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
**kwargs: Named arguments forwarded to subclass implementation.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_and_control_scope(name):
return self._covariance(**kwargs)
def _mode(self, **kwargs):
raise NotImplementedError('mode is not implemented: {}'.format(
type(self).__name__))
def mode(self, name='mode', **kwargs):
"""Mode."""
with self._name_and_control_scope(name):
return self._mode(**kwargs)
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name='cross_entropy'):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_and_control_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name='kl_divergence'):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
measure `r`, the KL divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
"""
# NOTE: We do not enter a `self._name_and_control_scope` here. We rely on
# `tfd.kl_divergence(self, other)` to use `_name_and_control_scope` to apply
# assertions on both Distributions.
#
# Subclasses that override `Distribution.kl_divergence` or `_kl_divergence`
# must ensure that assertions are applied for both `self` and `other`.
return self._kl_divergence(other)
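  # Illustrative call (a sketch, not part of this module; assumes the caller
  # has imported TensorFlow Probability and uses `tfd = tfp.distributions`):
  #   p = tfd.Normal(loc=0., scale=1.)
  #   q = tfd.Normal(loc=1., scale=1.)
  #   p.kl_divergence(q)  # == 0.5, i.e. (1. - 0.)**2 / (2. * 1.**2) for equal scales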
def _default_event_space_bijector(self):
raise NotImplementedError(
        '_default_event_space_bijector is not implemented: {}'.format(
type(self).__name__))
def _experimental_default_event_space_bijector(self):
"""Bijector mapping the reals (R**n) to the event space of the distribution.
Returns:
event_space_bijector: `Bijector` instance or `None`.
Distributions with continuous support have a
`_default_event_space_bijector`, a subclass of `tfp.bijectors.Bijector`
that maps R**n to the distribution's event space. For example, the
`_default_event_space_bijector` of the `Beta` distribution is
`tfb.Sigmoid()`, which maps the real line to `[0, 1]`, the support of the
`Beta` distribution. The purpose of `_default_event_space_bijector` is
to enable gradient descent in an unconstrained space for Variational
Inference and Hamiltonian Monte Carlo methods. An effort has been made to
choose bijectors such that the tails of the distribution in the
unconstrained space are between Gaussian and Exponential. For distributions
with discrete event space, `_default_event_space_bijector` returns `None`.
"""
return self._default_event_space_bijector()
def __str__(self):
if self.batch_shape:
maybe_batch_shape = ', batch_shape=' + _str_tensorshape(self.batch_shape)
else:
maybe_batch_shape = ''
if self.event_shape:
maybe_event_shape = ', event_shape=' + _str_tensorshape(self.event_shape)
else:
maybe_event_shape = ''
if self.dtype is not None:
maybe_dtype = ', dtype=' + _str_dtype(self.dtype)
else:
maybe_dtype = ''
return ('tfp.distributions.{type_name}('
'"{self_name}"'
'{maybe_batch_shape}'
'{maybe_event_shape}'
'{maybe_dtype})'.format(
type_name=type(self).__name__,
self_name=self.name or '<unknown>',
maybe_batch_shape=maybe_batch_shape,
maybe_event_shape=maybe_event_shape,
maybe_dtype=maybe_dtype))
def __repr__(self):
return ('<tfp.distributions.{type_name} '
'\'{self_name}\''
' batch_shape={batch_shape}'
' event_shape={event_shape}'
' dtype={dtype}>'.format(
type_name=type(self).__name__,
self_name=self.name or '<unknown>',
batch_shape=_str_tensorshape(self.batch_shape),
event_shape=_str_tensorshape(self.event_shape),
dtype=_str_dtype(self.dtype)))
@contextlib.contextmanager
def _name_and_control_scope(self, name=None, value=UNSET_VALUE, kwargs=None):
"""Helper function to standardize op scope."""
    # Note: we receive `kwargs` and not `**kwargs` to ensure no collisions on
# other args we choose to take in this function.
with tf.name_scope(self.name):
with tf.name_scope(name) as name_scope:
deps = []
deps.extend(self._initial_parameter_control_dependencies)
deps.extend(self._parameter_control_dependencies(is_init=False))
if value is not UNSET_VALUE:
deps.extend(self._sample_control_dependencies(
value, **({} if kwargs is None else kwargs)))
if not deps:
yield name_scope
return
with tf.control_dependencies(deps) as deps_scope:
yield deps_scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tf.get_static_value(x)
if x_static_val is None:
prod = tf.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=dtype_util.as_numpy_dtype(x.dtype))
x = distribution_util.expand_to_vector(x, tensor_name=name)
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tf.TensorShape(tf.get_static_value(sample_shape))
ndims = tensorshape_util.rank(x.shape)
sample_ndims = tensorshape_util.rank(sample_shape)
batch_ndims = tensorshape_util.rank(self.batch_shape)
event_ndims = tensorshape_util.rank(self.event_shape)
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
tensorshape_util.set_shape(x, [None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = tensorshape_util.concatenate(sample_shape,
[None] * (ndims - sample_ndims))
tensorshape_util.set_shape(x, shape)
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tf.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
tensorshape_util.set_shape(x, shape)
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tf.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
tensorshape_util.set_shape(x, shape)
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if tensorshape_util.rank(static_shape) is not None:
return tensorshape_util.rank(static_shape) == 0
shape = dynamic_shape_fn()
if tf.compat.dimension_value(shape.shape[0]) is not None:
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return tensorshape_util.as_list(shape.shape) == [0]
return tf.equal(tf.shape(shape)[0], 0)
def _parameter_control_dependencies(self, is_init):
"""Returns a list of ops to be executed in members with graph deps.
Typically subclasses override this function to return parameter specific
assertions (eg, positivity of `scale`, etc.).
Args:
is_init: Python `bool` indicating that the call site is `__init__`.
Returns:
dependencies: `list`-like of ops to be executed in member functions with
graph dependencies.
"""
return ()
def _sample_control_dependencies(self, value, **kwargs):
"""Returns a list of ops to be executed to validate distribution samples.
The ops are executed in methods that take distribution samples as an
argument (e.g. `log_prob` and `cdf`). They validate that `value` is
within the support of the distribution. Typically subclasses override this
function to return assertions specific to the distribution (e.g. samples
from `Beta` must be between `0` and `1`). By convention, finite bounds of
the support are considered valid samples, since `sample` may output values
that are numerically equivalent to the bounds.
Args:
value: `float` or `double` `Tensor`.
**kwargs: Additional keyword args.
Returns:
assertions: `list`-like of ops to be executed in member functions that
take distribution samples as input.
"""
return ()
class _PrettyDict(dict):
"""`dict` with stable `repr`, `str`."""
def __str__(self):
pairs = (': '.join([str(k), str(v)]) for k, v in sorted(self.items()))
return '{' + ', '.join(pairs) + '}'
def __repr__(self):
pairs = (': '.join([repr(k), repr(v)]) for k, v in sorted(self.items()))
return '{' + ', '.join(pairs) + '}'
def _recursively_replace_dict_for_pretty_dict(x):
"""Recursively replace `dict`s with `_PrettyDict`."""
# We use "PrettyDict" because collections.OrderedDict repr/str has the word
# "OrderedDict" in it. We only want to print "OrderedDict" if in fact the
# input really is an OrderedDict.
if isinstance(x, dict):
return _PrettyDict({
k: _recursively_replace_dict_for_pretty_dict(v)
for k, v in x.items()})
if (isinstance(x, collections.Sequence) and
not isinstance(x, six.string_types)):
args = (_recursively_replace_dict_for_pretty_dict(x_) for x_ in x)
is_named_tuple = (isinstance(x, tuple) and
hasattr(x, '_asdict') and
hasattr(x, '_fields'))
return type(x)(*args) if is_named_tuple else type(x)(args)
if isinstance(x, collections.Mapping):
return type(x)(**{k: _recursively_replace_dict_for_pretty_dict(v)
for k, v in x.items()})
return x
def _str_tensorshape(x):
def _str(s):
if tensorshape_util.rank(s) is None:
return '?'
return str(tensorshape_util.as_list(s)).replace('None', '?')
# Because Python2 `dict`s are unordered, we must replace them with
# `PrettyDict`s so __str__, __repr__ are deterministic.
x = _recursively_replace_dict_for_pretty_dict(x)
return str(tf.nest.map_structure(_str, x)).replace('\'', '')
def _str_dtype(x):
def _str(s):
if s is None:
return '?'
return dtype_util.name(s)
# Because Python2 `dict`s are unordered, we must replace them with
# `PrettyDict`s so __str__, __repr__ are deterministic.
x = _recursively_replace_dict_for_pretty_dict(x)
return str(tf.nest.map_structure(_str, x)).replace('\'', '')
|
the-stack_106_13370
|
#!/usr/bin/env python3
# Copyright (c) 2019 The OPCX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a valid PoS block with a valid coinstake transaction where the
coinstake input prevout is double spent in one of the other transactions in the same block.
'''
from time import sleep
from fake_stake.base_test import OPCX_FakeStakeTest
class PoSDoubleSpend(OPCX_FakeStakeTest):
def run_test(self):
self.description = "Covers the scenario of a valid PoS block with a valid coinstake transaction where the coinstake input prevout is double spent in one of the other transactions in the same block."
self.init_test()
        INITIAL_MINED_BLOCKS = 300
        FORK_DEPTH = 30
        self.NUM_BLOCKS = 3
        # 1) Starting mining blocks
        self.log.info("Mining %d blocks.." % INITIAL_MINED_BLOCKS)
        self.node.generate(INITIAL_MINED_BLOCKS)
# 2) Collect the possible prevouts
self.log.info("Collecting all unspent coins which we generated from mining...")
staking_utxo_list = self.node.listunspent()
# 3) Spam Blocks on the main chain
self.log.info("-- Main chain blocks first")
self.test_spam("Main", staking_utxo_list, fDoubleSpend=True)
sleep(2)
# 4) Mine some block as buffer
self.log.info("Mining %d more blocks..." % FORK_DEPTH)
self.node.generate(FORK_DEPTH)
sleep(2)
# 5) Spam Blocks on a forked chain
self.log.info("-- Forked chain blocks now")
err_msgs = self.test_spam("Forked", staking_utxo_list, fRandomHeight=True, randomRange=FORK_DEPTH, fDoubleSpend=True)
if not len(err_msgs) == 0:
self.log.error("result: " + " | ".join(err_msgs))
raise AssertionError("TEST FAILED")
self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
PoSDoubleSpend().main()
|
the-stack_106_13371
|
from dagster_aws.emr.solids import EmrRunJobFlowSolidDefinition
from moto import mock_emr
from dagster import execute_pipeline, pipeline
@mock_emr
def test_run_emr_job(emr_cluster_config):
@pipeline
def test_pipe():
EmrRunJobFlowSolidDefinition('test', max_wait_time_sec=2, poll_interval_sec=1)()
config = {
'solids': {
'test': {'config': {'job_config': emr_cluster_config, 'aws_region': 'us-west-1'}}
}
}
result = execute_pipeline(test_pipe, config)
assert result.success
|
the-stack_106_13372
|
# encoding = utf-8
__author__ = "Ang Li"
def foo():  # a function whose body contains `yield` is a generator function; calling it returns a generator object
yield 1
yield 100
for i in range(4):
yield i + 100
return 10
f = foo()
print(f)
print(1, next(f)) # 1
print(2, next(f)) # 100
for i in range(4):
print(i+3, next(f)) # 100, 101, 102, 103
try:
    print('last', next(f))  # the generator is exhausted at this point
except StopIteration as e:
    print('last', e.value)  # 10 -- the value from the generator's `return`
|
the-stack_106_13373
|
# Copyright 2019 Yan Yan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from https://github.com/traveller59/spconv/blob/a6ae896726e59e0f5ec9f724a42c8b69e7420762/spconv/utils/__init__.py
import numpy as np
from spconv import spconv_utils
from spconv.spconv_utils import (non_max_suppression_cpu, points_to_voxel_3d_np,
points_to_voxel_3d_np_mean, points_to_voxel_3d_with_filtering,
rbbox_intersection, rbbox_iou, rotate_non_max_suppression_cpu)
try:
from spconv.spconv_utils import non_max_suppression
except ImportError:
pass
def points_to_voxel(points,
voxel_size,
coors_range,
coor_to_voxelidx,
max_points=35,
max_voxels=20000,
full_mean=False,
block_filtering=True,
block_factor=1,
block_size=8,
height_threshold=0.2,
height_high_threshold=3.0,
pad_output=False):
"""convert 3d points(N, >=3) to voxels. This version calculate
everything in one loop. now it takes only 0.8ms(~6k voxels)
with c++ and 3.2ghz cpu.
Args:
points: [N, ndim] float tensor. points[:, :3] contain xyz points and
points[:, 3:] contain other information such as reflectivity.
voxel_size: [3] list/tuple or array, float. xyz, indicate voxel size
coors_range: [6] list/tuple or array, float. indicate voxel range.
format: xyzxyz, minmax
coor_to_voxelidx: int array. used as a dense map.
max_points: int. indicate maximum points contained in a voxel.
max_voxels: int. indicate maximum voxels this function create.
for voxelnet, 20000 is a good choice. you should shuffle points
before call this function because max_voxels may drop some points.
full_mean: bool. if true, all empty points in voxel will be filled with mean
of exist points.
        block_filtering: filter voxels by height; used for lidar point clouds.
            Use a visualization tool to inspect the filtered result.
Returns:
voxels: [M, max_points, ndim] float tensor. only contain points.
coordinates: [M, 3] int32 tensor. zyx format.
num_points_per_voxel: [M] int32 tensor.
"""
if full_mean:
assert block_filtering is False
if not isinstance(voxel_size, np.ndarray):
voxel_size = np.array(voxel_size, dtype=points.dtype)
if not isinstance(coors_range, np.ndarray):
coors_range = np.array(coors_range, dtype=points.dtype)
voxelmap_shape = (coors_range[3:] - coors_range[:3]) / voxel_size
voxelmap_shape = tuple(np.round(voxelmap_shape).astype(np.int32).tolist())
voxelmap_shape = voxelmap_shape[::-1]
num_points_per_voxel = np.zeros(shape=(max_voxels, ), dtype=np.int32)
voxels = np.zeros(
shape=(max_voxels, max_points, points.shape[-1]), dtype=points.dtype)
voxel_point_mask = np.zeros(
shape=(max_voxels, max_points), dtype=points.dtype)
coors = np.zeros(shape=(max_voxels, 3), dtype=np.int32)
res = {
"voxels": voxels,
"coordinates": coors,
"num_points_per_voxel": num_points_per_voxel,
"voxel_point_mask": voxel_point_mask,
}
if full_mean:
means = np.zeros(
shape=(max_voxels, points.shape[-1]), dtype=points.dtype)
voxel_num = points_to_voxel_3d_np_mean(
points, voxels, voxel_point_mask, means, coors,
num_points_per_voxel, coor_to_voxelidx, voxel_size.tolist(),
coors_range.tolist(), max_points, max_voxels)
else:
if block_filtering:
block_shape = [*voxelmap_shape[1:]]
block_shape = [b // block_factor for b in block_shape]
mins = np.full(block_shape, 99999999, dtype=points.dtype)
maxs = np.full(block_shape, -99999999, dtype=points.dtype)
voxel_mask = np.zeros((max_voxels, ), dtype=np.int32)
voxel_num = points_to_voxel_3d_with_filtering(
points, voxels, voxel_point_mask, voxel_mask, mins, maxs,
coors, num_points_per_voxel, coor_to_voxelidx,
voxel_size.tolist(), coors_range.tolist(), max_points,
max_voxels, block_factor, block_size, height_threshold,
height_high_threshold)
voxel_mask = voxel_mask.astype(np.bool_)
coors_ = coors[voxel_mask]
if pad_output:
res["coordinates"][:voxel_num] = coors_
res["voxels"][:voxel_num] = voxels[voxel_mask]
res["voxel_point_mask"][:voxel_num] = voxel_point_mask[
voxel_mask]
res["num_points_per_voxel"][:voxel_num] = num_points_per_voxel[
voxel_mask]
res["coordinates"][voxel_num:] = 0
res["voxels"][voxel_num:] = 0
res["num_points_per_voxel"][voxel_num:] = 0
res["voxel_point_mask"][voxel_num:] = 0
else:
res["coordinates"] = coors_
res["voxels"] = voxels[voxel_mask]
res["num_points_per_voxel"] = num_points_per_voxel[voxel_mask]
res["voxel_point_mask"] = voxel_point_mask[voxel_mask]
voxel_num = coors_.shape[0]
else:
voxel_num = points_to_voxel_3d_np(
points, voxels, voxel_point_mask, coors,
num_points_per_voxel, coor_to_voxelidx, voxel_size.tolist(),
coors_range.tolist(), max_points, max_voxels)
res["voxel_num"] = voxel_num
res["voxel_point_mask"] = res["voxel_point_mask"].reshape(
-1, max_points, 1)
return res
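# A minimal usage sketch for points_to_voxel, assuming the compiled spconv
# extension imported above is available. The voxel size, range and point
# cloud below are made-up placeholder values chosen only for illustration.
def _example_points_to_voxel():
    voxel_size = [0.2, 0.2, 0.4]
    coors_range = [0, -40, -3, 70.4, 40, 1]
    grid_size = np.round(
        (np.array(coors_range[3:]) - np.array(coors_range[:3]))
        / np.array(voxel_size)).astype(np.int64)
    # dense zyx map from voxel coordinate to voxel index; -1 means empty
    coor_to_voxelidx = np.full(tuple(grid_size[::-1]), -1, dtype=np.int32)
    points = np.random.uniform(0, 1, size=(1000, 4)).astype(np.float32)
    res = points_to_voxel(points, voxel_size, coors_range, coor_to_voxelidx,
                          max_points=5, max_voxels=20000,
                          full_mean=False, block_filtering=False)
    # res is a dict; res["voxel_num"] gives the number of non-empty voxels.
    return res["voxels"], res["coordinates"], res["num_points_per_voxel"]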
class VoxelGenerator:
def __init__(self,
voxel_size,
point_cloud_range,
max_num_points,
max_voxels=20000,
full_mean=True):
point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
# [0, -40, -3, 70.4, 40, 1]
voxel_size = np.array(voxel_size, dtype=np.float32)
grid_size = (
point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
grid_size = np.round(grid_size).astype(np.int64)
voxelmap_shape = tuple(np.round(grid_size).astype(np.int32).tolist())
voxelmap_shape = voxelmap_shape[::-1]
self._coor_to_voxelidx = np.full(voxelmap_shape, -1, dtype=np.int32)
self._voxel_size = voxel_size
self._point_cloud_range = point_cloud_range
self._max_num_points = max_num_points
self._max_voxels = max_voxels
self._grid_size = grid_size
self._full_mean = full_mean
def generate(self, points, max_voxels=None):
res = points_to_voxel(points, self._voxel_size,
self._point_cloud_range, self._coor_to_voxelidx,
self._max_num_points, max_voxels
or self._max_voxels, self._full_mean)
voxels = res["voxels"]
coors = res["coordinates"]
num_points_per_voxel = res["num_points_per_voxel"]
voxel_num = res["voxel_num"]
coors = coors[:voxel_num]
voxels = voxels[:voxel_num]
num_points_per_voxel = num_points_per_voxel[:voxel_num]
return (voxels, coors, num_points_per_voxel)
def generate_multi_gpu(self, points, max_voxels=None):
res = points_to_voxel(points, self._voxel_size,
self._point_cloud_range, self._coor_to_voxelidx,
self._max_num_points, max_voxels
or self._max_voxels, self._full_mean)
voxels = res["voxels"]
coors = res["coordinates"]
num_points_per_voxel = res["num_points_per_voxel"]
voxel_num = res["voxel_num"]
return (voxels, coors, num_points_per_voxel)
@property
def voxel_size(self):
return self._voxel_size
@property
def max_num_points_per_voxel(self):
return self._max_num_points
@property
def point_cloud_range(self):
return self._point_cloud_range
@property
def grid_size(self):
return self._grid_size
class VoxelGeneratorV2:
def __init__(self,
voxel_size, #[0.05, 0.05, 0.1]
point_cloud_range, # [0, -40, -3, 70.4, 40, 1]
max_num_points, #5 #1
max_voxels=20000,
full_mean=False,
block_filtering=False,
block_factor=8, #0 #1
block_size=3, #0 #8
height_threshold=0.1, #0 0.2
height_high_threshold=2.0):
assert full_mean is False, "don't use this."
point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
# [0, -40, -3, 70.4, 40, 1]
voxel_size = np.array(voxel_size, dtype=np.float32)
grid_size = (
point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size
grid_size = np.round(grid_size).astype(np.int64)
if block_filtering:
assert block_size > 0
assert grid_size[0] % block_factor == 0
assert grid_size[1] % block_factor == 0
voxelmap_shape = tuple(np.round(grid_size).astype(np.int32).tolist())
voxelmap_shape = voxelmap_shape[::-1]
self._coor_to_voxelidx = np.full(voxelmap_shape, -1, dtype=np.int32)
self._voxel_size = voxel_size
self._point_cloud_range = point_cloud_range
self._max_num_points = max_num_points
self._max_voxels = max_voxels
self._grid_size = grid_size
self._full_mean = full_mean
self._block_filtering = block_filtering
self._block_factor = block_factor
self._height_threshold = height_threshold
self._block_size = block_size
self._height_high_threshold = height_high_threshold
def generate(self, points, max_voxels=None):
res = points_to_voxel(
points, self._voxel_size, self._point_cloud_range,
self._coor_to_voxelidx, self._max_num_points, max_voxels
or self._max_voxels, self._full_mean, self._block_filtering,
self._block_factor, self._block_size, self._height_threshold,
self._height_high_threshold)
for k, v in res.items():
if k != "voxel_num":
res[k] = v[:res["voxel_num"]]
return res
def generate_multi_gpu(self, points, max_voxels=None):
res = points_to_voxel(
points,
self._voxel_size,
self._point_cloud_range,
self._coor_to_voxelidx,
self._max_num_points,
max_voxels or self._max_voxels,
self._full_mean,
self._block_filtering,
self._block_factor,
self._block_size,
self._height_threshold,
self._height_high_threshold,
pad_output=True)
return res
@property
def voxel_size(self):
return self._voxel_size
@property
def max_num_points_per_voxel(self):
return self._max_num_points
@property
def point_cloud_range(self):
return self._point_cloud_range
@property
def grid_size(self):
return self._grid_size
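# Hedged sketch of the VoxelGeneratorV2 front-end defined above; the
# construction parameters and the random point cloud are placeholder values
# used only to illustrate the call sequence.
def _example_voxel_generator_v2():
    gen = VoxelGeneratorV2(voxel_size=[0.2, 0.2, 0.4],
                           point_cloud_range=[0, -40, -3, 70.4, 40, 1],
                           max_num_points=5,
                           max_voxels=20000)
    pts = np.random.uniform(0, 1, size=(1000, 4)).astype(np.float32)
    res = gen.generate(pts)  # dict with voxels, coordinates, ... (see above)
    return res["voxels"].shape, res["coordinates"].shape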
|
the-stack_106_13374
|
import NuralNetwork as N
N.clearScreen()
# Load the training set: each row holds 400 input features (a flattened
# example image) followed by its class label in the last column.
dataTraining = N.loadData("dataTraining.txt")
X = dataTraining[:, 0:400]
y = dataTraining[:, 400:401]
m = X.shape[0]
# Visualize 100 randomly selected training examples.
rand_indices = N.getRandomValues(m)
sel = X[rand_indices[:100], :]
N.displayData(sel)
# Network architecture: 400 inputs, one hidden layer of 25 units, 10 classes.
input_layer_size = 400
hidden_layer_size = 25
num_labels = 10
# Randomly initialize the weights to break symmetry.
initial_Theta1 = N.randInitializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = N.randInitializeWeights(hidden_layer_size, num_labels)
# Unroll parameters into a single vector for the optimizer.
initial_nn_params = N.concatenateVectors(initial_Theta1.reshape(1, initial_Theta1.size), initial_Theta2.reshape(1, initial_Theta2.size))
initial_nn_params = initial_nn_params.flatten()
# Train with regularization strength lambda_reg for a fixed number of iterations.
maxiter = 50
lambda_reg = 3
nn_params = N.nnOptimize(X, y, initial_nn_params, input_layer_size, hidden_layer_size, num_labels, lambda_reg, maxiter)
# Evaluate accuracy on the training set.
yPred = N.nnPredict(nn_params, input_layer_size, hidden_layer_size, num_labels, X)
print("Accuracy=" + str(N.accurracy(yPred, y)))
# Reshape the flat parameter vector back into the two weight matrices and
# visualize the features learned by the hidden layer.
Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)]
Theta2 = nn_params[hidden_layer_size * (input_layer_size + 1):]
Theta1.shape = (hidden_layer_size, input_layer_size + 1)
Theta2.shape = (num_labels, hidden_layer_size + 1)
N.displayData(Theta1[:, 1:])
|
the-stack_106_13376
|
import urllib.request
import csv
def save_file():
"""
    Pulls all CVE records from the Debian security-tracker CVE list.
None -> list
"""
URL = 'https://salsa.debian.org/security-tracker-team/security-tracker/raw/master/data/CVE/list'
file = urllib.request.urlopen(URL).readlines()
generic = [line.strip().decode() for line in file]
result = list()
i = 0
while True:
try:
if generic[i].startswith('CVE'):
if ')' in generic[i]:
flag = False
else:
flag = True
header = [generic[i]]
i += 1
notes = list()
while not generic[i].startswith('CVE'):
if 'NOT-FOR-US' in generic[i] or 'RESERVED' in generic[i] \
or 'NOTE' in generic[i] or 'TODO' in generic[i]:
notes.append(generic[i])
i += 1
elif generic[i].startswith('-'):
notes.append(generic[i])
i += 1
else:
if flag:
header[0] += generic[i]
i += 1
else:
notes.append(generic[i])
i += 1
result.append(header + notes)
else:
i += 1
except IndexError:
print('Finished')
break
return result
def write_to_txt(data, filename, attr='w'):
"""
Writes data from tracking list to txt file
(list, str, str) -> None
"""
f = open(filename, attr, encoding='utf-8', errors='ignore')
for collection in data:
for item in collection:
f.write(item)
f.write('\n')
f.write('----------------------------\n')
f.close()
def write_to_csv(data, filename, attr='w'):
"""
Writes data from tracking list to csv file
(list, str, str) -> None
"""
    # Use a context manager so the file handle is closed after writing.
    with open(filename, attr, newline='') as f:
        writer = csv.writer(f)
        for collection in data:
            writer.writerow(list(collection))
if __name__ == '__main__':
data = save_file()
write_to_txt(data, 'data_main.txt')
write_to_csv(data, 'data_main.csv')
|
the-stack_106_13378
|
#! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): Flesh this out considerably. We focused on reflection_test.py
# first, since it's testing the subtler code, and since it provides decent
# indirect testing of the protocol compiler output.
"""Unittest that directly tests the output of the pure-Python protocol
compiler. See //google/protobuf/reflection_test.py for a test which
further ensures that we can use Python protocol message objects as we expect.
"""
__author__ = '[email protected] (Will Robinson)'
import unittest
from google.protobuf.internal import test_bad_identifiers_pb2
from google.protobuf import unittest_custom_options_pb2
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_import_public_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_no_generic_services_pb2
from google.protobuf import service
MAX_EXTENSION = 536870912
class GeneratorTest(unittest.TestCase):
def testNestedMessageDescriptor(self):
field_name = 'optional_nested_message'
proto_type = unittest_pb2.TestAllTypes
self.assertEqual(
proto_type.NestedMessage.DESCRIPTOR,
proto_type.DESCRIPTOR.fields_by_name[field_name].message_type)
def testEnums(self):
# We test only module-level enums here.
# TODO(robinson): Examine descriptors directly to check
# enum descriptor output.
self.assertEqual(4, unittest_pb2.FOREIGN_FOO)
self.assertEqual(5, unittest_pb2.FOREIGN_BAR)
self.assertEqual(6, unittest_pb2.FOREIGN_BAZ)
proto = unittest_pb2.TestAllTypes()
self.assertEqual(1, proto.FOO)
self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
self.assertEqual(2, proto.BAR)
self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
self.assertEqual(3, proto.BAZ)
self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)
def testExtremeDefaultValues(self):
message = unittest_pb2.TestExtremeDefaultValues()
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
# NaN is never equal to itself.
return val != val
def isinf(val):
# Infinity times zero equals NaN.
return not isnan(val) and isnan(val * 0)
self.assertTrue(isinf(message.inf_double))
self.assertTrue(message.inf_double > 0)
self.assertTrue(isinf(message.neg_inf_double))
self.assertTrue(message.neg_inf_double < 0)
self.assertTrue(isnan(message.nan_double))
self.assertTrue(isinf(message.inf_float))
self.assertTrue(message.inf_float > 0)
self.assertTrue(isinf(message.neg_inf_float))
self.assertTrue(message.neg_inf_float < 0)
self.assertTrue(isnan(message.nan_float))
self.assertEqual("? ? ?? ?? ??? ??/ ??-", message.cpp_trigraph)
def testHasDefaultValues(self):
desc = unittest_pb2.TestAllTypes.DESCRIPTOR
expected_has_default_by_name = {
'optional_int32': False,
'repeated_int32': False,
'optional_nested_message': False,
'default_int32': True,
}
has_default_by_name = dict(
[(f.name, f.has_default_value)
for f in desc.fields
if f.name in expected_has_default_by_name])
self.assertEqual(expected_has_default_by_name, has_default_by_name)
def testContainingTypeBehaviorForExtensions(self):
self.assertEqual(unittest_pb2.optional_int32_extension.containing_type,
unittest_pb2.TestAllExtensions.DESCRIPTOR)
self.assertEqual(unittest_pb2.TestRequired.single.containing_type,
unittest_pb2.TestAllExtensions.DESCRIPTOR)
def testExtensionScope(self):
self.assertEqual(unittest_pb2.optional_int32_extension.extension_scope,
None)
self.assertEqual(unittest_pb2.TestRequired.single.extension_scope,
unittest_pb2.TestRequired.DESCRIPTOR)
def testIsExtension(self):
self.assertTrue(unittest_pb2.optional_int32_extension.is_extension)
self.assertTrue(unittest_pb2.TestRequired.single.is_extension)
message_descriptor = unittest_pb2.TestRequired.DESCRIPTOR
non_extension_descriptor = message_descriptor.fields_by_name['a']
self.assertTrue(not non_extension_descriptor.is_extension)
def testOptions(self):
proto = unittest_mset_pb2.TestMessageSet()
self.assertTrue(proto.DESCRIPTOR.GetOptions().message_set_wire_format)
def testMessageWithCustomOptions(self):
proto = unittest_custom_options_pb2.TestMessageWithCustomOptions()
enum_options = proto.DESCRIPTOR.enum_types_by_name['AnEnum'].GetOptions()
self.assertTrue(enum_options is not None)
    # TODO(gps): We really should test for the presence of the enum_opt1
# extension and for its value to be set to -789.
def testNestedTypes(self):
    self.assertEqual(
set(unittest_pb2.TestAllTypes.DESCRIPTOR.nested_types),
set([
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR,
unittest_pb2.TestAllTypes.OptionalGroup.DESCRIPTOR,
unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR,
]))
self.assertEqual(unittest_pb2.TestEmptyMessage.DESCRIPTOR.nested_types, [])
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.nested_types, [])
def testContainingType(self):
self.assertTrue(
unittest_pb2.TestEmptyMessage.DESCRIPTOR.containing_type is None)
self.assertTrue(
unittest_pb2.TestAllTypes.DESCRIPTOR.containing_type is None)
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type,
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type,
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertEqual(
unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR.containing_type,
unittest_pb2.TestAllTypes.DESCRIPTOR)
def testContainingTypeInEnumDescriptor(self):
self.assertTrue(unittest_pb2._FOREIGNENUM.containing_type is None)
self.assertEqual(unittest_pb2._TESTALLTYPES_NESTEDENUM.containing_type,
unittest_pb2.TestAllTypes.DESCRIPTOR)
def testPackage(self):
self.assertEqual(
unittest_pb2.TestAllTypes.DESCRIPTOR.file.package,
'protobuf_unittest')
desc = unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR
self.assertEqual(desc.file.package, 'protobuf_unittest')
self.assertEqual(
unittest_import_pb2.ImportMessage.DESCRIPTOR.file.package,
'protobuf_unittest_import')
self.assertEqual(
unittest_pb2._FOREIGNENUM.file.package, 'protobuf_unittest')
self.assertEqual(
unittest_pb2._TESTALLTYPES_NESTEDENUM.file.package,
'protobuf_unittest')
self.assertEqual(
unittest_import_pb2._IMPORTENUM.file.package,
'protobuf_unittest_import')
def testExtensionRange(self):
self.assertEqual(
unittest_pb2.TestAllTypes.DESCRIPTOR.extension_ranges, [])
self.assertEqual(
unittest_pb2.TestAllExtensions.DESCRIPTOR.extension_ranges,
[(1, MAX_EXTENSION)])
self.assertEqual(
unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR.extension_ranges,
[(42, 43), (4143, 4244), (65536, MAX_EXTENSION)])
def testFileDescriptor(self):
self.assertEqual(unittest_pb2.DESCRIPTOR.name,
'google/protobuf/unittest.proto')
self.assertEqual(unittest_pb2.DESCRIPTOR.package, 'protobuf_unittest')
self.assertFalse(unittest_pb2.DESCRIPTOR.serialized_pb is None)
def testNoGenericServices(self):
self.assertTrue(hasattr(unittest_no_generic_services_pb2, "TestMessage"))
self.assertTrue(hasattr(unittest_no_generic_services_pb2, "FOO"))
self.assertTrue(hasattr(unittest_no_generic_services_pb2, "test_extension"))
# Make sure unittest_no_generic_services_pb2 has no services subclassing
# Proto2 Service class.
if hasattr(unittest_no_generic_services_pb2, "TestService"):
self.assertFalse(issubclass(unittest_no_generic_services_pb2.TestService,
service.Service))
def testMessageTypesByName(self):
file_type = unittest_pb2.DESCRIPTOR
self.assertEqual(
unittest_pb2._TESTALLTYPES,
file_type.message_types_by_name[unittest_pb2._TESTALLTYPES.name])
# Nested messages shouldn't be included in the message_types_by_name
# dictionary (like in the C++ API).
self.assertFalse(
unittest_pb2._TESTALLTYPES_NESTEDMESSAGE.name in
file_type.message_types_by_name)
def testPublicImports(self):
# Test public imports as embedded message.
all_type_proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, all_type_proto.optional_public_import_message.e)
# PublicImportMessage is actually defined in unittest_import_public_pb2
# module, and is public imported by unittest_import_pb2 module.
public_import_proto = unittest_import_pb2.PublicImportMessage()
self.assertEqual(0, public_import_proto.e)
self.assertTrue(unittest_import_public_pb2.PublicImportMessage is
unittest_import_pb2.PublicImportMessage)
def testBadIdentifiers(self):
# We're just testing that the code was imported without problems.
message = test_bad_identifiers_pb2.TestBadIdentifiers()
self.assertEqual(message.Extensions[test_bad_identifiers_pb2.message],
"foo")
self.assertEqual(message.Extensions[test_bad_identifiers_pb2.descriptor],
"bar")
self.assertEqual(message.Extensions[test_bad_identifiers_pb2.reflection],
"baz")
self.assertEqual(message.Extensions[test_bad_identifiers_pb2.service],
"qux")
if __name__ == '__main__':
unittest.main()
|
the-stack_106_13379
|
from os.path import join as opj
import numpy as np
from pandas import DataFrame, read_sql_query
import matplotlib.pylab as plt
from matplotlib import cm as mcmap
from matplotlib.ticker import MaxNLocator
from configs.nucleus_style_defaults import Interrater as ir
from interrater.interrater_utils import _connect_to_anchor_db, \
_maybe_mkdir, _annotate_krippendorph_ranges
def _kripp_subplot(
krippendorph_summary, axis, what, is_agreement, cutoff=None):
""""""
minx = np.min(krippendorph_summary.loc[:, f'n_matches'])
maxx = np.max(krippendorph_summary.loc[:, f'n_matches'])
if is_agreement:
# annotate agreement ranges
_annotate_krippendorph_ranges(
axis=axis, minx=minx, maxx=maxx, shades=False)
mx = 0
for min_iou in [0.75, 0.5, 0.25]:
ksummary = krippendorph_summary.loc[
krippendorph_summary.loc[:, 'min_iou'] >= min_iou - 0.005, :]
ksummary = ksummary.loc[ksummary.loc[
:, 'min_iou'] < min_iou + 0.005, :]
x = ksummary.loc[:, f'n_matches']
if is_agreement:
y = ksummary.loc[:, what]
else:
# y = 100 * ksummary.loc[:, what] / np.max(
# ksummary.loc[:, what].values)
y = ksummary.loc[:, what]
mx = np.max([np.max(y), mx])
axis.plot(
x, y,
marker='o',
# marker='o' if min_iou == miniou else '.',
linestyle='-',
# linestyle='-' if min_iou == miniou else '--',
# linewidth=2. if min_iou == miniou else 1.,
linewidth=1.5,
c=mcmap.YlOrRd(min_iou + 0.2),
# alpha=1. if min_iou == miniou else 0.7,
label='min_iou=%.2f' % min_iou)
axis.xaxis.set_major_locator(MaxNLocator(integer=True))
if is_agreement:
ymin = -0.02 # -0.22
ymax = 1.02
else:
ymin = -mx * 0.02
ymax = mx * 1.02
# axis.set_ylim(0, 100)
axis.set_ylim(ymin=0)
minx = minx * 0.98
maxx = maxx * 1.02
if cutoff is not None:
axis.fill_betweenx(
y=[ymin, ymax], x1=minx, x2=cutoff - 0.2, color='gray', alpha=0.2)
axis.set_ylim(ymin, ymax)
axis.set_xlim(minx, maxx)
axis.legend(fontsize=8)
axis.set_title(what.replace('_', ' '), fontsize=14, fontweight='bold')
axis.set_xlabel(
f'At least x participants per anchor', fontsize=12)
axis.set_ylabel(
# 'Krippendorph Alpha' if is_agreement else '% anchors kept',
'Krippendorph Alpha' if is_agreement else 'No. of anchors',
fontsize=12)
def plot_krippendorph_figure(
savedir: str, krippendorph_summary: DataFrame,
unbiased_is_truth: bool, evalset: str, whoistruth: str,
who: str, whichanchors: str):
""""""
path0 = opj(savedir, f'{whoistruth}AreTruth')
path1 = opj(path0, f'{evalset}')
for path in [path0, path1]:
_maybe_mkdir(path)
ubstr = ir._ubstr(unbiased_is_truth)
print(
f'Plotting Krippendorph for {evalset}: {ubstr}{whoistruth}AreTruth: '
f'{who}: {whichanchors} anchors')
ksummary = krippendorph_summary.loc[krippendorph_summary.loc[
:, 'unbiased_is_truth'] == unbiased_is_truth, :]
ksummary = ksummary.loc[
ksummary.loc[:, 'evalset'] == evalset, :]
ksummary = ksummary.loc[
ksummary.loc[:, 'whoistruth'] == whoistruth, :]
ksummary = ksummary.loc[ksummary.loc[:, 'who'] == who, :]
ksummary = ksummary.loc[
ksummary.loc[:, 'whichanchors'] == whichanchors, :]
cutoff = ir.MIN_DETECTIONS_PER_ANCHOR if any([
(evalset == 'U-control') and (who == whoistruth),
(evalset != 'U-control') and (who == whoistruth) and (not unbiased_is_truth), # noqa
]) else None
if ksummary.shape[0] < 1:
return
nrows = 1
nperrow = 3
fig, ax = plt.subplots(nrows, nperrow, figsize=(
5 * nperrow, 5.5 * nrows))
for axno, axis in enumerate(ax.ravel()):
if axno == 0:
_kripp_subplot(
axis=axis, what='n_anchors', is_agreement=False,
cutoff=cutoff, krippendorph_summary=ksummary)
elif axno == 1:
_kripp_subplot(
axis=axis, what='detection_and_classification', cutoff=cutoff,
is_agreement=True, krippendorph_summary=ksummary)
elif axno == 2:
_kripp_subplot(
axis=axis, what='classification', is_agreement=True,
cutoff=cutoff, krippendorph_summary=ksummary)
plt.tight_layout(pad=0.3, w_pad=0.5, h_pad=0.3)
plt.savefig(opj(
path1, f'krippendorph_{evalset}_{who}_{ubstr}{whoistruth}_AreTruth'
f'_{whichanchors}.svg'))
plt.close()
def plot_krippendorph_summary(savepath, clsgroup):
""""""
# connect to database
dbcon = _connect_to_anchor_db(opj(savepath, '..'))
# get krippendorph summary table
krippendorph_summary = read_sql_query(f"""
SELECT * FROM "Krippendorph_byAnchorSubsets"
WHERE "class_grouping" = "{clsgroup}"
;""", dbcon)
# now plot
savedir = opj(savepath, '..', 'i10_Krippendorph', f'plots_{clsgroup}')
_maybe_mkdir(savedir)
_ = [
plot_krippendorph_figure(
savedir=savedir, krippendorph_summary=krippendorph_summary,
unbiased_is_truth=unbiased_is_truth, evalset=evalset,
whoistruth=whoistruth, who=who, whichanchors=whichanchors,
)
for evalset in ir.MAIN_EVALSET_NAMES
for unbiased_is_truth in [True, False]
for whoistruth in ir.CONSENSUS_WHOS
for who in ir.CONSENSUS_WHOS
for whichanchors in ['v2.1_consensus', 'v2.2_excluded']
]
def main():
DATASETNAME = 'CURATED_v1_2020-03-29_EVAL'
# where to save stuff
BASEPATH = '/home/mtageld/Desktop/cTME/results/tcga-nucleus/interrater/'
SAVEPATH = opj(BASEPATH, DATASETNAME, 'i1_anchors')
kpath = opj(BASEPATH, DATASETNAME, 'i10_Krippendorph')
_maybe_mkdir(kpath)
# get krippendorph summary
for clsgroup in ['main', 'super']:
plot_krippendorph_summary(savepath=SAVEPATH, clsgroup=clsgroup)
# %%===========================================================================
if __name__ == '__main__':
main()
|
the-stack_106_13380
|
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSD MobilenetV2 NAS-FPN Feature Extractor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from six.moves import range
import tensorflow as tf
import tf_slim as slim
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
Block = collections.namedtuple(
'Block', ['inputs', 'output_level', 'kernel_size', 'expansion_size'])
_MNASFPN_CELL_CONFIG = [
Block(inputs=(1, 2), output_level=4, kernel_size=3, expansion_size=256),
Block(inputs=(0, 4), output_level=3, kernel_size=3, expansion_size=128),
Block(inputs=(5, 4), output_level=4, kernel_size=3, expansion_size=128),
Block(inputs=(4, 3), output_level=5, kernel_size=5, expansion_size=128),
Block(inputs=(4, 3), output_level=6, kernel_size=3, expansion_size=96),
]
MNASFPN_DEF = dict(
feature_levels=[3, 4, 5, 6],
spec=[_MNASFPN_CELL_CONFIG] * 4,
)
def _maybe_pad(feature, use_explicit_padding, kernel_size=3):
return ops.fixed_padding(feature,
kernel_size) if use_explicit_padding else feature
# Wrapper around mobilenet.depth_multiplier
def _apply_multiplier(d, multiplier, min_depth):
p = {'num_outputs': d}
mobilenet.depth_multiplier(
p, multiplier=multiplier, divisible_by=8, min_depth=min_depth)
return p['num_outputs']
def _apply_size_dependent_ordering(input_feature, feature_level, block_level,
expansion_size, use_explicit_padding,
use_native_resize_op):
"""Applies Size-Dependent-Ordering when resizing feature maps.
See https://arxiv.org/abs/1912.01106
Args:
input_feature: input feature map to be resized.
feature_level: the level of the input feature.
block_level: the desired output level for the block.
expansion_size: the expansion size for the block.
use_explicit_padding: Whether to use explicit padding.
use_native_resize_op: Whether to use native resize op.
Returns:
A transformed feature at the desired resolution and expansion size.
"""
padding = 'VALID' if use_explicit_padding else 'SAME'
if feature_level >= block_level: # Perform 1x1 then upsampling.
node = slim.conv2d(
input_feature,
expansion_size, [1, 1],
activation_fn=None,
normalizer_fn=slim.batch_norm,
padding=padding,
scope='Conv1x1')
if feature_level == block_level:
return node
scale = 2**(feature_level - block_level)
if use_native_resize_op:
input_shape = shape_utils.combined_static_and_dynamic_shape(node)
node = tf.image.resize_nearest_neighbor(
node, [input_shape[1] * scale, input_shape[2] * scale])
else:
node = ops.nearest_neighbor_upsampling(node, scale=scale)
else: # Perform downsampling then 1x1.
stride = 2**(block_level - feature_level)
node = slim.max_pool2d(
_maybe_pad(input_feature, use_explicit_padding), [3, 3],
stride=[stride, stride],
padding=padding,
scope='Downsample')
node = slim.conv2d(
node,
expansion_size, [1, 1],
activation_fn=None,
normalizer_fn=slim.batch_norm,
padding=padding,
scope='Conv1x1')
return node
def _mnasfpn_cell(feature_maps,
feature_levels,
cell_spec,
output_channel=48,
use_explicit_padding=False,
use_native_resize_op=False,
multiplier_func=None):
"""Create a MnasFPN cell.
Args:
feature_maps: input feature maps.
feature_levels: levels of the feature maps.
cell_spec: A list of Block configs.
output_channel: Number of features for the input, output and intermediate
feature maps.
use_explicit_padding: Whether to use explicit padding.
use_native_resize_op: Whether to use native resize op.
multiplier_func: Depth-multiplier function. If None, use identity function.
Returns:
A transformed list of feature maps at the same resolutions as the inputs.
"""
# This is the level where multipliers are realized.
if multiplier_func is None:
multiplier_func = lambda x: x
num_outputs = len(feature_maps)
cell_features = list(feature_maps)
cell_levels = list(feature_levels)
padding = 'VALID' if use_explicit_padding else 'SAME'
for bi, block in enumerate(cell_spec):
with tf.variable_scope('block_{}'.format(bi)):
block_level = block.output_level
intermediate_feature = None
for i, inp in enumerate(block.inputs):
with tf.variable_scope('input_{}'.format(i)):
input_level = cell_levels[inp]
node = _apply_size_dependent_ordering(
cell_features[inp], input_level, block_level,
multiplier_func(block.expansion_size), use_explicit_padding,
use_native_resize_op)
# Add features incrementally to avoid producing AddN, which doesn't
# play well with TfLite.
if intermediate_feature is None:
intermediate_feature = node
else:
intermediate_feature += node
node = tf.nn.relu6(intermediate_feature)
node = slim.separable_conv2d(
_maybe_pad(node, use_explicit_padding, block.kernel_size),
multiplier_func(output_channel),
block.kernel_size,
activation_fn=None,
normalizer_fn=slim.batch_norm,
padding=padding,
scope='SepConv')
cell_features.append(node)
cell_levels.append(block_level)
# Cell-wide residuals.
out_idx = range(len(cell_features) - num_outputs, len(cell_features))
for in_i, out_i in enumerate(out_idx):
if cell_features[out_i].shape.as_list(
) == cell_features[in_i].shape.as_list():
cell_features[out_i] += cell_features[in_i]
return cell_features[-num_outputs:]
def mnasfpn(feature_maps,
head_def,
output_channel=48,
use_explicit_padding=False,
use_native_resize_op=False,
multiplier_func=None):
"""Create the MnasFPN head given head_def."""
features = feature_maps
for ci, cell_spec in enumerate(head_def['spec']):
with tf.variable_scope('cell_{}'.format(ci)):
features = _mnasfpn_cell(features, head_def['feature_levels'], cell_spec,
output_channel, use_explicit_padding,
use_native_resize_op, multiplier_func)
return features
def training_scope(l2_weight_decay=1e-4, is_training=None):
"""Arg scope for training MnasFPN."""
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.initializers.he_normal(),
weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \
slim.arg_scope(
[slim.separable_conv2d],
weights_initializer=tf.initializers.truncated_normal(
stddev=0.536), # He_normal for 3x3 depthwise kernel.
weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \
slim.arg_scope([slim.batch_norm],
is_training=is_training,
epsilon=0.01,
decay=0.99,
center=True,
scale=True) as s:
return s
class SSDMobileNetV2MnasFPNFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV2 MnasFPN features."""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
fpn_min_level=3,
fpn_max_level=6,
additional_layer_depth=48,
head_def=None,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
use_native_resize_op=False,
override_base_feature_extractor_hyperparams=False,
data_format='channels_last'):
"""SSD MnasFPN feature extractor based on Mobilenet v2 architecture.
See https://arxiv.org/abs/1912.01106
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the base
feature extractor.
fpn_min_level: the highest resolution feature map to use in MnasFPN.
Currently the only valid value is 3.
fpn_max_level: the smallest resolution feature map to construct or use in
        MnasFPN. Currently the only valid value is 6.
additional_layer_depth: additional feature map layer channel depth for
NAS-FPN.
head_def: A dictionary specifying the MnasFPN head architecture. Default
uses MNASFPN_DEF.
reuse_weights: whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
use_native_resize_op: Whether to use native resize op. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
      data_format: The ordering of the dimensions in the inputs. The valid
        values are {'channels_first', 'channels_last'}.
"""
super(SSDMobileNetV2MnasFPNFeatureExtractor, self).__init__(
is_training=is_training,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams_fn=conv_hyperparams_fn,
reuse_weights=reuse_weights,
use_explicit_padding=use_explicit_padding,
use_depthwise=use_depthwise,
override_base_feature_extractor_hyperparams=(
override_base_feature_extractor_hyperparams))
if fpn_min_level != 3 or fpn_max_level != 6:
raise ValueError('Min and max levels of MnasFPN must be 3 and 6 for now.')
self._fpn_min_level = fpn_min_level
self._fpn_max_level = fpn_max_level
self._fpn_layer_depth = additional_layer_depth
self._head_def = head_def if head_def else MNASFPN_DEF
self._data_format = data_format
self._use_native_resize_op = use_native_resize_op
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def _verify_config(self, inputs):
"""Verify that MnasFPN config and its inputs."""
num_inputs = len(inputs)
assert len(self._head_def['feature_levels']) == num_inputs
base_width = inputs[0].shape.as_list(
)[1] * 2**self._head_def['feature_levels'][0]
for i in range(1, num_inputs):
width = inputs[i].shape.as_list()[1]
level = self._head_def['feature_levels'][i]
expected_width = base_width // 2**level
if width != expected_width:
raise ValueError(
'Resolution of input {} does not match its level {}.'.format(
i, level))
for cell_spec in self._head_def['spec']:
# The last K nodes in a cell are the inputs to the next cell. Assert that
# their feature maps are at the right level.
for i in range(num_inputs):
if cell_spec[-num_inputs +
i].output_level != self._head_def['feature_levels'][i]:
raise ValueError(
'Mismatch between node level {} and desired output level {}.'
.format(cell_spec[-num_inputs + i].output_level,
self._head_def['feature_levels'][i]))
      # Assert that each block only uses preceding blocks.
for bi, block_spec in enumerate(cell_spec):
for inp in block_spec.inputs:
if inp >= bi + num_inputs:
raise ValueError(
'Block {} is trying to access uncreated block {}.'.format(
bi, inp))
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV2', reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v2.training_scope(is_training=None, bn_decay=0.99)), \
slim.arg_scope(
[mobilenet.depth_multiplier], min_depth=self._min_depth):
with slim.arg_scope(
training_scope(l2_weight_decay=4e-5,
is_training=self._is_training)):
_, image_features = mobilenet_v2.mobilenet_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='layer_18',
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
multiplier_func = functools.partial(
_apply_multiplier,
multiplier=self._depth_multiplier,
min_depth=self._min_depth)
with tf.variable_scope('MnasFPN', reuse=self._reuse_weights):
with slim.arg_scope(
training_scope(l2_weight_decay=1e-4, is_training=self._is_training)):
# Create C6 by downsampling C5.
c6 = slim.max_pool2d(
_maybe_pad(image_features['layer_18'], self._use_explicit_padding),
[3, 3],
stride=[2, 2],
padding='VALID' if self._use_explicit_padding else 'SAME',
scope='C6_downsample')
c6 = slim.conv2d(
c6,
multiplier_func(self._fpn_layer_depth),
[1, 1],
activation_fn=tf.identity,
normalizer_fn=slim.batch_norm,
weights_regularizer=None, # this 1x1 has no kernel regularizer.
padding='VALID',
scope='C6_Conv1x1')
image_features['C6'] = tf.identity(c6) # Needed for quantization.
for k in sorted(image_features.keys()):
tf.logging.error('{}: {}'.format(k, image_features[k]))
mnasfpn_inputs = [
image_features['layer_7'], # C3
image_features['layer_14'], # C4
image_features['layer_18'], # C5
image_features['C6'] # C6
]
self._verify_config(mnasfpn_inputs)
feature_maps = mnasfpn(
mnasfpn_inputs,
head_def=self._head_def,
output_channel=self._fpn_layer_depth,
use_explicit_padding=self._use_explicit_padding,
use_native_resize_op=self._use_native_resize_op,
multiplier_func=multiplier_func)
return feature_maps
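# Hedged construction sketch: in practice this extractor is built by the
# TF Object Detection API from a pipeline config; the slim arg_scope below is
# only a trivial stand-in for the configured conv hyperparameters, and the
# numeric arguments are placeholder values.
def _example_build_mnasfpn_extractor(is_training=True):
  def _conv_hyperparams_fn():
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm) as sc:
      return sc
  return SSDMobileNetV2MnasFPNFeatureExtractor(
      is_training=is_training,
      depth_multiplier=1.0,
      min_depth=16,
      pad_to_multiple=32,
      conv_hyperparams_fn=_conv_hyperparams_fn)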
|
the-stack_106_13381
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...table import Table
from ...worksheet import Worksheet
from ...workbook import WorksheetMeta
from ...sharedstrings import SharedStringTable
class TestAssembleTable(unittest.TestCase):
"""
Test assembling a complete Table file.
"""
def test_assemble_xml_file(self):
"""Test writing a table"""
self.maxDiff = None
worksheet = Worksheet()
worksheet.worksheet_meta = WorksheetMeta()
worksheet.str_table = SharedStringTable()
# Set the table properties.
worksheet.add_table('C3:F13', {'columns': [{'header': 'Foo'},
{'header': ''},
{},
{'header': 'Baz'}
]})
worksheet._prepare_tables(1, {})
fh = StringIO()
table = Table()
table._set_filehandle(fh)
table._set_properties(worksheet.tables[0])
table._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<table xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" id="1" name="Table1" displayName="Table1" ref="C3:F13" totalsRowShown="0">
<autoFilter ref="C3:F13"/>
<tableColumns count="4">
<tableColumn id="1" name="Foo"/>
<tableColumn id="2" name="Column2"/>
<tableColumn id="3" name="Column3"/>
<tableColumn id="4" name="Baz"/>
</tableColumns>
<tableStyleInfo name="TableStyleMedium9" showFirstColumn="0" showLastColumn="0" showRowStripes="1" showColumnStripes="0"/>
</table>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
the-stack_106_13383
|
# coding: utf-8
"""
Compliance API
Service for providing information to sellers about their listings being non-compliant, or at risk for becoming non-compliant, against eBay listing policies. # noqa: E501
OpenAPI spec version: 1.4.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SuppressViolationRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'compliance_type': 'str',
'listing_id': 'str'
}
attribute_map = {
'compliance_type': 'complianceType',
'listing_id': 'listingId'
}
def __init__(self, compliance_type=None, listing_id=None): # noqa: E501
"""SuppressViolationRequest - a model defined in Swagger""" # noqa: E501
self._compliance_type = None
self._listing_id = None
self.discriminator = None
if compliance_type is not None:
self.compliance_type = compliance_type
if listing_id is not None:
self.listing_id = listing_id
@property
def compliance_type(self):
"""Gets the compliance_type of this SuppressViolationRequest. # noqa: E501
The compliance type of the listing violation to suppress is specified in this field. The compliance type for each listing violation is found in the complianceType field under the listingViolations array in a getListingViolations response. Note: At this time, the suppressViolation method is only used to suppress aspect adoption listing violations in the 'at-risk' state, so ASPECTS_ADOPTION is currently the only supported value for this field. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/compliance/types/com:ComplianceTypeEnum'>eBay API documentation</a> # noqa: E501
:return: The compliance_type of this SuppressViolationRequest. # noqa: E501
:rtype: str
"""
return self._compliance_type
@compliance_type.setter
def compliance_type(self, compliance_type):
"""Sets the compliance_type of this SuppressViolationRequest.
The compliance type of the listing violation to suppress is specified in this field. The compliance type for each listing violation is found in the complianceType field under the listingViolations array in a getListingViolations response. Note: At this time, the suppressViolation method is only used to suppress aspect adoption listing violations in the 'at-risk' state, so ASPECTS_ADOPTION is currently the only supported value for this field. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/sell/compliance/types/com:ComplianceTypeEnum'>eBay API documentation</a> # noqa: E501
:param compliance_type: The compliance_type of this SuppressViolationRequest. # noqa: E501
:type: str
"""
self._compliance_type = compliance_type
@property
def listing_id(self):
"""Gets the listing_id of this SuppressViolationRequest. # noqa: E501
The unique identifier of the listing with the violation(s) is specified in this field. The unique identifier of the listing with the listing violation(s) is found in the listingId field under the listingViolations array in a getListingViolations response. Note: At this time, the suppressViolation method is only used to suppress aspect adoption listing violations in the 'at-risk' state, so the listing specified in this field should be a listing with an ASPECTS_ADOPTION violation in the 'at-risk' state. # noqa: E501
:return: The listing_id of this SuppressViolationRequest. # noqa: E501
:rtype: str
"""
return self._listing_id
@listing_id.setter
def listing_id(self, listing_id):
"""Sets the listing_id of this SuppressViolationRequest.
The unique identifier of the listing with the violation(s) is specified in this field. The unique identifier of the listing with the listing violation(s) is found in the listingId field under the listingViolations array in a getListingViolations response. Note: At this time, the suppressViolation method is only used to suppress aspect adoption listing violations in the 'at-risk' state, so the listing specified in this field should be a listing with an ASPECTS_ADOPTION violation in the 'at-risk' state. # noqa: E501
:param listing_id: The listing_id of this SuppressViolationRequest. # noqa: E501
:type: str
"""
self._listing_id = listing_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SuppressViolationRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SuppressViolationRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
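# Hedged usage sketch (hand-written, not generated by swagger-codegen): build
# a request body and serialize it for the suppressViolation call. The listing
# id below is a made-up placeholder; ASPECTS_ADOPTION is currently the only
# supported compliance type (see the property docstrings above).
def _example_suppress_violation_request():
    req = SuppressViolationRequest(compliance_type='ASPECTS_ADOPTION',
                                   listing_id='110123456789')
    return req.to_dict()  # e.g. used as the JSON payload of the request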
|
the-stack_106_13384
|
# -*- coding: utf-8 -*-
# """
# - Author: steve simmert
# - E-mail: [email protected]
# - Copyright: 2015
# """
"""
Manage power spectral densities measurements.
"""
import pdb
from . import ureg
from . import name_constants as co
from .data_parser import read_PSD_parameter_file
from .data_parser import read_std_data_file
from .data_parser import save_psd_data
from .data_parser import save_psd_params
from .physics import density_H2O
from .physics import drag
from .physics import faxen_factor
from .physics import MATERIALS
from .physics import Material
from .physics import oseen_factor
from .physics import viscosity_H2O
from .physics import dviscosity_H2O
from .plotting import add_plot_to_figure
from .plotting import col_dict
from .utilities import str2u
from collections import OrderedDict
import copy
from inspect import signature
import matplotlib.pyplot as plt
from os.path import join
from scipy import absolute
from scipy import array
from scipy import inf
from scipy import logical_or
from scipy import pi
from scipy import rand
from scipy import randn
from scipy import shape
from scipy import signal
from scipy import sqrt
from scipy import zeros
from scipy.fftpack import fft, ifft, fftfreq
import time
import warnings
#####--------------------------------------------------------------------------
#---- general ---
#####--------------------------------------------------------------------------
# generate filtered random data
def gen_filtered_data(fun, fs, T_msr, *args, **kwargs):
"""
Generate filtered random data.
    The function simulates data by drawing random numbers from a normal
    distribution, which corresponds to a trapped particle with pure white
    noise. The signal is Fourier-transformed, multiplied with the square
    root of the given filter function (which must therefore represent the
    shape of a power spectral density), and then inverse-Fourier-transformed.
    The number of data points is fs * T_msr, i.e. the sampling
    frequency times the total measurement time.
    To also account for the physics, one can provide the following two
keyword arguments:
mean : float
Set the mean of the output data.
std : float
Set the standard deviation of the output data.
All other *args and **kwargs are passed to fun(*args, **kwargs).
Arguments
---------
fun : function
Filter function, e.g. a low-pass filter.
fs : float
Sampling frequency
T_msr : float
Measurement time, the total time of the measurement.
"""
mean = kwargs.pop('mean') if 'mean' in kwargs else 0.0
std = kwargs.pop('std') if 'std' in kwargs else 1.0
N = int(fs * T_msr)
freq = fftfreq(N, 1/fs)
xraw = randn(N)
x = ifft(fft(xraw) * sqrt(fun(freq, *args, **kwargs))).real
if std != 1.0:
x_out = (x / x.std() * std)
else:
x_out = x
x_out = x_out - x_out.mean() + mean
return x_out
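# Hedged usage sketch: shape white noise with a Lorentzian PSD, which mimics
# an optically trapped bead. The shape function and its parameter values are
# assumptions made only for this example.
def _example_gen_filtered_data(fs=4096.0, T_msr=10.0):
    def lorentzian(freq, D=1.0, f_c=500.0):
        # one-sided Lorentzian PSD shape with diffusion constant D and
        # corner frequency f_c
        return D / (pi**2 * (f_c**2 + freq**2))
    # extra keyword arguments (here D, f_c) are passed through to the filter
    x = gen_filtered_data(lorentzian, fs, T_msr, D=2.5e-3, f_c=450.0)
    return x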
def calculate_psd(x, fs, N_win=1):
"""
Calculate power spectral density of the signal x.
N_win splits the signal into the corresponding number of parts.
Arguments
---------
x : array(float)
Signal vector.
fs : float
Sampling frequency
N_win : int
        Number of windows to divide the signal vector into. The power spectra
        of the windows are then averaged. If N_win does not evenly divide
        len(x), the excess data points at the end are dropped with a warning.
    """
N = len(x)
rest = N % N_win
if rest > 0:
        warnings.warn('N_win does not evenly divide N = len(x). '
                      '--> {0:1d} data points were omitted.'.format(rest))
x = x[:-rest] # throw away some data
len_ = int(N / N_win)
psds = []
for idx in range(N_win):
freq, psd = signal.welch(x[idx * len_: (idx + 1) * len_],
fs=fs,
window='boxcar',
nperseg=len_,
noverlap=0,
nfft=None,
detrend=None,
return_onesided=True,
scaling='density',
axis=-1)
psds.append(psd)
return (freq, array(psds))
def gen_PSD_from_time_series(x, fs, N_win, calc_errors=False, **PSD_kwargs):
"""
Generate a **psd** object from a time series data set.
    The function uses the Welch algorithm from the scipy.signal package.
Arguments
---------
x : array(float)
Signal vector.
fs : float
Sampling frequency.
N_win : int
Number of windows to devide the signal vector into. The power spectra
are then averaged this many times.
calc_errors : bool
If True, the errors of the psd values are determined from the single
(windowed) psds. It is recommended to use the theoretical errors for
PSD analysis (see Nørrelykke 2010), thus using the default: False.
PSD_kwargs : keyword arguments
passed over to the init call of **PSD**.
Returns
-------
PSD
Power spectral density as an object of the **PSD** class.
"""
freq, psds = calculate_psd(x, fs, N_win=N_win)
psd_avg = psds.mean(axis=0)
if calc_errors:
err = psds.std(axis=0) / sqrt(N_win)
else:
err = psd_avg / sqrt(N_win)
p = PSD(freq,
psd_avg,
err=err,
N_avg=N_win,
f_sample=fs,
**PSD_kwargs)
return p
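# Hedged end-to-end sketch: estimate a PSD object from a simulated trace with
# 8 averaging windows; the name/direction keywords are arbitrary examples.
def _example_gen_psd_from_time_series(fs=4096.0, T_msr=8.0):
    x = randn(int(fs * T_msr))  # plain white-noise trace as a stand-in signal
    return gen_PSD_from_time_series(x, fs, N_win=8, name='x',
                                    direction='lateral')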
#####--------------------------------------------------------------------------
#---- objects ---
#####--------------------------------------------------------------------------
class PSD(object):
def __init__(self,
freq,
psd,
err=None,
name='',
f_sample=None,
N_avg=1,
direction=None,
freq_unit='Hz',
psd_unit='V**2/Hz'
):
"""
Describes the one-sided power spectral density for one dimension.
Arguments
---------
freq : array(float)
frequency vector of positive frequencies f in **Hertz**. This
vector could still hold f=0. The psd-value psd(f=0) would then be
taken as offset.
psd : array(float)
            Vector with the corresponding power spectral densities. Normally,
            the quantities are given in Volt²/Hz, since they are, e.g.,
            measured with a position-sensitive device. Thus the object
            assumes these units.
err : array(float)
Corresponding errors to the psd values with the same units.
name : str
Gives the PSD a name that is used to categorize it with other PSDs.
            This should ideally be one of 'x', 'y' or 'z' to ensure compatibility
with HeightCalibration methods, but could be different for other
purposes, e.g. collective_fit method of PSDFit object, where each
psd must have a different name.
f_sample : float
Sampling frequency.
N_avg : int
Number of averages of the psd. In case no error vector is given
this is used to calculate *err*.
direction : str
            Should be either 'lateral' or 'axial', determining the direction
in which the fluctuations are measured.
'axial' means perpendicular to a sample surface.
freq_unit : str
Unit of the frequency values ('Hz').
psd_unit : str
            Unit of the psd values ('V**2/Hz').
"""
self.N_avg = N_avg
self._freq_unit = str2u(freq_unit)
self._psd_unit = str2u(psd_unit)
if 0 in freq:
self.offset = psd[freq == 0][0]
else:
self.offset = 0.0
self._freq = freq[freq > 0]
self._psd = psd[freq > 0]
if err is None:
self._err = self._psd / sqrt(self.N_avg)
else:
self._err = err[freq > 0]
self.reset_mask()
self.name = name
if direction is None:
if 'z' in name:
self.direction = 'axial'
else:
self.direction = 'lateral'
else:
self.direction = direction
if f_sample is None:
f_sample = 2 * max(freq)
else:
if 2 * max(freq) != f_sample:
warnings.warn('Maximum frequency of the spectrum should, in '
'general, be half of the sampling frequency.\n'
'2 x f_max = {0:1.3f} != f_sample = {1:1.3f}.\n'
'You are able to continue, though.'
''.format(2*max(freq), f_sample))
self._f_sample = f_sample
def _exclude_values(self, values, attr_name='freq'):
""" Excludes values for attr == value """
attr = getattr(self, attr_name)
        try:
            vals = iter(values)
        except TypeError:
            vals = [values]
for val in vals:
if val in attr:
mask = attr == val
self.mask = logical_or(self.mask, mask)
else:
warnings.warn('Value {} not available in attribute '
'{}.'.format(val, attr_name))
def _exclude_values_outside(self, vmin, vmax, attr_name='freq'):
"""
Exclude values outside the range vmin, vmax of attribute with name
attr_name'.
"""
attr = getattr(self, attr_name)
mask = logical_or(attr < vmin, attr > vmax)
self.mask = logical_or(self.mask, mask)
@property
def freq(self):
return self._freq[~self.mask]
def get_freq(self,
unit=None,
get_all=False,
get_masked=False,
offset=False):
"""
Return the frequency vector.
Arguments
---------
unit : str
get_all : bool
If False uses the internal mask.
get_masked : bool
If True returns the masked instead of the unmasked elements.
offset : bool
Whether to include zero hertz to the freq-vector.
"""
if unit is None or unit == self._freq_unit:
conv = 1.0
else:
conv = ureg(self._freq_unit).to(unit).magnitude
if get_masked:
mask = self.mask
else:
mask = ~self.mask
if get_all:
f_out = self._freq * conv
else:
f_out = self._freq[mask] * conv
if offset:
a = list(f_out)
a.insert(0, 0.0)
f_out_off = array(a)
return f_out_off
else:
return f_out
@property
def psd(self):
return self._psd[~self.mask]
def get_psd(self,
unit=None,
get_all=False,
get_masked=False,
offset=False):
"""
Return the psd vector.
Arguments
---------
unit : str
get_all : bool
If False uses the internal mask.
get_masked : bool
If True returns the masked instead of the unmasked elements.
offset : bool
Whether to include the value at zero hertz.
"""
if unit is None or unit == self._psd_unit:
conv = 1.0
else:
conv = ureg(self._psd_unit).to(unit).magnitude
if get_masked:
mask = self.mask
else:
mask = ~self.mask
if get_all:
p_out = self._psd * conv
else:
p_out = self._psd[mask] * conv
if offset:
a = list(p_out)
a.insert(0, self.offset * conv)
p_out_off = array(a)
return p_out_off
else:
return p_out
@property
def psd_err(self):
return self._err[~self.mask]
def get_err(self,
unit=None,
get_all=False,
get_masked=False,
offset=False):
"""
Return the error vector.
Arguments
---------
unit : str
get_all : bool
If False uses the internal mask.
get_masked : bool
If True returns the masked instead of the unmasked elements.
offset : bool
Whether to include the error value at zero hertz.
"""
if unit is None or unit == self._psd_unit:
conv = 1.0
else:
conv = ureg(self._psd_unit).to(unit).magnitude
if get_masked:
mask = self.mask
else:
mask = ~self.mask
if get_all:
e_out = self._err * conv
else:
e_out = self._err[mask] * conv
if offset:
offset_err = self.offset * conv / sqrt(self.N_avg)
a = list(e_out)
a.insert(0, offset_err)
e_out_off = array(a)
return e_out_off
else:
return e_out
@property
def f_sample(self):
return self._f_sample
def get_f_sample(self, unit=None):
if unit is None or unit == self._freq_unit:
return self._f_sample
else:
conv = ureg(self._freq_unit).to(unit).magnitude
return self._f_sample * conv
    def set_f_sample(self, f_sample, unit):
        if unit == self._freq_unit:
            conv = 1.0
        else:
            conv = ureg(unit).to(self._freq_unit).magnitude
        self._f_sample = f_sample * conv
@property
def df(self):
""" Frequency resolution of the power spectrum"""
return self._freq[1] - self._freq[0]
@property
def T_msr(self):
""" Measurement time in seconds """
return (1 / min(self.get_freq(unit='Hz', get_all=True)))
@property
def N_samples(self):
""" Number of samples """
return (self.T_msr * self.f_sample)
def is_lateral(self):
"""
Whether the direction attribute is set to 'lateral' (True) or
'axial' (False).
"""
if self.direction == 'lateral':
return True
elif self.direction == 'axial':
return False
else:
raise Exception('Unknown direction {}'.format(self.direction))
def reset_mask(self):
""" Rest the internal mask, so all values a taken into account. """
self.mask = zeros(shape(self._freq)) > 0
def add_mask(self, mask):
"""
Logical OR with self.mask.
"""
self.mask = logical_or(self.mask, mask)
def exclude_freq(self, f_exclude):
"""
Exclude the data at frequency or frequencies. f_exclude can be both a
list of or a single float.
"""
self._exclude_values(f_exclude, attr_name='_freq')
def exclude_freq_outside(self, fmin, fmax):
"""
Exclude the frequencies outside the interval [fmin, fmax].
"""
self._exclude_values_outside(fmin, fmax, attr_name='_freq')
def plot_psd(self,
axis=None,
plot_all=False,
plot_masked=False,
plot_errors=False,
**kwargs
):
"""
Plots the power spectral density.
Arguments
---------
axis : axis
Axis to plot to.
plot_all : bool
If True, omits the internal mask.
plot_masked : bool
When True plots the masked values instead.
plot_errors : bool
Plot error bars as well.
**kwargs
            Keyword arguments handed over to plot().
Returns
-------
figure
"""
freq = self.get_freq(get_all=plot_all, get_masked=plot_masked)
psd = self.get_psd(get_all=plot_all, get_masked=plot_masked)
err = self.get_err(get_all=plot_all, get_masked=plot_masked)
if plot_masked:
if self.name in col_dict:
col = col_dict['o' + self.name]
else:
col = 'gray'
fmt = 'o'
if 'markersize' not in kwargs:
kwargs['markersize'] = 3
else:
if self.name in col_dict:
col = col_dict[self.name]
else:
col = tuple(rand(3))
fmt = '-'
if not plot_errors:
err = []
if axis is None:
fig = None
else:
fig = axis.figure
if 'color' not in kwargs and self.name in col_dict.keys():
kwargs['color'] = col
if 'fmt' not in kwargs:
kwargs['fmt'] = fmt
if 'linewidth' not in kwargs:
kwargs['linewidth'] = 1.0
if 'alpha' not in kwargs:
kwargs['alpha'] = 0.5
if 'title' not in kwargs:
kwargs['title'] = 'PSD {}'.format(self.name)
if 'showLegend' not in kwargs:
kwargs['showLegend'] = True
if 'legend_kwargs' not in kwargs:
lg_kws = {'loc': 3}
kwargs['legend_kwargs'] = lg_kws
if 'fontsize' not in kwargs:
kwargs['fontsize'] = 16
ax = add_plot_to_figure(fig,
freq,
psd,
yerr=err,
label=self.name,
axis=axis,
logplot=True,
**kwargs
)
ax.grid(which='major')
plt.setp(ax,
xlabel='Frequency (Hz)',
ylabel=r'PSD ($\mathsf{V^2/Hz}$)'
)
return ax.figure
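# Illustrative usage sketch (not part of the original module): constructing a
# PSD object by hand. The constructor keywords mirror the call used in
# PSDMeasurement.load() further below; the numerical values, the synthetic
# Lorentzian-like data and the error estimate are assumptions for demonstration.
def _example_psd_usage():
    from numpy import arange
    freq = arange(1.0, 1.0e4, 1.0)                  # Hz
    psd_vals = 1.0e-9 / (1.0 + (freq / 500.0)**2)   # made-up data in V**2/Hz
    err = 0.1 * psd_vals                            # made-up error estimate
    p = PSD(freq, psd_vals, err=err, name='x', f_sample=2.0e4, N_avg=10,
            freq_unit='Hz', psd_unit='V**2/Hz')
    p.exclude_freq_outside(10.0, 5.0e3)             # restrict to a fit range
    return p.get_psd(unit='V**2/Hz')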
class ExpSetting(object):
"""
    Describes the experimental setting of a Measurement.
"""
def __init__(self,
temp,
radius,
temp_err=0.0,
radius_err=0.0,
height=inf,
density_particle=None,
density_medium=None,
viscosity=None,
viscosity_err=0.0,
material='',
medium='water',
temp_unit='K',
radius_unit='m',
height_unit='m',
density_particle_unit='kg/m**3',
density_medium_unit='kg/m**3',
viscosity_unit='Pa*s',
warn=True):
"""
Define the experimental setting.
Arguments
---------
temp : float
Temperature at which the measurement was performed.
radius : float
Radius of the bead.
temp_err : float
Error of the Temperature value.
radius_err : float
Radius error.
height : float
            Apparent distance between trapped particle and surface.
density_particle : float
The mass density of the bead. If this is provided, **material**
will only name the material, but has no other effect.
density_medium : float or callable.
Density of the medium. If callable, then the function is called
as density_medium(temp), with temp in Kelvin. Otherwise the float
value is used.
viscosity : float or callable
Viscosity of the medium. If callable, then the function is called
as viscosity(temp), with temp in Kelvin. Otherwise the float value
is used.
viscosity_err : float, callable or None
If callable, the function is used as
            absolute(viscosity_err(temp) * temp_err), with temp in Kelvin.
Otherwise the float value is used as error in the viscosity
estimate. If it's None, a known medium must be given.
material : str
Name of the particle material. E.g. 'polystyrene', 'titania',
'silica'. If the name is none of these the particle density must be
provided.
temp_unit : str
Unit of the Temperature ('K').
radius_unit : str
Unit of the radius values (e.g. 'um').
height_unit : str
Unit of the height.
density_particle_unit : str
The unit the density is provided in, e.g. 'kg/m**3'
density_medium_unit : str
The unit the density is provided in, e.g. 'kg/m**3'
viscosity_unit : str
Unit of the provided viscosity, e.g. 'Pa*s'.
Notes
-----
material
            If material is a known material, a set density_particle will
            have no effect.
copy
Use copy() to make a duplicate of the setting.
"""
# temperature
self._qtemp = ureg.Measurement(temp, temp_err, temp_unit)
# radius
self._radius = radius
self._radius_err = radius_err
self._radius_unit = str2u(radius_unit)
# height
self._height = height
self._height_unit = height_unit
# material, density_material
try:
self.material = Material(material,
density=density_particle,
density_unit=density_particle_unit)
except:
if density_particle:
if material == '':
material_name = 'Unknown material'
else:
material_name = material
self.material = Material(material_name,
density=density_particle,
density_unit=density_particle_unit)
else:
if warn:
warnings.warn('Unknown material or density not given. '
'Using fallback density=1050 kg/m**3 '
'(Polystyrene).')
self.material = Material('PS')
self.get_density_particle = self.material.get_density
# medium
if medium.lower() in ['water', 'h2o']:
self._density_medium = density_H2O(self.get_temp(unit='K'))
self._density_medium_unit = str2u('kg/m**3')
self._viscosity = viscosity_H2O(self.get_temp(unit='K'))
dv = absolute(dviscosity_H2O(self.get_temp(unit='K')) *
self.get_temp_err(unit='K'))
self._viscosity_err = dv
self._viscosity_unit = str2u('Pa*s')
self.medium = 'water'
else:
if density_medium and viscosity:
T = self.get_temp(unit='K')
dT = self.get_temp_err(unit='K')
try:
self._density_medium = density_medium(T)
except:
self._density_medium = density_medium
self._density_medium_unit = str2u(density_medium_unit)
try:
self._viscosity = viscosity(T)
except:
self._viscosity = viscosity
try:
self._viscosity_err = absolute(viscosity_err(T) * dT)
except:
self._viscosity_err = viscosity_err
self._viscosity_unit = str2u(viscosity_unit)
self.medium = medium
else:
raise Exception('You need to specify density and viscosity of '
'the medium "{}"'.format(medium))
@property
def _temp_unit(self):
return str(self._qtemp.units)
@property
def temp(self):
return self._qtemp.value.magnitude
def get_temp(self, unit=None):
if unit is None or unit == self._temp_unit:
return self.temp
else:
return self._qtemp.to(unit).value.magnitude
def set_temp(self, temp, unit):
""" Set the Temperature of the measurement in specified units. """
T_err = self._qtemp.error.magnitude
if unit == self._temp_unit:
self._qtemp = ureg.Measurement(temp, T_err, unit)
else:
T = ureg.Measurement(temp, T_err, unit)
self._qtemp = T.to(self._temp_unit)
@property
def temp_err(self):
return self._qtemp.error.magnitude
def get_temp_err(self, unit=None):
if unit is None or unit == self._temp_unit:
return self.temp_err
else:
return self._qtemp.to(unit).error.magnitude
def set_temp_err(self, T_err, unit):
""" Set the Temperature of the measurement in specified units. """
T = self.temp
if unit == self._temp_unit:
self._qtemp = ureg.Measurement(T, T_err, unit)
else:
T_ = ureg.Measurement(T, T_err, unit)
self._qtemp = T_.to(self._temp_unit)
@property
def radius(self):
return self._radius
def get_radius(self, unit=None):
if unit is None or unit == self._radius_unit:
return self._radius
else:
conv = ureg(self._radius_unit).to(unit).magnitude
return self._radius * conv
def set_radius(self, radius, unit):
"""
Set the radius of the used bead in the specified unit.
Unit is then converted into the preset units.
"""
if unit == self._radius_unit:
conv = 1.0
else:
conv = ureg(unit).to(self._radius_unit).magnitude
self._radius = radius * conv
@property
def radius_err(self):
return self._radius_err
def get_radius_err(self, unit=None):
if unit is None or unit == self._radius_unit:
return self._radius_err
else:
conv = ureg(self._radius_unit).to(unit).magnitude
return self._radius_err * conv
def set_radius_err(self, radius_err, unit):
"""
Set the error of the radius of the used bead in the specified unit.
Unit is then converted into the preset units.
"""
if unit == self._radius_unit:
conv = 1.0
else:
conv = ureg(unit).to(self._radius_unit).magnitude
self._radius_err = radius_err * conv
@property
def height(self):
""" Apperent distance between trapped particle and surface."""
return self._height
def get_height(self, unit=None):
"""
        Return the apparent distance between trapped particle and surface in
the specified units.
"""
if unit is None or unit == self._height_unit:
return self._height
else:
conv = ureg(self._height_unit).to(unit).magnitude
return self._height * conv
def set_height(self, height, unit):
"""
        Set the apparent distance between trapped particle and surface in
specified units.
"""
conv = ureg(unit).to(self._height_unit).magnitude
self._height = height * conv
@property
def density_particle(self):
"""Return the particle density in kg/m**3."""
return self.material.get_density(unit='kg/m**3')
#NOTE: get_density_particle is defined in __init__ and is hooked to
# material.get_density
def set_material(self, name, density, density_unit):
self.material = Material(name,
density=density,
density_unit=density_unit)
@property
def viscosity(self):
return self._viscosity
def get_viscosity(self, unit=None):
if unit is None or unit == self._viscosity_unit:
conv = 1.0
else:
conv = ureg(self._viscosity_unit).to(unit).magnitude
return self._viscosity * conv
def set_viscosity(self, viscosity, unit='Pa*s'):
"""
Set the viscosity.
Arguments
---------
viscosity : float or callable
Viscosity of the medium. If callable, then the function is called
as viscosity(temp), with temp in Kelvin. Otherwise the float value
is used.
unit : str
unit of the viscosity
"""
T = self.get_temp(unit='K')
try:
self._viscosity = viscosity(T)
except:
self._viscosity = viscosity
self._viscosity_unit = str2u(unit)
def get_viscosity_err(self, unit=None):
if unit is None or unit == self._viscosity_unit:
conv = 1.0
else:
conv = ureg(self._viscosity_unit).to(unit).magnitude
return self._viscosity_err * conv
def set_viscosity_err(self, viscosity_err, unit='Pa*s'):
"""
Set the viscosity.
Arguments
---------
viscosity_err : float or callable
If callable, the function is used as
            absolute(viscosity_err(temp) * temp_err), with temp in Kelvin.
Otherwise the float value is used as error in the viscosity
estimate.
unit : str
unit of the viscosity
"""
T = self.get_temp(unit='K')
dT = self.get_temp_err(unit='K')
try:
self._viscosity_err = absolute(viscosity_err(T) * dT)
except:
self._viscosity_err = viscosity_err
self._viscosity_unit = str2u(unit)
@property
def density_medium(self):
return self._density_medium
def get_density_medium(self, unit=None):
if unit is None or unit == self._density_medium_unit:
conv = 1.0
else:
conv = ureg(self._density_medium_unit).to(unit).magnitude
return self._density_medium * conv
    def set_density_medium(self, density, unit='kg/m**3'):
self._density_medium = density
self._density_medium_unit = str2u(unit)
def copy(self):
return copy.copy(self)
def get_dict(self):
"""
Return data inside a dictionary.
"""
d = OrderedDict()
d[co.temp] = self.temp
d[co.temp_err] = self.temp_err
d[co.temp_unit] = self._temp_unit
d[co.radius] = self.radius
d[co.radius_err] = self.radius_err
d[co.radius_unit] = self._radius_unit
d[co.height] = self.height
d[co.height_unit] = self._height_unit
d[co.material] = self.material.name
if self.material.name not in MATERIALS:
d[co.density_p] = self.density_particle
d[co.density_p_unit] = str2u('kg/m**3')
d[co.medium] = self.medium
if self.medium != 'water':
d[co.density_m] = self.density_medium
d[co.density_m_unit] = self._density_medium_unit
d[co.viscosity] = self.viscosity
d[co.viscosity_unit] = self._viscosity_unit
return d
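# Illustrative sketch (all values are assumptions): a typical experimental
# setting for a 1 µm polystyrene bead in water, trapped 5 µm above the surface.
def _example_exp_setting():
    setting = ExpSetting(25.0, 0.5,
                         temp_err=1.0,
                         radius_err=0.01,
                         height=5.0,
                         material='polystyrene',
                         medium='water',
                         temp_unit='degC',
                         radius_unit='um',
                         height_unit='um')
    # the water viscosity is derived internally from the given temperature
    return setting.get_viscosity(unit='Pa*s')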
class PSDMeasurement(object):
def __init__(self,
exp_setting=None,
freq_unit='Hz',
psd_unit='V**2/Hz',
warn=True):
"""
Manage power spectra (PSDs) of one measurement.
One-sided PSDs are assumed! They are stored in self.psds.
Use **load()** to read data from a PSD data file or
**add_psd()** to add **psd**-objects one after another.
Arguments
---------
exp_setting : ExpSetting
            Object describing the experimental setting. Standard values are
            assumed if None. A warning will show up unless warn is False.
freq_unit : str
Unit of the frequency values ('Hz').
psd_unit : str
            Unit of the psd values ('V**2/Hz').
warn : bool
            Show warnings if standard values are assumed.
"""
if not exp_setting:
temp = 25.0
radius = 0.5
exp_setting = ExpSetting(temp, radius,
temp_unit='degC',
radius_unit='um',
warn=warn)
if warn:
                warnings.warn('Standard values for experimental setting used! '
                              'I.e. temp=25 degC, radius=0.5 um, height=inf, '
                              'material=Polystyrene')
if not isinstance(exp_setting, ExpSetting):
raise Exception('exp_setting is not an ExpSetting object.')
self.exp_setting = exp_setting
self.psds = OrderedDict()
self._freq_unit = freq_unit
self._psd_unit = psd_unit
self.active_calibration = False
self.ex_axis = None
self.ex_freq = None
self.ex_amplitude = None
self.ex_amplitude_err = None
self._ex_amplitude_unit = None
self.ex_power = None
self.ex_power_err = None
self._ex_power_unit = None
def add_psd(self, name, psd):
"""
Add a **psd** object to the measurement.
Arguments
---------
name : str
String specifying the name of the axis.
psd : PSD
Object of the **PSD** class
"""
        # The first psd that's added sets the standard values for other psds
        # that might be added later on.
if name in self.get_names():
warnings.warn("PSD-object with name '{}' already "
"present. Data overridden".format(name))
psd.name = name
self.psds[name] = psd
@property
def names(self):
"""names of the axes."""
return self.get_names()
def get_names(self):
"""Return the names of the axes."""
lst = [psd.name for psd in self.psds.values()]
lst.sort()
return lst
def get_f_sample(self, name, unit=None):
"""
        Return the sampling frequency of the PSD with the given name.
"""
if unit is None:
unit = self._freq_unit
return self.psds[name].get_f_sample(unit=unit)
def get_f_resolution(self, name, unit=None):
if unit is None:
unit = self._freq_unit
unit_psd = self.psds[name]._freq_unit
if unit_psd != unit:
conv = ureg(unit_psd).to(unit).magnitude
else:
conv = 1.0
return self.psds[name].df * conv
def get_N_samples(self, name):
return self.psds[name].N_samples
def get_laterality(self):
"""
Return a dictionary with names of the psds as keys and values
        describing whether the direction is lateral (True) or axial (False).
"""
d = {name: psd.is_lateral()
for name, psd in self.psds.items()}
return d
@property
def freq_x(self):
return self.get_freq('x')
@property
def freq_y(self):
return self.get_freq('y')
@property
def freq_z(self):
return self.get_freq('z')
def get_freq(self, name, **kwargs):
"""
Return the frequency vector of the axis specified by **name**.
Arguments
---------
name : str
name of the axis.
Keyword Arguments
-----------------
unit : str
get_all : bool
If False uses the internal mask.
get_masked : bool
If True returns the masked instead of the unmasked elements.
"""
return self.psds[name].get_freq(**kwargs)
@property
def psd_x(self):
return self.get_psd('x')
@property
def psd_y(self):
return self.get_psd('y')
@property
def psd_z(self):
return self.get_psd('z')
def get_psd(self, name, **kwargs):
"""
Return the psd vector of the axis specified by 'name'.
Arguments
---------
name : str
name of the axis.
kwargs
can be:
unit : str
get_all : bool
If False uses the internal mask.
get_masked : bool
If True returns the masked instead of the unmasked elements.
"""
return self.psds[name].get_psd(**kwargs)
@property
def err_x(self):
return self.get_psd_err('x')
@property
def err_y(self):
return self.get_psd_err('y')
@property
def err_z(self):
return self.get_psd_err('z')
def get_psd_err(self, name, **kwargs):
"""
Return the error vector of the axis specified by 'name'.
Arguments
---------
name : str
name of the axis.
kwargs can be:
unit : str
get_all : bool
If False uses the internal mask.
get_masked : bool
If True returns the masked instead of the unmasked elements.
offset : bool
Whether to include the value at zero hertz.
"""
return self.psds[name].get_err(**kwargs)
def set_ac_params(self,
ex_axis,
ex_freq,
ex_amplitude,
ex_power,
ex_amplitude_err=0,
ex_power_err=0,
freq_unit='Hz',
amplitude_unit='m',
power_unit='V**2'):
"""
Set the values of an active PSD measurement.
Arguments
---------
ex_axis : str
            Defines the name of the psd that was excited; this must be one
            of self.names.
ex_freq : float
Frequency at which the bead was driven.
ex_amplitude : float
Amplitude of the sine movement of the bead w.r.t. the trap center.
amplitude_unit : str
Unit that the amplitude is given in. The amplitude is then
converted to the same unit as the radius unit.
ex_power : float
The experimentally determined power at the given frequency.
power_unit : str
Unit the power is given in. This should be equal to
psd unit * freq unit.
ex_amplitude_err : float
Error of the amplitude.
ex_power_err : float
Error of the measured power.
Note
----
        It is good practice to measure the driving amplitude, e.g. via the
piezo stage monitor signal, instead of taking the set amplitude for
granted. The set value often differs from the actual one,
especially at high driving frequencies.
Reference
---------
Tolić-Nørrelykke, et al. (2006)
Part IV A
Calibration of optical tweezers with positional detection in
the back focal plane.
Review of Scientific Instruments, 77(10), 103101.
http://doi.org/10.1063/1.2356852
"""
if ex_axis not in self.names:
raise Exception('Unknown axis {}'.format(ex_axis))
else:
self.ex_axis = ex_axis
if freq_unit != self._freq_unit:
conv = ureg(freq_unit).to(self._freq_unit).magnitude
else:
conv = 1.0
self.ex_freq = float(ex_freq) * conv
r_unit = self.exp_setting._radius_unit
if amplitude_unit != r_unit:
conv = ureg(amplitude_unit).to(r_unit).magnitude
else:
conv = 1.0
self.ex_amplitude = float(ex_amplitude) * conv
self.ex_amplitude_err = float(ex_amplitude_err) * conv
self._ex_amplitude_unit = r_unit
self.ex_power = float(ex_power)
self.ex_power_err = float(ex_power_err)
self._ex_power_unit = power_unit
self.active_calibration = True
def get_stokes_drag(self, unit='N*s/m'):
"""
        Return the Stokes drag in the specified unit.
The function uses the drag function of pyotc.physics evaluated at
f=0 and height=inf.
See Also
--------
pyotc.physics.drag
"""
d = drag(self.exp_setting.get_radius(unit='m'),
self.exp_setting.get_temp(unit='K'),
density=self.exp_setting.get_density_medium(unit='kg/m**3'),
viscosity=self.exp_setting.get_viscosity(unit='Pa*s')).real
if unit == 'N*s/m':
conv = 1.0
else:
conv = ureg('N*s/m').to(unit).magnitude
return d * conv
def get_stokes_drag_err(self, unit='N*s/m'):
"""
        Return the error of the Stokes drag in the specified unit.
        The error is propagated from the relative errors of the viscosity and
        the radius.
See Also
--------
get_stokes_drag
"""
v = self.exp_setting.get_viscosity()
dv = self.exp_setting.get_viscosity_err()
dr = self.exp_setting.get_radius_err(unit='m')
r = self.exp_setting.get_radius(unit='m')
ddrag = (dv / v + dr / r) * self.get_stokes_drag(unit=unit)
return ddrag
def get_corrected_drag(self,
mode=1,
distance=None,
focal_shift=1.0,
distance_unit='um',
drag_unit='N*s/m'):
"""
        Correct the Stokes drag for the presence of either one wall or two
        parallel walls at the given height of the trapped particle.
Only parallel movement of the sphere is considered!
mode = 1
Faxén's correction: drag = drag_stokes * faxen_factor
mode = 2
Linear superposition approach as done by Oseen in 1927; you need
to provide the thickness of the sample!
drag = drag_stokes * (faxen_factor(h) + faxen_factor(H-h)-1)
Arguments
---------
mode : 1 or 2
If one or two walls are present.
distance : float
The distance between two parallel walls.
focal_shift : float defaults to 1.0
Relative shift of the focal point of the trap, due to diffraction.
This usually is about 0.8 for an oil-immersion objective and a
sample in water. The given height is automatically corrected for
that shift.
distance_unit : str
Unit of distance.
drag_unit : str
unit of the output drag.
"""
drag_stokes = self.get_stokes_drag(unit=drag_unit)
radius = self.exp_setting.get_radius(unit=distance_unit)
height = self.exp_setting.get_height(unit=distance_unit) * focal_shift
if height < 1.5 * radius:
warnings.warn("Faxén's correction does not work for heights lower"
"than 1.5 * radius.")
if mode == 1:
drag = drag_stokes * faxen_factor(height, radius)
elif mode == 2:
if distance is None:
raise Exception('Distance between the two walls is not given.')
if height > (distance - 1.5 * radius):
                warnings.warn('The correction does not work for heights above '
                              'thickness - 1.5 * radius.')
drag = drag_stokes * oseen_factor(height, radius, distance)
else:
raise Exception('Unknown mode {}'.format(mode))
return drag
def save(self,
directory=None,
datafile=None,
suffix='_psd_parameters.txt',
datafile_extension='.dat',
include_errors=False
):
"""
        Save the PSD measurement to a data and a parameter file.
Arguments
---------
        directory : path
            If None is given and there is no attribute 'directory', the
            current working directory is used.
        datafile : str
            If None and there is no attribute 'datafilename', the current
            date and time are used.
suffix : str
Suffix for the parameter file.
datafile_extension : str
Extension of the data file.
"""
# save data in data file
if directory is None:
try:
directory = self.directory
except:
directory = './'
if datafile is None:
try:
datafile = self.datafilename
except:
dstr = time.strftime("%Y-%m-%d_%H-%M")
pfix = '_psd_measurement_'
datafile = pfix + dstr
if not datafile.endswith(datafile_extension):
datafile += datafile_extension
ptdfile = join(directory, datafile)
# check that frequency vectors have the same lengths
# TODO alternative: save freq vectors with axis name 'freq_x' etc.
flengths = [len(self.get_freq(ax, get_all=True, offset=True))
for ax in self.names]
if len(set(flengths)) > 1:
raise Exception('Frequency vectors have different lengths!')
psd_dict = OrderedDict()
psd_dict.update({'psd_' + ax: self.get_psd(ax,
get_all=True,
offset=True)
for ax in self.names})
if include_errors:
psd_dict.update({'err_' + ax: self.get_psd_err(ax,
get_all=True,
offset=True)
for ax in self.names})
plengths = [len(psd) for psd in psd_dict.values()]
if len(set(plengths)) > 1:
raise Exception('PSD vectors have different lengths!')
freq = self.get_freq(self.names[0], get_all=True, offset=True)
save_psd_data(ptdfile, freq, psd_dict)
# save parameters in parameter file
pfile = datafile[:datafile.rfind(datafile_extension)] + suffix
ptpfile = join(directory, pfile)
# generate parameters for parameter file
params = OrderedDict()
# names
params[co.names] = ','.join(self.names)
# N_avg
N_avg_ = [self.psds[name].N_avg for name in self.names]
params[co.N_avg] = ','.join(str(n) for n in N_avg_)
# f_sample
fs_ = [self.get_f_sample(name, unit=self._freq_unit)
for name in self.names]
params[co.f_sample] = ','.join(str(f) for f in fs_)
params[co.freq_unit] = self._freq_unit
params[co.psd_unit] = self._psd_unit
params.update(self.exp_setting.get_dict())
if self.active_calibration:
ac_params = OrderedDict()
ac_params[co.ex_axis] = self.ex_axis
ac_params[co.ex_freq] = self.ex_freq
ac_params[co.ex_amp] = self.ex_amplitude
ac_params[co.ex_amp_err] = self.ex_amplitude_err
ac_params[co.ex_pow] = self.ex_power
ac_params[co.ex_pow_err] = self.ex_power_err
ac_params[co.ex_amp_unit] = self.exp_setting._radius_unit
ac_params[co.ex_pow_unit] = self._ex_power_unit
else:
ac_params = None
save_psd_params(ptpfile, params, ac_param_dict=ac_params)
self.directory = directory
self.datafilename = datafile
self.paramfile = pfile
def load(self,
directory,
datafile,
paramfile=None,
suffix='_psd_parameters.txt'
):
"""
        Load the psd data and the corresponding parameter file.
Arguments
---------
directory : path
Path to the data folder.
datafile : str
Filename of the psd-data file '*.dat'.
paramfile : str
Filename of the corresponding parameter file. If None, the
            extension of the datafile is cut off and **suffix** is appended.
suffix : str
Suffix to the datafile name that characterizes the parameter file.
Note
----
datafile
The data file is assumed to have the data in columns, with their
names in the first row. Normally the columns are called 'freq',
            'psd_x', 'psd_y', etc. Columns starting with 'psd_' are still loaded,
and the strings after the underscore '_' are taken as names for the
created **psd** objects. So 'PSD_x1', 'PSD_x2', etc. will also
work and create axes with names x1, x2, etc.
**Important:** Any name that contains 'z' is considered to be an
axis in the axial direction. Hence **psd.direction** is set to
'axial'.
paramfile
The parameter file is read by the module **configparser**,
so a **config-file standard** is assumed.
Parameters of the [DEFAULT] section are read.
            The following parameters are read; the values in brackets are the
            fallback values - if none is given, the value is mandatory:
- freq_unit ('Hz')
- n_avg
- sampling_rate or f_sample
- psd_unit ('V**2/Hz')
- bead_dia or radius
- err_bead_dia or radius_err (0.0)
- diameter_unit or radius_unit ('um')
- density and density_unit or material ('PS')
- density_med (None - water is used later on)
- density_med_unit ('kg/m**3')
- viscosity (None - water is used later on)
- viscosity_unit ('Pa*s')
- height (inf)
- height_unit ('um')
- temperature (25)
- temperature_error (5)
- temp_unit ('celsius')
If the trapped particle was actively driven by a sine-wave and
an active calibration should be done, the following values must be
in the 'ACTIVE_CALIBRATION' section of the parameter file.
- excitation_axis
- excitation_frequency
- excitation_amplitude
- excitation_amplitude_error (0)
- amplitude_unit (um)
            - reference_power or power
- reference_power_error or power_err
- power_unit ('V**2')
"""
dfile = join(directory, datafile)
if paramfile is None:
paramfile = datafile[:-4] + suffix
self.paramfile = paramfile
pfile = join(directory, paramfile)
# read psd data file
data = read_std_data_file(dfile, lower_names=True)
# read parameter file
pars = read_PSD_parameter_file(pfile)
# set up fallback values
params = {co.freq_unit: 'Hz',
co.psd_unit: 'V**2/Hz',
co.radius_err: 0.0,
co.radius_unit: 'm',
co.height: inf,
co.height_unit: 'm',
co.temp: 25.0,
co.temp_err: 2.5,
co.temp_unit: 'celsius',
co.material: '',
co.density_p: None,
co.density_p_unit: 'kg/m**3',
co.medium: 'water',
co.density_m: None,
co.density_m_unit: 'kg/m**3',
co.viscosity: None,
co.viscosity_unit: 'Pa*s'}
# overwrite default parameters
params.update(pars)
# backward compatibility to version 0.2.2
if co.names not in params:
# get the axis names from the header in the datafile
names = [name.split('_')[1]
for name in data.keys() if name.lower().startswith('psd')]
else:
names = params[co.names].split(',')
# check if N_avg and f_sample are specified
for par in [co.N_avg, co.f_sample]:
if par not in params:
raise Exception('Parameter {} missing in parameter file {}'
''.format(par, pfile))
n_avg_ = params[co.N_avg].split(',')
# backward compatibility to version 0.2.2
if len(n_avg_) == 1:
n_avg_ = [n_avg_[0] for name in names]
N_avg = {}
for name, n_avg in zip(names, n_avg_):
N_avg[name] = int(n_avg)
f_sample_ = params[co.f_sample].split(',')
# backward compatibility to version 0.2.2
if len(f_sample_) == 1:
f_sample_ = [f_sample_[0] for name in names]
f_sample = {}
for name, fs in zip(names, f_sample_):
f_sample[name] = float(fs)
# check if radius or diameter was specified
if co.radius not in params:
raise Exception('Bead radius is not specified in '
'parameter file {}'.format(paramfile))
expset = ExpSetting(params[co.temp],
params[co.radius],
temp_err=params[co.temp_err],
radius_err=params[co.radius_err],
height=params[co.height],
density_particle=params[co.density_p],
density_medium=params[co.density_m],
viscosity=params[co.viscosity],
material=params[co.material],
medium=params[co.medium],
temp_unit=params[co.temp_unit],
radius_unit=params[co.radius_unit],
height_unit=params[co.height_unit],
density_particle_unit=params[co.density_p_unit],
density_medium_unit=params[co.density_m_unit],
viscosity_unit=params[co.viscosity_unit])
self.exp_setting = expset
        # now get the freq vector and psd data and put it into a PSD object
freq = data.pop(co.freq)
for name in names:
try:
psd_vals = data['psd_' + name]
except:
warnings.warn('PSD value for axis name {} not found'
''.format(name))
continue
try:
psd_err = data['err_' + name]
except:
psd_err = None
psd = PSD(freq,
psd_vals,
err=psd_err,
name=name,
f_sample=f_sample[name],
N_avg=N_avg[name],
freq_unit=params[co.freq_unit],
psd_unit=params[co.psd_unit]
)
self.add_psd(name, psd)
# check if there's active calibration information
if params['active_calibration']:
ex_axis = params[co.ex_axis]
ex_freq = params[co.ex_freq]
ex_amplitude = params[co.ex_amp]
ex_power = params[co.ex_pow]
if co.ex_amp_err in params:
ex_amplitude_err = params[co.ex_amp_err]
else:
ex_amplitude_err = 0
if co.ex_pow_err in params:
ex_power_err = params[co.ex_pow_err]
else:
ex_power_err = 0
if co.ex_amp_unit in params:
ex_amp_unit = params[co.ex_amp_unit]
else:
                ex_amp_unit = self.exp_setting._radius_unit
if co.ex_pow_unit in params:
power_unit = params[co.ex_pow_unit]
else:
psd_unit = params[co.psd_unit]
freq_unit = params[co.freq_unit]
power_unit = str((ureg(psd_unit) * ureg(freq_unit)).units)
self.set_ac_params(ex_axis,
ex_freq,
ex_amplitude,
ex_power,
ex_amplitude_err=ex_amplitude_err,
ex_power_err=ex_power_err,
freq_unit=params[co.freq_unit],
amplitude_unit=ex_amp_unit,
power_unit=power_unit)
self.directory = directory
self.datafilename = datafile
def get_ex_freq(self, unit='Hz'):
if unit != self._freq_unit:
conv = ureg(self._freq_unit).to(unit).magnitude
else:
conv = 1.0
return self.ex_freq * conv
def get_ex_amplitude(self, unit=None):
if unit is None or unit == self._ex_amplitude_unit:
return self.ex_amplitude
else:
conv = ureg(self._ex_amplitude_unit).to(unit).magnitude
return self.ex_amplitude * conv
def get_ex_amplitude_err(self, unit=None):
if unit is None or unit == self._ex_amplitude_unit:
return self.ex_amplitude_err
else:
conv = ureg(self._ex_amplitude_unit).to(unit).magnitude
return self.ex_amplitude_err * conv
def get_ex_power(self, unit=None):
if unit is None:
conv = 1.0
else:
conv = ureg(self._ex_power_unit).to(unit).magnitude
return self.ex_power * conv
def get_ex_power_err(self, unit=None):
if unit is None:
conv = 1.0
else:
conv = ureg(self._ex_power_unit).to(unit).magnitude
return self.ex_power_err * conv
def reset_masks(self):
for psd in self.psds.values():
psd.reset_mask()
def exclude_freq(self, f_ex, names=None):
"""
Exclude data points at frequencies f_ex.
Arguments
---------
f_ex : float or list(floats) or None
Frequencies to be excluded. If None, the data point at the
excitation frequency of the excited axis is excluded.
        names : str or list(str)
            Name(s) of the psds where the data points shall be excluded. If
            None, f_ex is excluded from all psds.
"""
if names and not isinstance(names, list):
names = [names]
for name in names if names else self.names:
self.psds[name].exclude_freq(f_ex)
def exclude_freq_outside(self, fmin, fmax, names=None, reset_mask=False):
"""
Exclude values outside the range fmin, fmax.
"""
if names and not isinstance(names, list):
names = [names]
for name in names if names else self.names:
if reset_mask:
self.psds[name].reset_mask()
self.psds[name].exclude_freq_outside(fmin, fmax)
def plot_psds(self, names=None, axis=None, **kwargs):
"""
        Plot all power spectral densities or only the specified names.
Calls the function plot_psd of the PSD object.
Arguments
---------
        names : str or list(str) or None
axis : matplotlib.Axis or None
Axis to add the plot to.
kwargs : keyword arguments passed to PSD.plot_psd()
e.g.:
- plot_all=False
- plot_masked=False
- plot_errors=False
- plot() keyword argumets
Returns the figure object.
"""
if names and not isinstance(names, list):
names = [names]
if 'title' not in kwargs.keys():
kwargs['title'] = ('PSDs at {0:1.3f} µm'.format(self.exp_setting.get_height(unit='um')))
for name in names if names else self.names:
fig = self.psds[name].plot_psd(axis=axis, **kwargs)
if axis is None:
axis = fig.axes[0]
return fig
def gen_psdm_from_region(region, T_msr, N_avg,
T_delay=0.0, psd_traces=None,
active_calibration=False, position_traces=None,
ex_freq=None, position_unit='um', exp_setting=None):
"""
"""
pm = PSDMeasurement(exp_setting=exp_setting, warn=True)
psd_traces = psd_traces or region.traces
N_min = int(T_delay * region.samplingrate)
N_max = int(T_msr * region.samplingrate)
samples = slice(N_min, N_min + N_max)
data = region.get_data(psd_traces, samples=samples)
for name, dat in zip(psd_traces, data.T):
p = gen_PSD_from_time_series(dat, region.samplingrate, N_avg)
pm.add_psd(name, p)
if active_calibration:
if not ex_freq:
raise Exception('Excitation frequency must be provided for an'
' active calibration.')
position_traces = position_traces or ['positionX', 'positionY']
ex_axis = region._excited(position_traces)
stage_ex_axis = position_traces[ex_axis]
stage_signal = region.get_data(stage_ex_axis, samples=samples)[:, 0]
p = gen_PSD_from_time_series(stage_signal,
region.samplingrate,
N_avg,
calc_errors=True)
        # !!! important: the stage signal is already in µm, due to the
        # setup-specific config file
ex_amp = float(sqrt(2 * p.psd[p.freq == ex_freq] * p.df))
ex_amp_err = float(sqrt(2 * p.psd_err[p.freq == ex_freq] * p.df))
psd_ex_axis = psd_traces[ex_axis]
freqs = pm.get_freq(psd_ex_axis)
ex_pow = float(pm.get_psd(psd_ex_axis)[freqs == ex_freq] *
pm.psds[psd_ex_axis].df)
ex_pow_err = float(pm.get_psd_err(psd_ex_axis)[freqs == ex_freq] *
pm.psds[psd_ex_axis].df)
pm.set_ac_params(psd_ex_axis, ex_freq, ex_amp, ex_pow,
ex_amplitude_err=ex_amp_err, ex_power_err=ex_pow_err,
amplitude_unit=position_unit)
return pm
#####--------------------------------------------------------------------------
#---- shape ---
#####--------------------------------------------------------------------------
def lorentzian_psd(freq, D, f_c):
"""
    Return the values at frequencies freq of a Lorentzian-shaped **one-sided**
power spectral density with a diffusion constant D and a corner frequency
f_c.
The function applied is: psd = D / (pi**2 * (freq**2 + f_c**2)).
Arguments
---------
freq : array(float)
Frequency vector.
D : float
Diffusion constant.
f_c : float
Corner frequency.
Returns
-------
array
References
----------
Equ. (9) in:
Tolić-Nørrelykke et al. (2006)
Calibration of optical tweezers with positional detection in the back
focal plane. Review of Scientific Instruments, 77(10), 103101.
http://doi.org/10.1063/1.2356852
"""
l = D / (pi**2 * (freq**2 + f_c**2))
return l
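# Minimal sketch of how lorentzian_psd is evaluated, e.g. to overlay an initial
# fit guess on measured data. D and f_c are placeholder values.
def _example_lorentzian_psd():
    from numpy import arange
    freq = arange(1.0, 1.0e4, 1.0)          # Hz
    return lorentzian_psd(freq, D=1.0e-3, f_c=500.0)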
def hydro_psd(freq, # Hz
D, # arb**2/s
f_c, # Hz
radius=0.5e-6, # m
height=inf, # m
temp=293.15, # K
rho=1000, # kg/m**3
density_med=None, # kg/m**3
viscosity=None, # Pa*s
lateral=True,
verbose=False):
"""
The hydrodynamically correct power spectral density of a sphere at a given
height in a viscous medium.
The function takes Faxén's law and the mass of the sphere into account.
Arguments
---------
freq : array(float)
Frequency vector in Hertz
D : float
Diffusion coefficient (with no correction, i.e. pure Stokes drag).
f_c : float
Corner frequency in Hz.
radius : float
Radius of the sphere in meters.
height : float
Height, i.e. the bead-center -- surface distance (the real one) in
meters.
temp : float
Absolute temperature in Kelvin
rho : float
Mass density of the sphere in kg/m³
density_med : float
Mass density of the medium in kg/m³. If None, the density of water at
        the given temperature is used.
viscosity : float
Viscosity of the medium in Ns/m². If None, the viscosity of water at
temperature temp is assumed.
lateral : bool
deprecated.
verbose : bool
be verbose.
References
----------
[1] Appendix D in:
Tolić-Nørrelykke et al. (2006)
Calibration of optical tweezers with positional detection in the back
focal plane. Review of Scientific Instruments, 77(10), 103101.
[2] Berg-Sørensen, K., & Flyvbjerg, H. (2005). The colour of thermal noise
in classical Brownian motion: A feasibility study of direct
experimental observation. New Journal of Physics, 7.
See Also
--------
viscosity_H2O
"""
    if density_med is None:
        # fall back to water density, as stated in the docstring, so that the
        # effective-mass term below does not fail on None
        density_med = density_H2O(temp)
    drag_stokes = drag(radius, temp, density=density_med,
                       viscosity=viscosity)
f_c0 = f_c
drag_l = drag(radius, temp, freq=freq, height=height, density=density_med,
viscosity=viscosity, lateral=lateral, verbose=verbose)
rel_drag = drag_l / drag_stokes
# see Ref. [2]
# f_m0 = drag(radius) / (2 * pi * m*)
# m* = m_p + 2/3*pi*R³*rho_fluid
m_p = 4/3 * pi * radius**3 * rho
f_m0 = drag_stokes / (2 * pi * (m_p + 2/3 * pi * radius**3 * density_med))
P = (D * rel_drag.real /
(pi**2 * ((f_c0 + freq * rel_drag.imag - freq**2 / f_m0)**2 +
(freq * rel_drag.real)**2)))
if verbose:
print('hydro_psd:')
print('Stokes drag (Ns/m) = {:1.4e}'.format(drag_stokes))
print('f_c (Hz) = {:1.4e}'.format(f_c0))
print('corrected drag (Faxen) (Ns/m) = {:1.4e}'.format(drag_l))
        print('relative drag = {:1.4e}'.format(rel_drag))
print('mass sphere (kg) = {:1.4e}'.format(m_p))
print('f_m0 (Hz) = {:1.4e}'.format(f_m0))
print('P (arb²/Hz) = {:1.4e}'.format(P))
return P.real
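# Illustrative sketch for hydro_psd: the hydrodynamically correct spectrum of a
# 1 µm polystyrene bead 5 µm above the surface in water. All numbers are
# placeholder assumptions; density_med and viscosity are passed explicitly here
# instead of relying on the water fallback.
def _example_hydro_psd():
    from numpy import arange
    freq = arange(1.0, 1.0e5, 10.0)         # Hz
    return hydro_psd(freq, D=1.0e-3, f_c=500.0,
                     radius=0.5e-6,         # m
                     height=5.0e-6,         # m
                     temp=298.15,           # K
                     rho=1050,              # kg/m**3 (polystyrene)
                     density_med=997.0,     # kg/m**3 (water at ~25 degC)
                     viscosity=0.89e-3)     # Pa*s   (water at ~25 degC)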
def low_pass_filter(freq, f3dB, alpha=0):
"""
Produces the relative PSD of a signal that, to a factor (1-alpha²), is
filtered by a 1st-order low-pass filter.
This, in particular, describes the filtering of a quadrant photo-diode:
    Only a fraction of the photons that reach the diode is absorbed in the
    depletion region; the other fraction gets absorbed outside that region.
    The electron-hole pairs need to diffuse to the depletion region until
    they produce a photo-current. This process acts as an effective low-pass
    filter for these photons. In the power spectrum the filter function looks
    like this:
F(f) = alpha² + (1 - alpha²) / (1 + (f / f3dB)**2)
Arguments
---------
freq : array(float)
Frequency vector.
f3dB : float
Cut-off frequency of the low-pass filter.
alpha : float
        Filter efficiency. Only a fraction (0 <= alpha <= 1) of the signal
        is not low-pass filtered. Thus, alpha = 1 will produce no low-pass
        filter at all, whereas alpha = 0 will produce a 1st-order low-pass
        filter.
References
----------
Equ. (35) in:
Berg-Sørensen, K., & Flyvbjerg, H. (2004)
Power spectrum analysis for optical tweezers. Review of Scientific
Instruments, 75(3), 594–612.
http://doi.org/10.1063/1.1645654
Equ. (20) in:
Berg-So̸rensen, K., et al. (2006)
Power spectrum analysis for optical tweezers. II: Laser wavelength
dependence of parasitic filtering, and how to achieve high bandwidth.
Review of Scientific Instruments, 77(6), 063106.
http://doi.org/10.1063/1.2204589
"""
return alpha**2 + (1 - alpha**2) / (1 + (freq / f3dB)**2)
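# Quick sketch: relative attenuation of the diode low-pass filter at a few
# frequencies. f3dB and alpha are placeholder values.
def _example_low_pass_filter():
    from numpy import array
    freq = array([1.0e2, 1.0e3, 1.0e4, 1.0e5])   # Hz
    return low_pass_filter(freq, 8.0e3, alpha=0.1)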
def apply_low_pass_filter(fun, f3dB, alpha):
"""
    Decorator function to produce a low-pass filtered modification of the
    input PSD function.
See also
--------
low_pass_filter()
Arguments
---------
fun : function
Function with first positional argument being the frequency vector
'freq'.
f3dB : float
Cut-off frequency of the low-pass filter.
alpha : float
        Filter efficiency. Only a fraction (0 <= alpha <= 1) of the signal
        is not low-pass filtered. Thus, alpha = 1 will produce no low-pass
        filter at all, whereas alpha = 0 will produce a 1st-order low-pass
        filter.
Returns
-------
function with arguments (freq, *args, **kwargs).
References
----------
Equ. (35) in:
Berg-Sørensen, K., & Flyvbjerg, H. (2004)
Power spectrum analysis for optical tweezers. Review of Scientific
Instruments, 75(3), 594–612.
http://doi.org/10.1063/1.1645654
Equ. (20) in:
Berg-So̸rensen, K., et al. (2006)
Power spectrum analysis for optical tweezers. II: Laser wavelength
dependence of parasitic filtering, and how to achieve high bandwidth.
Review of Scientific Instruments, 77(6), 063106.
http://doi.org/10.1063/1.2204589
"""
def lp_filtering(freq, *args, **kwargs):
f = (low_pass_filter(freq, f3dB, alpha=alpha) *
fun(freq, *args, **kwargs))
return f
if hasattr(fun, '__name__'):
lp_filtering.__name__ = ('low-pass filtered "{0:s}"'
''.format(fun.__name__))
if hasattr(fun, '__doc__'):
lp_filtering.__doc__ = ('Low pass filtered function with cut-off '
'frequency f3dB = {0:1.1f} Hz and efficiency '
'alpha = {1:1.3f} \n\n'
'calls\n{2}{3} \n\n'
'Documentation of {2}:\n{4}'
''.format(f3dB, alpha, fun.__name__,
str(signature(fun)), fun.__doc__))
return lp_filtering
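# Sketch of how the decorator is meant to be combined with a PSD model: wrap
# the plain Lorentzian so that a fit can account for parasitic diode filtering.
# f3dB, alpha, D and f_c are placeholder values.
def _example_apply_low_pass_filter():
    from numpy import arange
    filtered_lorentzian = apply_low_pass_filter(lorentzian_psd,
                                                f3dB=8.0e3, alpha=0.1)
    freq = arange(1.0, 1.0e4, 1.0)                     # Hz
    return filtered_lorentzian(freq, 1.0e-3, 500.0)    # positional D, f_c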
def apply_aliasing(fun, f_sample, N=9):
"""
Decorator function to produce an aliased version of the input PSD function.
The aliased version of a psd is calculated by adding the psd values of
    frequencies that lie beyond the sampling frequency to the range below the
sampling frequency.
PSD_aliased(f) = sum(PSD(f + n * f_sample)) from n=-N to N.
Note
----
    Note that N is actually infinite, but a finite number of summations
    is sufficient to account for aliasing. N=5 gives about 1.5% error at
    high frequencies, N=10 should give less than 0.5%.
Arguments
---------
fun : function
Function to be aliased.
f_sample : float
Sampling frequency
N : int
Number that defines how many ranges of f_sample should be taken into
        account. The default N=9 gives a very good approximation with
        deviations of less than 0.5%.
*args
passed to the given function.
**kwargs
passed to the given function.
Returns
-------
function with arguments (freq, *args, **kwargs)
References
----------
Equ. (37) in:
Berg-Sørensen, K., & Flyvbjerg, H. (2004)
Power spectrum analysis for optical tweezers. Review of Scientific
Instruments, 75(3), 594–612.
http://doi.org/10.1063/1.1645654
"""
def aliasing(freq, *args, **kwargs):
aliased = sum(fun(freq + i * f_sample, *args, **kwargs)
for i in range(-N, N+1))
return aliased
if hasattr(fun, '__name__'):
aliasing.__name__ = 'aliased "{0:s}"'.format(fun.__name__)
    if hasattr(fun, '__doc__'):
aliasing.__doc__ = ('Aliased function with\n'
'N = {0} and f_sample = {1:1.1f} Hz. \n\n'
'Calls\n'
'{2}{3}\n\n'
'Documentation of {2}:\n{4}'
''.format(N, f_sample, fun.__name__,
str(signature(fun)), fun.__doc__))
return aliasing
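# Sketch combining both decorators: an aliased, low-pass filtered Lorentzian,
# the typical model for fitting experimental diode spectra up to the Nyquist
# frequency. The sampling rate, f3dB, alpha, D and f_c are placeholder values.
def _example_apply_aliasing():
    from numpy import arange
    f_sample = 2.0e4                              # Hz
    model = apply_aliasing(
        apply_low_pass_filter(lorentzian_psd, f3dB=8.0e3, alpha=0.1),
        f_sample)
    freq = arange(1.0, f_sample / 2, 1.0)         # up to the Nyquist frequency
    return model(freq, 1.0e-3, 500.0)             # positional D, f_c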
|
the-stack_106_13385
|
import cv2
import os
from nnlib import nnlib
from facelib import LandmarksExtractor, S3FDExtractor
import numpy as np
import math
class Handler(object):
def __init__(self):
device_config = nnlib.DeviceConfig(cpu_only=True,
force_gpu_idx=0,
allow_growth=True)
self.frame = 0
self.rects = None
self.landmarks = None
nnlib.import_all(device_config)
S3FD_model_path = os.path.join('facelib', 'S3FD.h5')
S3FD_model = nnlib.keras.models.load_model(S3FD_model_path)
self.s3fd_model = S3FDExtractor(S3FD_model)
nnlib.import_all(device_config)
self.landmark_model = LandmarksExtractor(nnlib.keras)
self.landmark_model.manual_init()
def handle_image(self, im):
self.frame += 1
lms_update = False
# if self.frame == 1:
if self.frame < 5: # for test
rects = self.s3fd_model.extract(im)
print("rects >>> ", rects)
            # draw the detected face rectangle
r = rects[0]
cv2.rectangle(im, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 4)
lms = self.landmark_model.extract(im, rects[:1])
            # draw the 68 landmark points
f1 = lms[0]
l = [3, 5, 13, 15, 30, 1, 17, 15, 26, 27, 5, 7, 11, 13, 33]
for i in range(68):
cv2.circle(im, (int(f1[i][0]), int(f1[i][1])), 2, (0, 0, 255), lineType=cv2.LINE_AA)
cv2.putText(im, str(i), (int(f1[i][0]), int(f1[i][1])), 1, 1, (255, 255, 255), 1)
cv2.imwrite(f"{self.frame}.jpg", im)
self.rects = rects
self.landmarks = lms
lms_update = True
else:
rects = self.s3fd_model.extract(im)
r = rects[0]
r1 = self.rects[0]
print(">>>>>", math.fabs(r[0] - r1[0]), math.fabs(r[1] - r1[1]))
cv2.rectangle(im, (r[0], r[1]), (r[2], r[3]), (0, 255, 0), 4)
cv2.rectangle(im, (r1[0], r1[1]), (r1[2], r1[3]), (255, 255, 255), 4)
cv2.putText(im, "{} {}".format(math.fabs(r[0] - r1[0]), math.fabs(r[1] - r1[1])), (20, 25), 1, 1, (255, 255, 255), 1)
f1 = self.landmarks[0]
l = [3, 5, 13, 15, 30, 1, 17, 15, 26, 27, 5, 7, 11, 13, 33]
for i in range(68):
cv2.circle(im, (int(f1[i][0]), int(f1[i][1])), 2, (0, 0, 255), lineType=cv2.LINE_AA)
cv2.putText(im, str(i), (int(f1[i][0]), int(f1[i][1])), 1, 1, (255, 255, 255), 1)
            # stabilization: reuse the previous landmarks if the face has barely moved
# print(rects[0], self.rects[0], type(rects))
r1 = np.array(rects[0])
r2 = np.array(self.rects[0])
c = abs(np.sum(np.array(r1) - np.array(r2)))
cv2.putText(im, "rects1:{} rects2:{} c:{} >>>> ".format(r1, r2, c), (5, 10), 1, 1, (255, 255, 255), 1)
# print("current rects:{} current landmark:{} c:{} >>>> ".format(r1, r2, c))
if c < 30:
lms = self.landmarks
else:
print("\n get new lanmark", c)
self.rects = rects
lms = self.landmark_model.extract(im, rects[:1])
self.landmarks = lms
lms_update = True
        # self.landmarks = lms  # was left out during testing
return lms, lms_update, im
def put_frame():
""" For test. """
count = 0
cap = cv2.VideoCapture('./media/jiangchao.mp4')
while cap.isOpened():
count += 1
ret, im = cap.read()
        if not ret or count > 2000:
break
h, w, _ = im.shape
lms, lms_update, im = a.handle_image(im)
out.write(im)
cv2.imshow("iii", im)
cv2.waitKey(1)
def put_img():
im = cv2.imread("data_input/test_face1.png")
idx = 0
while True:
if idx > 1:
break
lms, lms_update, im = a.handle_image(im)
cv2.imshow("iii", im)
cv2.waitKey(2)
idx += 1
def put_img_new(num):
idx = 0
while True:
if idx > num:
break
img_name = f"data_input/test_face{idx+1}.png"
im = cv2.imread(img_name)
lms, lms_update, im = face_detection.handle_image(im)
cv2.imshow("Show topaz", im)
cv2.waitKey(1)
idx += 1
if __name__ == "__main__":
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter("jvideo.mp4", fourcc, 24.0, (1920, 1080))
face_detection = Handler()
# put_frame()
# put_img()
put_img_new(4)
|
the-stack_106_13386
|
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series, isna
import pandas.util.testing as tm
class TestDataFrameCov:
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame["A"][:5] = np.nan
frame["B"][5:10] = np.nan
        result = frame.cov(min_periods=len(frame) - 8)
        expected = frame.cov()
        expected.loc["A", "B"] = np.nan
        expected.loc["B", "A"] = np.nan
        tm.assert_frame_equal(result, expected)
# regular
float_frame["A"][:5] = np.nan
float_frame["B"][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov["A"]["C"], float_frame["A"].cov(float_frame["C"]))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(
np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns
)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(
np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns,
columns=df.columns,
)
tm.assert_frame_equal(result, expected)
class TestDataFrameCorr:
# DataFrame.corr(), as opposed to DataFrame.corrwith
@staticmethod
def _check_method(frame, method="pearson"):
correls = frame.corr(method=method)
expected = frame["A"].corr(frame["C"], method=method)
tm.assert_almost_equal(correls["A"]["C"], expected)
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame["A"][:5] = np.nan
float_frame["B"][5:10] = np.nan
self._check_method(float_frame, "pearson")
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame["A"][:5] = np.nan
float_frame["B"][5:10] = np.nan
self._check_method(float_frame, "kendall")
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame["A"][:5] = np.nan
float_frame["B"][5:10] = np.nan
self._check_method(float_frame, "spearman")
# ---------------------------------------------------------------------
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame["A"][:5] = np.nan
float_frame["B"][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame(
{
"A": [1, 1.5, 1, np.nan, np.nan, np.nan],
"B": [np.nan, np.nan, np.nan, 1, 1.5, 1],
"C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
rs = df.corr(meth)
assert isna(rs.loc["A", "B"])
assert isna(rs.loc["B", "A"])
assert rs.loc["A", "A"] == 1
assert rs.loc["B", "B"] == 1
assert isna(rs.loc["C", "C"])
@td.skip_if_no_scipy
@pytest.mark.parametrize("meth", ["pearson", "spearman"])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame(
{
"A": [1, 1, 1, np.nan, np.nan, np.nan],
"B": [np.nan, np.nan, np.nan, 1, 1, 1],
}
)
rs = df.corr(meth)
assert isna(rs.values).all()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
        # so it needs to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
for meth in ["pearson", "kendall", "spearman"]:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH#14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4), columns=list("abcd"))
for method in ["cov", "corr"]:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH#22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_corr_int(self):
# dtypes other than float64 GH#1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
class TestDataFrameCorrWith:
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b["B"]
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr["A"], a["A"].corr(b["A"]))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped["A"], a["A"].corr(b["A"]))
assert "B" not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ["a", "b", "c", "d", "e"]
columns = ["one", "two", "three", "four"]
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ["A", "B", "C", "D"]
df1["obj"] = "foo"
df2["obj"] = "bar"
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame["A"])
expected = datetime_frame.apply(datetime_frame["A"].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=["a"])
df2 = DataFrame(np.arange(10000) ** 2, columns=["a"])
c1 = df1.corrwith(df2)["a"]
c2 = np.corrcoef(df1["a"], df2["a"])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH#18570
df = pd.DataFrame(
{"a": [1, 4, 3, 2], "b": [4, 6, 7, 3], "c": ["a", "b", "c", "d"]}
)
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df["a"].corr(s), df["b"].corr(s)]
expected = pd.Series(data=corrs, index=["a", "b"])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH#21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH#21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df ** 2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH#21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df ** 2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
|
the-stack_106_13389
|
import logging
import pickle
from typing import Any, Dict, List, Optional, Tuple, Union
from ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION
from ray.tune.sample import Categorical, Domain, Float, Integer, LogUniform, \
Quantized, Uniform
from ray.tune.suggest.suggestion import UNRESOLVED_SEARCH_SPACE, \
UNDEFINED_METRIC_MODE, UNDEFINED_SEARCH_SPACE
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils.util import flatten_dict, unflatten_dict
try:
import optuna as ot
from optuna.samplers import BaseSampler
except ImportError:
ot = None
BaseSampler = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
# Deprecate: 1.5
class _Param:
def __getattr__(self, item):
def _inner(*args, **kwargs):
return (item, args, kwargs)
return _inner
param = _Param()
class OptunaSearch(Searcher):
"""A wrapper around Optuna to provide trial suggestions.
`Optuna <https://optuna.org/>`_ is a hyperparameter optimization library.
In contrast to other libraries, it employs define-by-run style
hyperparameter definitions.
This Searcher is a thin wrapper around Optuna's search algorithms.
You can pass any Optuna sampler, which will be used to generate
hyperparameter suggestions.
Please note that this wrapper does not support define-by-run, so the
search space will be configured before running the optimization. You will
also need to use a Tune trainable (e.g. using the function API) with
this wrapper.
For defining the search space, use ``ray.tune.suggest.optuna.param``
(see example).
Args:
        space (dict|list): Hyperparameter search space definition for
            Optuna's sampler. Preferably a dict mapping parameter names to
            ``optuna.distributions`` objects; a list of ``param.suggest_*()``
            tuples is also accepted but deprecated.
metric (str): The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
            by default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
points_to_evaluate (list): Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
sampler (optuna.samplers.BaseSampler): Optuna sampler used to
draw hyperparameter configurations. Defaults to ``TPESampler``.
Tune automatically converts search spaces to Optuna's format:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
config = {
"a": tune.uniform(6, 8)
"b": tune.loguniform(1e-4, 1e-2)
}
optuna_search = OptunaSearch(
metric="loss",
mode="min")
tune.run(trainable, config=config, search_alg=optuna_search)
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
config = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
            config,
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
.. versionadded:: 0.8.8
"""
def __init__(self,
space: Optional[Union[Dict, List[Tuple]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[Dict]] = None,
sampler: Optional[BaseSampler] = None):
assert ot is not None, (
"Optuna must be installed! Run `pip install optuna`.")
super(OptunaSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=None,
use_early_stopped_trials=None)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self).__name__))
space = self.convert_search_space(space)
else:
# Flatten to support nested dicts
space = flatten_dict(space, "/")
# Deprecate: 1.5
if isinstance(space, list):
logger.warning(
"Passing lists of `param.suggest_*()` calls to OptunaSearch "
"as a search space is deprecated and will be removed in "
"a future release of Ray. Please pass a dict mapping "
"to `optuna.distributions` objects instead.")
self._space = space
self._points_to_evaluate = points_to_evaluate or []
self._study_name = "optuna" # Fixed study name for in-memory storage
self._sampler = sampler or ot.samplers.TPESampler()
assert isinstance(self._sampler, BaseSampler), \
"You can only pass an instance of `optuna.samplers.BaseSampler` " \
"as a sampler to `OptunaSearcher`."
self._ot_trials = {}
self._ot_study = None
if self._space:
self._setup_study(mode)
def _setup_study(self, mode: str):
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
pruner = ot.pruners.NopPruner()
storage = ot.storages.InMemoryStorage()
self._ot_study = ot.study.create_study(
storage=storage,
sampler=self._sampler,
pruner=pruner,
study_name=self._study_name,
direction="minimize" if mode == "min" else "maximize",
load_if_exists=True)
for point in self._points_to_evaluate:
self._ot_study.enqueue_trial(point)
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict) -> bool:
if self._space:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_study(mode)
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._space:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"))
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__,
metric=self._metric,
mode=self._mode))
if isinstance(self._space, list):
# Keep for backwards compatibility
# Deprecate: 1.5
if trial_id not in self._ot_trials:
self._ot_trials[trial_id] = self._ot_study.ask()
ot_trial = self._ot_trials[trial_id]
# getattr will fetch the trial.suggest_ function on Optuna trials
params = {
args[0] if len(args) > 0 else kwargs["name"]: getattr(
ot_trial, fn)(*args, **kwargs)
for (fn, args, kwargs) in self._space
}
else:
# Use Optuna ask interface (since version 2.6.0)
if trial_id not in self._ot_trials:
self._ot_trials[trial_id] = self._ot_study.ask(
fixed_distributions=self._space)
ot_trial = self._ot_trials[trial_id]
params = ot_trial.params
return unflatten_dict(params)
def on_trial_result(self, trial_id: str, result: Dict):
metric = result[self.metric]
step = result[TRAINING_ITERATION]
ot_trial = self._ot_trials[trial_id]
ot_trial.report(metric, step)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
ot_trial = self._ot_trials[trial_id]
val = result.get(self.metric, None) if result else None
try:
self._ot_study.tell(ot_trial, val)
except ValueError as exc:
logger.warning(exc) # E.g. if NaN was reported
def save(self, checkpoint_path: str):
save_object = (self._sampler, self._ot_trials, self._ot_study,
self._points_to_evaluate)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
save_object = pickle.load(inputFile)
self._sampler, self._ot_trials, self._ot_study, \
self._points_to_evaluate = save_object
@staticmethod
def convert_search_space(spec: Dict) -> Dict[str, Any]:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if not domain_vars and not grid_vars:
return {}
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to an Optuna search space.")
# Flatten and resolve again after checking for grid search.
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution:
quantize = None
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
quantize = sampler.q
sampler = sampler.sampler
if isinstance(domain, Float):
if isinstance(sampler, LogUniform):
if quantize:
logger.warning(
"Optuna does not support both quantization and "
"sampling from LogUniform. Dropped quantization.")
return ot.distributions.LogUniformDistribution(
domain.lower, domain.upper)
elif isinstance(sampler, Uniform):
if quantize:
return ot.distributions.DiscreteUniformDistribution(
domain.lower, domain.upper, quantize)
return ot.distributions.UniformDistribution(
domain.lower, domain.upper)
elif isinstance(domain, Integer):
if isinstance(sampler, LogUniform):
return ot.distributions.IntLogUniformDistribution(
domain.lower, domain.upper, step=quantize or 1)
elif isinstance(sampler, Uniform):
return ot.distributions.IntUniformDistribution(
domain.lower, domain.upper, step=quantize or 1)
elif isinstance(domain, Categorical):
if isinstance(sampler, Uniform):
return ot.distributions.CategoricalDistribution(
domain.categories)
raise ValueError(
"Optuna search does not support parameters of type "
"`{}` with samplers of type `{}`".format(
type(domain).__name__,
type(domain.sampler).__name__))
# Parameter name is e.g. "a/b/c" for nested dicts
values = {
"/".join(path): resolve_value(domain)
for path, domain in domain_vars
}
return values
|
the-stack_106_13390
|
"""Custom tags for Core application."""
import os
import re
from functools import reduce
import pkg_resources
from django import template
from django.conf import settings
from django.contrib.sessions.models import Session
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.encoding import smart_text
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, ugettext as _
from .. import models
from .. import signals
register = template.Library()
@register.simple_tag
def core_menu(selection, user):
"""Build the top level menu."""
entries = signals.extra_admin_menu_entries.send(
sender="core_menu", location="top_menu", user=user)
entries = reduce(lambda a, b: a + b, [entry[1] for entry in entries])
if user.is_superuser:
entries += [
{"name": "settings",
"label": _("Modoboa"),
"url": reverse("core:index")}
]
if not len(entries):
return ""
return render_to_string("common/menulist.html", {
"entries": entries,
"selection": selection,
"user": user}
)
@register.simple_tag
def extensions_menu(selection, user):
menu = signals.extra_user_menu_entries.send(
sender="core_menu", location="top_menu", user=user)
menu = reduce(lambda a, b: a + b, [entry[1] for entry in menu])
return render_to_string("common/menulist.html", {
"selection": selection, "entries": menu, "user": user
})
@register.simple_tag
def admin_menu(selection, user):
entries = [
{"name": "info",
"class": "ajaxnav",
"url": "info/",
"label": _("Information")},
{"name": "logs",
"class": "ajaxnav",
"url": "logs/?sort_order=-date_created",
"label": _("Logs")},
{"name": "parameters",
"class": "ajaxnav",
"url": "parameters/",
"img": "",
"label": _("Parameters")},
]
return render_to_string("common/menu.html", {
"entries": entries,
"css": "nav nav-sidebar",
"selection": selection,
"user": user
})
@register.simple_tag
def user_menu(user, selection):
entries = [
{"name": "user",
"img": "fa fa-user",
"label": user.fullname,
"menu": [
{"name": "settings",
"img": "fa fa-list",
"label": _("Settings"),
"url": reverse("core:user_index")}
]}
]
extra_entries = signals.extra_user_menu_entries.send(
sender="user_menu", location="options_menu", user=user)
extra_entries = reduce(
lambda a, b: a + b, [entry[1] for entry in extra_entries])
entries[0]["menu"] += (
extra_entries + [{"name": "logout",
"url": reverse("core:logout"),
"label": _("Logout"),
"img": "fa fa-sign-out"}]
)
return render_to_string("common/menulist.html", {
"selection": selection, "entries": entries, "user": user
})
@register.simple_tag
def uprefs_menu(selection, user):
entries = [
{"name": "profile",
"class": "ajaxnav",
"url": "profile/",
"label": _("Profile")},
{"name": "preferences",
"class": "ajaxnav",
"url": "preferences/",
"label": _("Preferences")},
{"name": "security",
"class": "ajaxnav",
"url": "security/",
"label": _("Security")},
]
if user.is_superuser:
entries.append({
"name": "api",
"class": "ajaxnav",
"url": "api/",
"label": _("API"),
})
extra_entries = signals.extra_user_menu_entries.send(
sender="user_menu", location="uprefs_menu", user=user)
extra_entries = reduce(
lambda a, b: a + b, [entry[1] for entry in extra_entries])
entries += extra_entries
entries = sorted(entries, key=lambda e: e["label"])
return render_to_string("common/menu.html", {
"entries": entries,
"css": "nav nav-sidebar",
"selection": selection,
"user": user
})
@register.filter
def colorize_level(level):
"""A simple filter a text using a boostrap color."""
classes = {
"INFO": "text-info",
"WARNING": "text-warning",
"CRITICAL": "text-danger"
}
if level not in classes:
return level
return "<p class='%s'>%s</p>" % (classes[level], level)
@register.filter
def tohtml(message):
"""Simple tag to format a text using HTML."""
return re.sub(r"'(.*?)'", r"<strong>\g<1></strong>", message)
@register.simple_tag
def visirule(field):
if not hasattr(field, "form") or \
not hasattr(field.form, "visirules") or \
field.html_name not in field.form.visirules:
return ""
rule = field.form.visirules[field.html_name]
return mark_safe(
" data-visibility-field='{}' data-visibility-value='{}' "
.format(rule["field"], rule["value"]))
@register.simple_tag
def get_version():
return pkg_resources.get_distribution("modoboa").version
class ConnectedUsers(template.Node):
def __init__(self, varname):
self.varname = varname
def render(self, context):
sessions = Session.objects.filter(expire_date__gte=timezone.now())
uid_list = []
# Build a list of user ids from that query
for session in sessions:
data = session.get_decoded()
uid = data.get("_auth_user_id", None)
if uid:
uid_list.append(uid)
# Query all logged in users based on id list
context[self.varname] = (
models.User.objects.filter(pk__in=uid_list).distinct())
return ""
@register.tag
def connected_users(parser, token):
try:
tag, a, varname = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
"connected_users usage: {% connected_users as users %}"
)
return ConnectedUsers(varname)
@register.simple_tag
def get_modoboa_logo():
try:
logo = settings.MODOBOA_CUSTOM_LOGO
except AttributeError:
logo = None
if logo is None:
return os.path.join(settings.STATIC_URL, "css/modoboa.png")
return logo
@register.simple_tag
def load_optionalmenu(user):
menu = signals.extra_user_menu_entries.send(
sender="user_menu", location="top_menu_middle", user=user)
menu = reduce(
lambda a, b: a + b, [entry[1] for entry in menu])
return template.loader.render_to_string(
"common/menulist.html",
{"entries": menu, "user": user}
)
@register.simple_tag
def display_messages(msgs):
text = ""
level = "info"
for m in msgs:
level = m.tags
text += smart_text(m) + "\\\n"
if level == "info":
level = "success"
timeout = "2000"
else:
timeout = "undefined"
return mark_safe("""
<script type="text/javascript">
$(document).ready(function() {
$('body').notify('%s', '%s', %s);
});
</script>
""" % (level, text, timeout))
@register.filter
def currencyfmt(amount):
"""Simple temp. filter to replace babel."""
lang = get_language()
if lang == "fr":
return u"{} €".format(amount)
return u"€{}".format(amount)
|
the-stack_106_13396
|
from agent import source, cli
from ..test_zpipeline_base import TestInputBase
class TestMongo(TestInputBase):
__test__ = True
params = {
'test_create': [{'name': 'test_value_const', 'options': ['-a'], 'value': 'y\nclicksS\ny\n\n \n ',
'timestamp': 'timestamp_unix\nunix', 'advanced_options': 'key1:val1\n\n\n'},
{'name': 'test_timestamp_ms', 'options': [], 'value': 'n\nClicks:gauge\nClicks:clicks',
'timestamp': 'timestamp_unix_ms\nunix_ms', 'advanced_options': '\n\n'},
{'name': 'test_timestamp_datetime', 'options': [], 'value': 'n\nClicks:gauge\nClicks:clicks',
'timestamp': 'timestamp_datetime\ndatetime', 'advanced_options': '\n\n'},
{'name': 'test_timestamp_string', 'options': ['-a'], 'value': 'n\n\n\nClicks:gauge\nClicks:clicks',
'timestamp': 'timestamp_string\nstring\nM/d/yyyy H:mm:ss',
'advanced_options': 'key1:val1\n\n\n'}],
'test_edit': [{'options': ['-a', 'test_value_const'], 'value': 'y\nclicks\n\n\n\n'}],
'test_create_with_file': [{'file_name': 'mongo_pipelines'}],
'test_create_source_with_file': [{'file_name': 'mongo_sources'}],
}
def test_source_create(self, cli_runner):
result = cli_runner.invoke(cli.source.create, catch_exceptions=False,
input="""mongo\ntest_mongo\nmongodb://mongo:27017\nroot\nroot\nadmin\ntest\nadtech\n\n2015-01-02 00:00:00\n\n\n\n""")
assert result.exit_code == 0
assert source.repository.exists('test_mongo')
def test_source_edit(self, cli_runner):
result = cli_runner.invoke(cli.source.edit, ['test_mongo'], catch_exceptions=False,
input="""\n\n\n\n\n\n\n2015-01-01 00:00:00\n\n\n\n""")
source_ = source.repository.get_by_name_without_session('test_mongo')
assert source_.config['configBean.initialOffset'] == '2015-01-01 00:00:00'
assert result.exit_code == 0
def test_create(self, cli_runner, name, options, value, timestamp, advanced_options):
result = cli_runner.invoke(cli.pipeline.create, options, catch_exceptions=False,
input=f"test_mongo\n{name}\n\n{value}\n{timestamp}\nver Country\nExchange optional_dim ad_type ADTYPE GEN\n{advanced_options}\n")
assert result.exit_code == 0
def test_edit(self, cli_runner, options, value):
result = cli_runner.invoke(cli.pipeline.edit, options, catch_exceptions=False,
input=f"\n{value}\n\n\n\n\n\n\n\n\n\n\n\n")
assert result.exit_code == 0
|
the-stack_106_13397
|
# coding=utf-8
# Copyright 2018 The Batfish Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for reference library."""
from __future__ import absolute_import, print_function
import pytest
from pybatfish.datamodel.referencelibrary import NodeRolesData, ReferenceLibrary
def test_empty_referencelibrary():
"""Check proper deserialization for a reference library dict."""
dict = {}
reference_library = ReferenceLibrary(**dict)
assert len(reference_library.books) == 0
dict = {
"books": []
}
reference_library = ReferenceLibrary(**dict)
assert len(reference_library.books) == 0
def test_referencelibrary_addressgroups():
"""Test deserialization for a reference library with address groups."""
dict = {
"books": [
{
"name": "book1",
"addressGroups": [
{
"name": "ag1",
"addresses": [
"1.1.1.1/24",
"2.2.2.2",
"3.3.3.3:0.0.0.8"
]
},
{
"name": "ag2"
}
]
},
{
"name": "book2",
}
]
}
reference_library = ReferenceLibrary.from_dict(dict)
assert len(reference_library.books) == 2
assert reference_library.books[0].name == "book1"
assert len(reference_library.books[0].addressGroups) == 2
assert reference_library.books[0].addressGroups[0].name == "ag1"
assert len(reference_library.books[0].addressGroups[0].addresses) == 3
def test_referencelibrary_interfacegroups():
"""Test deserialization for a reference library with interface groups."""
dict = {
"books": [
{
"name": "book1",
"interfaceGroups": [
{
"name": "g1",
"interfaces": [
{
"hostname": "h1",
"interface": "i1"
},
{
"hostname": "h2",
"interface": "i2"
}
]
},
{
"name": "g2"
}
]
},
{
"name": "book2",
}
]
}
reference_library = ReferenceLibrary.from_dict(dict)
assert len(reference_library.books) == 2
assert reference_library.books[0].name == "book1"
assert len(reference_library.books[0].interfaceGroups) == 2
assert reference_library.books[0].interfaceGroups[0].name == "g1"
assert len(reference_library.books[0].interfaceGroups[0].interfaces) == 2
def test_noderolesdata():
"""Check proper deserialization for a node roles data."""
dict = {
"roleDimensions": [
{
"name": "dim1",
"type": "CUSTOM",
"roles": [
{
"name": "role1",
"regex": "regex",
},
{
"name": "role2",
"regex": "regex",
},
]
},
]
}
nodeRoleData = NodeRolesData.from_dict(dict)
assert len(nodeRoleData.roleDimensions) == 1
assert len(nodeRoleData.roleDimensions[0].roles) == 2
assert nodeRoleData.roleDimensions[0].roles[0].name == "role1"
if __name__ == "__main__":
pytest.main()
|
the-stack_106_13398
|
import re;
from .fu0ValueFromCdbHexOutput import fu0ValueFromCdbHexOutput;
grbSymbolOrAddress = re.compile(
rb"\A\s*" # optional whitespace
rb"(?:" # either {
rb"<Unloaded_" # "<Unloaded_"
rb"(.*)" # <<<module file name>>>
rb">" # ">"
rb"(?:" # optional{
rb"\+0x0*" rb"([0-9`a-f]+?)" # "+0x" "0"... <<<hex offset in unloaded module>>>
rb")?" # }
rb"|" # } or {
rb"(\w+)" # <<<cdb module id>>>
rb"(?:" # optional either {
rb"\+0x0*" rb"([0-9`a-f]+?)" # "+0x" "0"... <<<hex offset in module>>>
rb"|" # } or {
rb"!" rb"(.+?)" # "!" <<<function name>>>
rb"(?:" # optional {
rb"([\+\-])" rb"0x0*" rb"([0-9`a-f]+?)" # ["+" or "-"] "0x" "0"... <<<hex offset in function>>>
rb")?" # }
rb")?" # }
rb"|" # } or {
rb"(?:0x)?" # optional { "0x" }
rb"([0-9`a-f]+)" # <<<address>>>
rb")" # }
rb"\s*\Z" # optional whitespace
);
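# Added for illustration (not in the original file): example inputs this regex
# accepts and what the interesting capture groups come out as:
#   b"ntdll!RtlUserThreadStart+0x21"  -> module id "ntdll", function "RtlUserThreadStart", offset +0x21
#   b"ntdll+0x1a2b"                   -> module id "ntdll", module offset 0x1a2b
#   b"<Unloaded_foo.dll>+0x10"        -> unloaded module "foo.dll", offset 0x10
#   b"0x00007ff6`12345678"            -> plain address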
def cProcess_ftxSplitSymbolOrAddress(oProcess, sbSymbolOrAddress):
obSymbolOrAddressMatch = grbSymbolOrAddress.match(sbSymbolOrAddress);
assert obSymbolOrAddressMatch, \
"Symbol or address does not match a known format: %s" % repr(sbSymbolOrAddress);
(
sb0UnloadedModuleFileName, sb0UnloadedModuleOffset,
sb0ModuleCdbIdOrAddress, sb0ModuleOffset,
sb0FunctionSymbol, sbPlusOrMinusOffset, sb0OffsetInFunction,
sb0Address,
) = obSymbolOrAddressMatch.groups();
u0Address = None;
o0Module = None;
u0ModuleOffset = fu0ValueFromCdbHexOutput(sb0ModuleOffset);
o0Function = None;
i0OffsetFromStartOfFunction = None;
if sb0Address is not None:
u0Address = fu0ValueFromCdbHexOutput(sb0Address);
elif sb0UnloadedModuleFileName is not None:
# sb0UnloadedModuleFileName is returned without modification
u0ModuleOffset = fu0ValueFromCdbHexOutput(sb0UnloadedModuleOffset);
elif sb0ModuleCdbIdOrAddress == b"SharedUserData":
# "ShareUserData" is a symbol outside of any module that gets used as a module name in cdb.
# Any value referencing it will be converted to an address:
u0Address = oProcess.fuGetAddressForSymbol(b"%s!%s" % (sb0ModuleCdbIdOrAddress, sb0FunctionSymbol));
if u0ModuleOffset: u0Address += u0ModuleOffset;
else:
# a module cdb id can be "cdb", which is aldo a valid address; let's try
# to resolve it as a cdb id first:
o0Module = oProcess.fo0GetOrCreateModuleForCdbId(sb0ModuleCdbIdOrAddress);
if o0Module is None:
# That failed; it is an address.
u0Address = fu0ValueFromCdbHexOutput(sb0ModuleCdbIdOrAddress);
elif sb0FunctionSymbol is not None:
o0Function = o0Module.foGetOrCreateFunctionForSymbol(sb0FunctionSymbol);
i0OffsetFromStartOfFunction = (
0 if sb0OffsetInFunction is None else
fu0ValueFromCdbHexOutput(sb0OffsetInFunction) * (1 if sbPlusOrMinusOffset == b"+" else -1)
);
return (
u0Address,
sb0UnloadedModuleFileName, o0Module, u0ModuleOffset,
o0Function, i0OffsetFromStartOfFunction
);
|
the-stack_106_13399
|
import abc
import sys
import traceback
from cosmic_ray.work_record import WorkRecord
class TestOutcome:
"""A enum of the possible outcomes for any mutant test run.
"""
SURVIVED = 'survived'
KILLED = 'killed'
INCOMPETENT = 'incompetent'
class TestRunner(metaclass=abc.ABCMeta):
"""Specifies the interface for test runners in the system.
There are many ways to run unit tests in Python, and each method
supported by Cosmic Ray should be provided by a TestRunner
implementation.
"""
def __init__(self, test_args):
self._test_args = test_args
@property
def test_args(self):
"""The sequence of arguments for the test runner.
"""
return self._test_args
@abc.abstractmethod
def _run(self):
"""Run all of the tests and return the results.
The results are returned as a (success, result)
tuple. `success` is a boolean indicating if the tests
passed. `result` is any object that is appropriate to provide
more information about the success/failure of the tests.
"""
raise NotImplementedError()
def __call__(self):
"""Call `_run()` and return a `WorkRecord` with the results.
Returns: A `WorkRecord` with the `test_outcome` and `data` fields
filled in.
"""
try:
test_result = self._run()
if test_result[0]:
return WorkRecord(
test_outcome=TestOutcome.SURVIVED,
data=test_result[1])
else:
return WorkRecord(
test_outcome=TestOutcome.KILLED,
data=test_result[1])
except Exception:
return WorkRecord(
test_outcome=TestOutcome.INCOMPETENT,
data=traceback.format_exception(*sys.exc_info()))
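# Added for illustration (not part of cosmic_ray): a hypothetical minimal
# TestRunner that treats each entry in `test_args` as a zero-argument callable.
# Any exception raised by a callable counts as a failing test, so the mutant is
# reported as killed; real runners (unittest, pytest, ...) are more involved.
class CallableTestRunner(TestRunner):
    def _run(self):
        failures = []
        for test in self.test_args:
            try:
                test()
            except Exception as exc:
                # A failing or erroring test means the mutant did not survive.
                failures.append(repr(exc))
        return (not failures, failures)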
|
the-stack_106_13401
|
from __future__ import unicode_literals
import datetime
from django.test import RequestFactory, TestCase, override_settings
from model_mommy import mommy
from regulations3k.models import EffectiveVersion, Part
from regulations3k.views import get_version_date, redirect_eregs
@override_settings(FLAGS={'REGULATIONS3K': [('boolean', True)]})
class RedirectRegulations3kTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.test_reg_1002 = mommy.make(
Part,
part_number='1002')
self.test_reg_1005 = mommy.make(
Part,
part_number='1005')
self.test_version_1002 = mommy.make(
EffectiveVersion,
part=self.test_reg_1002,
effective_date=datetime.date(2016, 7, 11),
draft=False)
self.test_version_1002_2011 = mommy.make(
EffectiveVersion,
part=self.test_reg_1002,
effective_date=datetime.date(2011, 12, 30),
draft=False)
self.test_version_1002_not_live = mommy.make(
EffectiveVersion,
part=self.test_reg_1002,
effective_date=datetime.date(2014, 1, 10),
draft=True)
self.test_version_1005 = mommy.make(
EffectiveVersion,
part=self.test_reg_1005,
effective_date=datetime.date(2013, 3, 26),
draft=False)
def test_redirect_base_url(self):
request = self.factory.get('/eregulations/')
response = redirect_eregs(request)
self.assertEqual(response.get('location'),
'/policy-compliance/rulemaking/regulations/')
def test_redirect_reg_base_url(self):
request = self.factory.get('/eregulations/1002')
response = redirect_eregs(request)
self.assertEqual(response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/')
def test_redirect_reg_section_url(self):
request = self.factory.get(
'/eregulations/1002-1/2017-20417_20180101')
response = redirect_eregs(request)
self.assertEqual(response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/1/')
def test_redirect_search(self):
request = self.factory.get(
'/eregulations/search/1002',
{'q': 'california', 'version': '2011-1'})
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/'
'search-regulations/results/?regs=1002&q=california')
def test_redirect_invalid_part(self):
request = self.factory.get(
'/eregulations/1020-1/2017-20417_20180101')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/')
def test_redirect_invalid_part_pattern(self):
request = self.factory.get(
'/eregulations/102/')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/')
def test_redirect_past_version(self):
request = self.factory.get(
'/eregulations/1002-1/2016-16301')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/2016-07-11/1/')
def test_redirect_past_version_not_live(self):
request = self.factory.get(
'/eregulations/1002-1/2013-22752_20140110')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/1/')
def test_redirect_interp_appendix(self):
request = self.factory.get(
'/eregulations/1002-Appendices-Interp/2016-16301')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/'
'1002/2016-07-11/Interp-C/')
def test_redirect_interp_appendix_invalid_date(self):
request = self.factory.get(
'/eregulations/1024-Appendices-Interp/2017-20417_20180101')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1024/Interp-MS/')
def test_redirect_interp_intro(self):
request = self.factory.get(
'/eregulations/1002-Interp-h1/2016-16301')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/'
'2016-07-11/h1-Interp/')
def test_redirect_interp_intro_bad_version(self):
request = self.factory.get(
'/eregulations/1030-Interp-h1/2011-31727')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1030/Interp-0/')
def test_redirect_interp_section_past(self):
request = self.factory.get(
'/eregulations/1002-Subpart-Interp/2016-16301')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/'
'2016-07-11/Interp-1/')
def test_redirect_interp_section_past_lowercase(self):
# troublemaker URL on launch day
request = self.factory.get(
'/eregulations/1002-subpart-interp/2011-31714')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/'
'2011-12-30/Interp-1/')
def test_interp_section_current(self):
request = self.factory.get(
'/eregulations/1002-Subpart-Interp/2017-20417_20180101')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/Interp-1/')
def test_interp_section_no_subpart_with_default_section(self):
# another launch troublemaker
request = self.factory.get(
'/eregulations/1005-Interp/2013-06861')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1005/'
'2013-03-26/Interp-2/')
def test_redirect_no_pattern_match_after_part(self):
"""This tests our final fall-through redirect"""
request = self.factory.get(
'/eregulations/1002/9999/')
response = redirect_eregs(request)
self.assertEqual(
response.get('location'),
'/policy-compliance/rulemaking/regulations/1002/')
def test_get_version_date_bad_doc_number(self):
part = '1002'
doc = '2015-16301'
self.assertIs(get_version_date(part, doc), None)
|
the-stack_106_13402
|
#######################################################################
# Copyright (C) 2017 Shangtong Zhang([email protected]) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import pickle
import os
def run_episodes(agent):
config = agent.config
window_size = 100
ep = 0
rewards = []
steps = []
avg_test_rewards = []
agent_type = agent.__class__.__name__
while True:
ep += 1
reward, step = agent.episode()
rewards.append(reward)
steps.append(step)
avg_reward = np.mean(rewards[-window_size:])
config.logger.info('episode %d, reward %f, avg reward %f, total steps %d, episode step %d' % (
ep, reward, avg_reward, agent.total_steps, step))
if config.save_interval and ep % config.save_interval == 0:
with open('data/%s-%s-online-stats-%s.bin' % (
agent_type, config.tag, agent.task.name), 'wb') as f:
pickle.dump([steps, rewards], f)
if config.episode_limit and ep > config.episode_limit:
break
if config.max_steps and agent.total_steps > config.max_steps:
break
if config.test_interval and ep % config.test_interval == 0:
config.logger.info('Testing...')
agent.save('data/%s-%s-model-%s.bin' % (agent_type, config.tag, agent.task.name))
test_rewards = []
for _ in range(config.test_repetitions):
test_rewards.append(agent.episode(True)[0])
avg_reward = np.mean(test_rewards)
avg_test_rewards.append(avg_reward)
config.logger.info('Avg reward %f(%f)' % (
avg_reward, np.std(test_rewards) / np.sqrt(config.test_repetitions)))
with open('data/%s-%s-all-stats-%s.bin' % (agent_type, config.tag, agent.task.name), 'wb') as f:
pickle.dump({'rewards': rewards,
'steps': steps,
'test_rewards': avg_test_rewards}, f)
if avg_reward > config.success_threshold:
break
agent.close()
return steps, rewards, avg_test_rewards
def run_iterations(agent):
config = agent.config
agent_type = agent.__class__.__name__
iteration = 0
while True:
agent.iteration()
if iteration % config.iteration_log_interval == 0:
config.logger.info('total steps %d, mean/max/min reward %f/%f/%f' % (
agent.total_steps, np.mean(agent.last_episode_rewards),
np.max(agent.last_episode_rewards),
np.min(agent.last_episode_rewards)
))
iteration += 1
def sync_grad(target_network, src_network):
for param, src_param in zip(target_network.parameters(), src_network.parameters()):
param._grad = src_param.grad.clone()
def mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
class Batcher:
def __init__(self, batch_size, data):
self.batch_size = batch_size
self.data = data
self.num_entries = len(data[0])
self.reset()
def reset(self):
self.batch_start = 0
self.batch_end = self.batch_start + self.batch_size
def end(self):
return self.batch_start >= self.num_entries
def next_batch(self):
batch = []
for d in self.data:
batch.append(d[self.batch_start: self.batch_end])
self.batch_start = self.batch_end
self.batch_end = min(self.batch_start + self.batch_size, self.num_entries)
return batch
def shuffle(self):
indices = np.arange(self.num_entries)
np.random.shuffle(indices)
self.data = [d[indices] for d in self.data]
|
the-stack_106_13403
|
# qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.y(input_qubit[2]) # number=5
prog.y(input_qubit[2]) # number=6
prog.y(input_qubit[2]) # number=7
prog.y(input_qubit[2]) # number=8
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_noisy53.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_106_13405
|
#!/usr/bin/env python
import re
import logging
import argparse
import requests
from plexapi.myplex import MyPlexAccount
logging.basicConfig(format='%(message)s', level=logging.INFO)
logging.getLogger('plexapi').setLevel(logging.CRITICAL)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("kodi_api_url", type=str, help="Kodi API URL IE: http://192.168.0.190:8080")
parser.add_argument("plex_username", type=str, help="Plex Account Username")
parser.add_argument("plex_password", type=str, help="Plex Account Password")
parser.add_argument("plex_server_name", type=str, help="Plex Server Name IE: media")
def get_json(rsp):
rsp.raise_for_status()
data = rsp.json()
if 'error' in data:
raise Exception('Kodi API Error: %s', data['error']['message'])
return data.get('result', {})
def get_movies(api_url):
payload = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetMovies',
'filter': {'field': 'playcount', 'operator': 'greaterthan', 'value': '0'},
'params': {'properties': ['playcount', 'imdbnumber', 'lastplayed']},
'id': 'libMovies'
}
data = get_json(requests.post(api_url, json=payload))
return dict((m['imdbnumber'], m) for m in data.get('movies', []))
def get_tv(api_url):
tv_shows = {}
payload_tv = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetTVShows',
'params': {'properties': ['uniqueid']},
'id': 'libTVShows'
}
data = get_json(requests.post(api_url, json=payload_tv))
tv_shows_data = dict((m['tvshowid'], m) for m in data.get('tvshows', []))
payload_ep = {
'jsonrpc': '2.0', 'method': 'VideoLibrary.GetEpisodes',
'params': {'properties': ['season', 'episode', 'uniqueid', 'playcount', 'tvshowid']},
'id': 'libMovies'
}
data = get_json(requests.post(api_url, json=payload_ep))
for ep in data.get('episodes', []):
tvdb_id = tv_shows_data.get(ep['tvshowid'], {}).get('uniqueid', {}).get('unknown')
if not tvdb_id:
continue
if tvdb_id not in tv_shows:
tv_shows[tvdb_id] = {}
tv_show = tv_shows[tvdb_id]
if ep['season'] not in tv_show:
tv_show[ep['season']] = {}
tv_show_season = tv_show[ep['season']]
tv_show_season[ep['episode']] = ep
return tv_shows
if __name__ == '__main__':
args = parser.parse_args()
kodi_api_url = '%s/jsonrpc' % args.kodi_api_url.rstrip('/')
plex = None
try:
account = MyPlexAccount(args.plex_username, args.plex_password)
plex = account.resource(args.plex_server_name).connect()
except Exception as e:
log.exception('Error connecting to Plex %s' % str(e))
exit(1)
# TVShows
try:
log.info('Getting Kodi Episodes List')
kodi_episodes = get_tv(kodi_api_url)
log.info('Getting Plex TVShows')
plex_episodes = plex.library.section('TV Shows').search(unwatched=True, libtype='episode')
log.info('Sorting through Plex Episodes to detect watched from Kodi')
for epsiode in plex_episodes:
# Only support TheTVDB parsed shows
tvdb_match = re.search(r'thetvdb://([0-9]+)/', epsiode.guid)
if tvdb_match:
kodi_ep = kodi_episodes.get(tvdb_match.group(1), {}).get(epsiode.seasonNumber, {}).get(epsiode.index)
if kodi_ep:
if kodi_ep.get('playcount') > 0 and not epsiode.isWatched:
log.info('Marking epsiode %s S%sE%s as watched' %
(epsiode.grandparentTitle, epsiode.seasonNumber, epsiode.index))
epsiode.markWatched()
except Exception as e:
log.exception('Error processing TVShows %s' % str(e))
exit(1)
# Movies
try:
log.info('Getting Kodi Movie List')
kodi_movies = []
kodi_movies = get_movies(kodi_api_url)
log.info('Getting Plex Movies')
plex_movies = plex.library.section('Movies').search(unwatched=True)
log.info('Sorting through Plex Movies to detect watched from Kodi')
for movie in plex_movies:
# Only support IMDB parsed movies
imdb_match = re.search(r'((?:nm|tt)[\d]{7})', movie.guid)
if imdb_match:
imdb_id = imdb_match.group(1)
kodi_movie = kodi_movies.get(imdb_id)
if kodi_movie:
if kodi_movie.get('playcount') > 0 and not movie.isWatched:
log.info('Marking movie %s as watched' % movie.title)
movie.markWatched()
except Exception as e:
log.critical('Error processing Movies %s' % str(e))
exit(1)
|
the-stack_106_13406
|
import torch
def create_model(opt):
if opt.model == 'pix2pixHD':
from .pix2pixHD_model import Pix2PixHDModel, InferenceModel
if opt.isTrain:
model = Pix2PixHDModel()
else:
model = InferenceModel()
else:
from .ui_model import UIModel
model = UIModel()
model.initialize(opt)
if opt.verbose:
print("model [%s] was created" % (model.name()))
if opt.isTrain and len(opt.gpu_ids) and not opt.fp16:
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
return model
|
the-stack_106_13407
|
import scipy.io as sio
import numpy as np
import pickle
data_dict = {}
def get_pred(seq_idx, frame_idx):
global data_dict
seq_idx = seq_idx + 1
if not seq_idx in data_dict:
# data = sio.loadmat('./results/%d.mat'%seq_idx)['preds']
data = pickle.load(open('mupots/pred/%d.pkl'%seq_idx, 'rb'))
data_dict[seq_idx] = np.float32(data)
data = data_dict[seq_idx]
return data[frame_idx]
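# Added for illustration (not in the original file); assumes the pickled
# prediction files exist under mupots/pred/:
#   pose = get_pred(0, 0)   # predictions for sequence 1 (seq_idx is 0-based), frame 0
#   print(pose.shape)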
|
the-stack_106_13409
|
import gspread
import json
from docassemble.base.util import get_config
from oauth2client.service_account import ServiceAccountCredentials
credential_info = json.loads(get_config('google').get('service account credentials'), strict=False)
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
__all__ = ['read_sheet', 'append_to_sheet']
def read_sheet(sheet_key, worksheet_index):
creds = ServiceAccountCredentials.from_json_keyfile_dict(credential_info, scope)
client = gspread.authorize(creds)
sheet = client.open_by_key(sheet_key).get_worksheet(worksheet_index)
return sheet.get_all_records()
def append_to_sheet(sheet_key, vals, worksheet_index=0):
creds = ServiceAccountCredentials.from_json_keyfile_dict(credential_info, scope)
client = gspread.authorize(creds)
sheet = client.open_by_key(sheet_key).get_worksheet(worksheet_index)
sheet.append_row(vals)
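# Added for illustration (not in the original file); the spreadsheet key below
# is a hypothetical placeholder:
#   rows = read_sheet('1AbC...xyz', 0)              # list of dicts, one per data row
#   append_to_sheet('1AbC...xyz', ['Jane', 'Doe'])  # appends a row to worksheet 0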
|
the-stack_106_13410
|
# -*- encoding: utf-8 -*-
from ..libs.sublimefunctions import *
from .appcommand import AppCommand
class FmOpenInBrowserCommand(AppCommand):
def run(self, paths=None, *args, **kwargs):
self.window = get_window()
self.view = get_view()
if paths is None:
paths = [self.view.file_name()]
url = self.view.settings().get("url")
if url is not None:
url = url.strip("/")
files = []
folders = self.window.folders()
for path in paths:
if url is None:
sublime.run_command("open_url", {"url": "file:///" + path})
else:
for folder in folders:
if folder in path:
if os.path.splitext(os.path.basename(path))[0] == "index":
path = os.path.dirname(path)
sublime.run_command(
"open_url", {"url": url + path.replace(folder, "")}
)
break
else:
sublime.run_command("open_url", {"url": "file:///" + path})
|
the-stack_106_13413
|
#!/usr/bin/env python
# encoding: utf-8
import logging
import argparse
import os
from init_serial import initSerial, closeSerial
import constants
parser = argparse.ArgumentParser(description='informs ESP32 about the ended game.')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument(
'-s', '--system',
help='the system (eg: atari2600, nes, snes, megadrive, fba, etc)',
required=True
)
requiredNamed.add_argument(
'-e', '--emulator',
help='the emulator (eg: lr-stella, lr-fceumm, lr-picodrive, pifba, etc)',
required=True
)
requiredNamed.add_argument(
'-r', '--rom_file',
help='the full path to the rom file (/home/pi/RetroPie/roms/mame-libretro/4player/bbmanw.zip)',
required=True
)
requiredNamed.add_argument(
'-c', '--command',
help='the full command line used to launch the emulator',
required=True
)
parser.add_argument(
'-d', '--debug',
help="Print debugging statements",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
'-v', '--verbose',
help="Print verbose",
action="store_const", dest="loglevel", const=logging.INFO,
)
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(message)s', level=args.loglevel)
log = logging.getLogger(__name__)
game = os.path.basename(args.rom_file)
def main(ser):
ser.write('END_GAME ' + game + constants.END)
if __name__ == '__main__':
ser = initSerial(log)
if ser:
main(ser)
closeSerial(ser)
log.info('done.')
|
the-stack_106_13416
|
# Copyright (c) 2016, Will Thames and contributors
# Copyright (c) 2018, Ansible Project
from ansiblelint.rules import AnsibleLintRule
META_STR_INFO = (
'author',
'description'
)
META_INFO = tuple(list(META_STR_INFO) + [
'license',
'min_ansible_version',
'platforms',
])
def _platform_info_errors_itr(platforms):
if not isinstance(platforms, list):
yield 'Platforms should be a list of dictionaries'
return
for platform in platforms:
if not isinstance(platform, dict):
yield 'Platforms should be a list of dictionaries'
elif 'name' not in platform:
yield 'Platform should contain name'
def _galaxy_info_errors_itr(galaxy_info,
info_list=META_INFO,
str_info_list=META_STR_INFO):
for info in info_list:
ginfo = galaxy_info.get(info, False)
if ginfo:
if info in str_info_list and not isinstance(ginfo, str):
yield '{info} should be a string'.format(info=info)
elif info == 'platforms':
for err in _platform_info_errors_itr(ginfo):
yield err
else:
yield 'Role info should contain {info}'.format(info=info)
class MetaMainHasInfoRule(AnsibleLintRule):
id = '701'
shortdesc = 'meta/main.yml should contain relevant info'
str_info = META_STR_INFO
info = META_INFO
description = (
'meta/main.yml should contain: ``{}``'.format(', '.join(info))
)
severity = 'HIGH'
tags = ['metadata']
version_added = 'v4.0.0'
def matchplay(self, file, data):
if file['type'] != 'meta':
return False
meta = {'meta/main.yml': data}
galaxy_info = data.get('galaxy_info', False)
if galaxy_info:
return [(meta, err) for err
in _galaxy_info_errors_itr(galaxy_info)]
return [(meta, "No 'galaxy_info' found")]
|
the-stack_106_13417
|
def find(word, letter, start):
"""Searches word for letter, starting at index start,
returns index of first hit or -1 if letter not found"""
index = start
while index < len(word):
if word[index] == letter:
return index
index = index + 1
return -1
def count(word, letter):
count = 0
for c in word:
if c == letter:
count = count + 1
print(count)
def revised_count(word, letter):
count = 0
index = 0
while True:
result = find(word, letter, index)
if result == -1:
return count
count = count + 1
index = result + 1
|
the-stack_106_13419
|
'''
Defines VGG 16 Model
'''
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers as lyrs
conv_params = {
"padding": "same",
"kernel_initializer": tf.keras.initializers.glorot_normal(),
"bias_initializer": "zeros",
"kernel_regularizer": tf.keras.regularizers.l2(0.0001)
}
class VGG16(tf.keras.Model):
'''Defines the VGG 16 Model.
Note that the model expects a 224x224 Input Image.
Args:
input_size (tuple): Tuple of the input size
activation (tf.activation): Activation function to use, default `tf.nn.relu`
'''
def __init__(self, input_size=(224, 224), activation=tf.nn.relu):
super(VGG16, self).__init__()
# define the relevant data
self.conv3_1 = lyrs.Conv2D(64, (3, 3), 1, **conv_params, activation=activation, name="conv3_1")
self.conv3_2 = lyrs.Conv2D(64, (3, 3), 1, **conv_params, activation=activation, name="conv3_2")
self.pool2_1 = lyrs.MaxPool2D((2, 2), (2, 2), 'same', name="pool2_1")
self.conv3_3 = lyrs.Conv2D(128, (3, 3), 1, **conv_params, activation=activation, name="conv3_3")
self.conv3_4 = lyrs.Conv2D(128, (3, 3), 1, **conv_params, activation=activation, name="conv3_4")
self.pool2_2 = lyrs.MaxPool2D((2, 2), (2, 2), 'same', name="pool2_2")
self.conv3_5 = lyrs.Conv2D(256, (3, 3), 1, **conv_params, activation=activation, name="conv3_5")
self.conv3_6 = lyrs.Conv2D(256, (3, 3), 1, **conv_params, activation=activation, name="conv3_6")
self.conv1_7 = lyrs.Conv2D(256, (1, 1), 1, **conv_params, activation=activation, name="conv3_7")
self.pool2_3 = lyrs.MaxPool2D((2, 2), (2, 2), 'same', name="pool2_3")
self.conv3_8 = lyrs.Conv2D(512, (3, 3), 1, **conv_params, activation=activation, name="conv3_8")
self.conv3_9 = lyrs.Conv2D(512, (3, 3), 1, **conv_params, activation=activation, name="conv3_9")
self.conv1_10 = lyrs.Conv2D(512, (1, 1), 1, **conv_params, activation=activation, name="conv3_10")
self.pool2_4 = lyrs.MaxPool2D((2, 2), (2, 2), 'same', name="pool2_4")
self.conv3_11 = lyrs.Conv2D(512, (3, 3), 1, **conv_params, activation=activation, name="conv3_11")
self.conv3_12 = lyrs.Conv2D(512, (3, 3), 1, **conv_params, activation=activation, name="conv3_12")
self.conv1_13 = lyrs.Conv2D(512, (1, 1), 1, **conv_params, activation=activation, name="conv3_13")
self.pool2_5 = lyrs.MaxPool2D((2, 2), (2, 2), 'same', name="pool2_5")
self.reshape = lyrs.Reshape((int(np.ceil(input_size[0] / 32)) * int(np.ceil(input_size[1] / 32)) * 512, ))
self.fc_14 = lyrs.Dense(4096, activation=activation, name="fc_14")
self.fc_15 = lyrs.Dense(4096, activation=activation, name="fc_15")
# NOTE: last layer will be created by the head (dense and softmax)
def call(self, input_tensor, training=False):
# build all the blocks accordingly
b1 = self.pool2_1(self.conv3_2(self.conv3_1(input_tensor)))
b2 = self.pool2_2(self.conv3_4(self.conv3_3(b1)))
b3 = self.pool2_3(self.conv1_7(self.conv3_6(self.conv3_5(b2))))
b4 = self.pool2_4(self.conv1_10(self.conv3_9(self.conv3_8(b3))))
b5 = self.pool2_5(self.conv1_13(self.conv3_12(self.conv3_11(b4))))
b5 = self.reshape(b5)
out = self.fc_15(self.fc_14(b5))
return out
|
the-stack_106_13420
|
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import shutil
from catkin_pkg.packages import find_packages
from catkin_tools.argument_parsing import add_context_args
from catkin_tools.context import Context
from catkin_tools.metadata import update_metadata
from catkin_tools.terminal_color import ColorMapper
color_mapper = ColorMapper()
clr = color_mapper.clr
# Exempt build directories
# See https://github.com/catkin/catkin_tools/issues/82
exempt_build_files = ['build_logs', '.built_by', '.catkin_tools.yaml']
setup_files = ['.catkin', 'env.sh', 'setup.bash', 'setup.sh', 'setup.zsh', '_setup_util.py']
def prepare_arguments(parser):
# Workspace / profile args
add_context_args(parser)
# Basic group
basic_group = parser.add_argument_group('Basic', 'Clean workspace subdirectories.')
add = basic_group.add_argument
add('-a', '--all', action='store_true', default=False,
help='Remove all of the *spaces associated with the given or active'
' profile. This will remove everything but the source space and the'
' hidden .catkin_tools directory.')
add('-b', '--build', action='store_true', default=False,
help='Remove the buildspace.')
add('-d', '--devel', action='store_true', default=False,
help='Remove the develspace.')
add('-i', '--install', action='store_true', default=False,
help='Remove the installspace.')
# Advanced group
advanced_group = parser.add_argument_group(
'Advanced',
"Clean only specific parts of the workspace. These options will "
"automatically enable the --force-cmake option for the next build "
"invocation.")
add = advanced_group.add_argument
add('-c', '--cmake-cache', action='store_true', default=False,
help='Clear the CMakeCache for each package, but leave build and devel spaces.')
add('-s', '--setup-files', action='store_true', default=False,
help='Clear the catkin-generated files in order to rebase onto another workspace.')
add('-o', '--orphans', action='store_true', default=False,
help='Remove only build directories whose source packages are no'
' longer enabled or in the source space. This might require'
' --force-cmake on the next build.')
return parser
def main(opts):
actions = ['all', 'build', 'devel', 'install', 'cmake_cache', 'orphans', 'setup_files']
if not any([v for (k, v) in vars(opts).items() if k in actions]):
print("[clean] No actions performed. See `catkin clean -h` for usage.")
return 0
needs_force = False
# Load the context
ctx = Context.load(opts.workspace, opts.profile, opts, strict=True, load_env=False)
if not ctx:
if not opts.workspace:
print(
"catkin clean: error: The current or desired workspace could not be "
"determined. Please run `catkin clean` from within a catkin "
"workspace or specify the workspace explicitly with the "
"`--workspace` option.")
else:
print(
"catkin clean: error: Could not clean workspace \"%s\" because it "
"either does not exist or it has no catkin_tools metadata." %
opts.workspace)
return 1
# Remove the requested spaces
if opts.all:
opts.build = opts.devel = opts.install = True
if opts.build:
if os.path.exists(ctx.build_space_abs):
print("[clean] Removing buildspace: %s" % ctx.build_space_abs)
shutil.rmtree(ctx.build_space_abs)
else:
# Orphan removal
if opts.orphans:
if os.path.exists(ctx.build_space_abs):
# TODO: Check for merged build and report error
# Get all enabled packages in source space
# Suppress warnings since this is looking for packages which no longer exist
found_source_packages = [
pkg.name for (path, pkg) in find_packages(ctx.source_space_abs, warnings=[]).items()]
# Iterate over all packages with build dirs
print("[clean] Removing orphaned build directories from %s" % ctx.build_space_abs)
no_orphans = True
for pkg_build_name in os.listdir(ctx.build_space_abs):
if pkg_build_name not in exempt_build_files:
pkg_build_path = os.path.join(ctx.build_space_abs, pkg_build_name)
# Remove package build dir if not found
if pkg_build_name not in found_source_packages:
no_orphans = False
print(" - Removing %s" % pkg_build_path)
shutil.rmtree(pkg_build_path)
if no_orphans:
print("[clean] No orphans found, nothing removed from buildspace.")
else:
# Remove the develspace
# TODO: For isolated devel, this could just remove individual packages
if os.path.exists(ctx.devel_space_abs):
print("Removing develspace: %s" % ctx.devel_space_abs)
shutil.rmtree(ctx.devel_space_abs)
needs_force = True
else:
print("[clean] No buildspace exists, no potential for orphans.")
return 0
# CMake Cache removal
if opts.cmake_cache:
# Clear the CMakeCache for each package
if os.path.exists(ctx.build_space_abs):
# Remove CMakeCaches
print("[clean] Removing CMakeCache.txt files from %s" % ctx.build_space_abs)
for pkg_build_name in os.listdir(ctx.build_space_abs):
if pkg_build_name not in exempt_build_files:
pkg_build_path = os.path.join(ctx.build_space_abs, pkg_build_name)
ccache_path = os.path.join(pkg_build_path, 'CMakeCache.txt')
if os.path.exists(ccache_path):
print(" - Removing %s" % ccache_path)
os.remove(ccache_path)
needs_force = True
else:
print("[clean] No buildspace exists, no CMake caches to clear.")
if opts.devel:
if os.path.exists(ctx.devel_space_abs):
print("[clean] Removing develspace: %s" % ctx.devel_space_abs)
shutil.rmtree(ctx.devel_space_abs)
else:
if opts.setup_files:
print("[clean] Removing setup files from develspace: %s" % ctx.devel_space_abs)
for filename in setup_files:
full_path = os.path.join(ctx.devel_space_abs, filename)
if os.path.exists(full_path):
print(" - Removing %s" % full_path)
os.remove(full_path)
needs_force = True
if opts.install:
if os.path.exists(ctx.install_space_abs):
print("[clean] Removing installspace: %s" % ctx.install_space_abs)
shutil.rmtree(ctx.install_space_abs)
if needs_force:
print(
"NOTE: Parts of the workspace have been cleaned which will "
"necessitate re-configuring CMake on the next build.")
update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': True})
return 0
|
the-stack_106_13422
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PubSub verifier used for end-to-end test."""
# pytype: skip-file
from __future__ import absolute_import
import logging
import time
from collections import Counter
from hamcrest.core.base_matcher import BaseMatcher
from apache_beam.io.gcp.pubsub import PubsubMessage
__all__ = ['PubSubMessageMatcher']
# Protect against environments where pubsub library is not available.
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
DEFAULT_TIMEOUT = 5 * 60
DEFAULT_SLEEP_TIME = 1
DEFAULT_MAX_MESSAGES_IN_ONE_PULL = 50
DEFAULT_PULL_TIMEOUT = 30.0
_LOGGER = logging.getLogger(__name__)
class PubSubMessageMatcher(BaseMatcher):
"""Matcher that verifies messages from given subscription.
This matcher can block the test and keep pulling messages from given
subscription until all expected messages are shown or timeout.
"""
def __init__(
self,
project,
sub_name,
expected_msg=None,
expected_msg_len=None,
timeout=DEFAULT_TIMEOUT,
with_attributes=False,
strip_attributes=None,
sleep_time=DEFAULT_SLEEP_TIME,
max_messages_in_one_pull=DEFAULT_MAX_MESSAGES_IN_ONE_PULL,
pull_timeout=DEFAULT_PULL_TIMEOUT):
"""Initialize PubSubMessageMatcher object.
Args:
project: A name string of project.
sub_name: A name string of subscription which is attached to output.
expected_msg: A string list that contains expected message data pulled
from the subscription. See also: with_attributes.
expected_msg_len: Number of expected messages pulled from the
subscription.
timeout: Timeout in seconds to wait for all expected messages appears.
with_attributes: If True, will match against both message data and
attributes. If True, expected_msg should be a list of ``PubsubMessage``
objects. Otherwise, it should be a list of ``bytes``.
strip_attributes: List of strings. If with_attributes==True, strip the
attributes keyed by these values from incoming messages.
If a key is missing, will add an attribute with an error message as
value to prevent a successful match.
sleep_time: Time in seconds between which the pulls from pubsub are done.
max_messages_in_one_pull: Maximum number of messages pulled from pubsub
at once.
pull_timeout: Time in seconds after which the pull from pubsub is repeated
"""
if pubsub is None:
raise ImportError('PubSub dependencies are not installed.')
if not project:
raise ValueError('Invalid project %s.' % project)
if not sub_name:
raise ValueError('Invalid subscription %s.' % sub_name)
if not expected_msg_len and not expected_msg:
raise ValueError(
'Required expected_msg: {} or expected_msg_len: {}.'.format(
expected_msg, expected_msg_len))
if expected_msg and not isinstance(expected_msg, list):
raise ValueError('Invalid expected messages %s.' % expected_msg)
if expected_msg_len and not isinstance(expected_msg_len, int):
raise ValueError('Invalid expected messages %s.' % expected_msg_len)
self.project = project
self.sub_name = sub_name
self.expected_msg = expected_msg
self.expected_msg_len = expected_msg_len or len(self.expected_msg)
self.timeout = timeout
self.messages = None
self.with_attributes = with_attributes
self.strip_attributes = strip_attributes
self.sleep_time = sleep_time
self.max_messages_in_one_pull = max_messages_in_one_pull
self.pull_timeout = pull_timeout
def _matches(self, _):
if self.messages is None:
self.messages = self._wait_for_messages(
self.expected_msg_len, self.timeout)
if self.expected_msg:
return Counter(self.messages) == Counter(self.expected_msg)
else:
return len(self.messages) == self.expected_msg_len
def _wait_for_messages(self, expected_num, timeout):
"""Wait for messages from given subscription."""
total_messages = []
sub_client = pubsub.SubscriberClient()
start_time = time.time()
while time.time() - start_time <= timeout:
response = sub_client.pull(
self.sub_name,
max_messages=self.max_messages_in_one_pull,
return_immediately=True,
timeout=self.pull_timeout)
for rm in response.received_messages:
msg = PubsubMessage._from_message(rm.message)
if not self.with_attributes:
total_messages.append(msg.data)
continue
if self.strip_attributes:
for attr in self.strip_attributes:
try:
del msg.attributes[attr]
except KeyError:
msg.attributes[attr] = (
'PubSubMessageMatcher error: '
'expected attribute not found.')
total_messages.append(msg)
ack_ids = [rm.ack_id for rm in response.received_messages]
if ack_ids:
sub_client.acknowledge(self.sub_name, ack_ids)
if len(total_messages) >= expected_num:
break
time.sleep(self.sleep_time)
if time.time() - start_time > timeout:
_LOGGER.error(
'Timeout after %d sec. Received %d messages from %s.',
timeout,
len(total_messages),
self.sub_name)
return total_messages
def describe_to(self, description):
description.append_text('Expected %d messages.' % self.expected_msg_len)
def describe_mismatch(self, _, mismatch_description):
c_expected = Counter(self.expected_msg)
c_actual = Counter(self.messages)
mismatch_description.append_text("Got %d messages. " % (len(self.messages)))
if self.expected_msg:
mismatch_description.append_text(
"Diffs (item, count):\n"
" Expected but not in actual: %s\n"
" Unexpected: %s" % ((c_expected - c_actual).items(),
(c_actual - c_expected).items()))
if self.with_attributes and self.strip_attributes:
mismatch_description.append_text(
'\n Stripped attributes: %r' % self.strip_attributes)
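# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A hedged example of how a matcher like the one above is typically combined with
# hamcrest's assert_that; the project and subscription names are hypothetical:
#
#   >>> from hamcrest import assert_that
#   >>> matcher = PubSubMessageMatcher(
#   ...     project='my-project',                                  # hypothetical
#   ...     sub_name='projects/my-project/subscriptions/my-sub',   # hypothetical
#   ...     expected_msg=[b'msg-a', b'msg-b'],
#   ...     timeout=60)
#   >>> assert_that(None, matcher)  # keeps pulling until the expected messages arrive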
|
the-stack_106_13423
|
import datetime
import calendar
import logging
import time
import numbers
import pytz
from sqlalchemy import distinct, or_, and_, UniqueConstraint, cast
from sqlalchemy.dialects import postgresql
from sqlalchemy.event import listens_for
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref, contains_eager, joinedload, subqueryload, load_only
from sqlalchemy.orm.exc import NoResultFound # noqa: F401
from sqlalchemy import func
from sqlalchemy_utils import generic_relationship
from sqlalchemy_utils.types import TSVectorType
from sqlalchemy_utils.models import generic_repr
from sqlalchemy_utils.types.encrypted.encrypted_type import FernetEngine
from redash import redis_connection, utils, settings
from redash.destinations import (
get_configuration_schema_for_destination_type,
get_destination,
)
from redash.metrics import database # noqa: F401
from redash.query_runner import (
with_ssh_tunnel,
get_configuration_schema_for_query_runner_type,
get_query_runner,
TYPE_BOOLEAN,
TYPE_DATE,
TYPE_DATETIME,
BaseQueryRunner)
from redash.utils import (
generate_token,
json_dumps,
json_loads,
mustache_render,
base_url,
sentry,
gen_query_hash)
from redash.utils.configuration import ConfigurationContainer
from redash.models.parameterized_query import ParameterizedQuery
from .base import db, gfk_type, Column, GFKBase, SearchBaseQuery, key_type, primary_key
from .changes import ChangeTrackingMixin, Change # noqa
from .mixins import BelongsToOrgMixin, TimestampMixin
from .organizations import Organization
from .types import (
EncryptedConfiguration,
Configuration,
MutableDict,
MutableList,
PseudoJSON,
pseudo_json_cast_property
)
from .users import AccessPermission, AnonymousUser, ApiUser, Group, User # noqa
logger = logging.getLogger(__name__)
class ScheduledQueriesExecutions(object):
KEY_NAME = "sq:executed_at"
def __init__(self):
self.executions = {}
def refresh(self):
self.executions = redis_connection.hgetall(self.KEY_NAME)
def update(self, query_id):
redis_connection.hmset(self.KEY_NAME, {query_id: time.time()})
def get(self, query_id):
timestamp = self.executions.get(str(query_id))
if timestamp:
timestamp = utils.dt_from_timestamp(timestamp)
return timestamp
scheduled_queries_executions = ScheduledQueriesExecutions()
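# Illustrative sketch (added; not from the original code): the tracker above keeps
# the last execution timestamp of each scheduled query in a single Redis hash.
#
#   >>> scheduled_queries_executions.update(42)   # record "query 42 just ran"
#   >>> scheduled_queries_executions.refresh()    # reload the hash from Redis
#   >>> scheduled_queries_executions.get(42)      # datetime of the last run, or None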
@generic_repr("id", "name", "type", "org_id", "created_at")
class DataSource(BelongsToOrgMixin, db.Model):
id = primary_key("DataSource")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization, backref="data_sources")
name = Column(db.String(255))
type = Column(db.String(255))
options = Column(
"encrypted_options",
ConfigurationContainer.as_mutable(
EncryptedConfiguration(
db.Text, settings.DATASOURCE_SECRET_KEY, FernetEngine
)
),
)
queue_name = Column(db.String(255), default="queries")
scheduled_queue_name = Column(db.String(255), default="scheduled_queries")
created_at = Column(db.DateTime(True), default=db.func.now())
data_source_groups = db.relationship(
"DataSourceGroup", back_populates="data_source", cascade="all"
)
__tablename__ = "data_sources"
__table_args__ = (db.Index("data_sources_org_id_name", "org_id", "name"),)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def to_dict(self, all=False, with_permissions_for=None):
d = {
"id": self.id,
"name": self.name,
"type": self.type,
"syntax": self.query_runner.syntax,
"paused": self.paused,
"pause_reason": self.pause_reason,
"supports_auto_limit": self.query_runner.supports_auto_limit
}
if all:
schema = get_configuration_schema_for_query_runner_type(self.type)
self.options.set_schema(schema)
d["options"] = self.options.to_dict(mask_secrets=True)
d["queue_name"] = self.queue_name
d["scheduled_queue_name"] = self.scheduled_queue_name
d["groups"] = self.groups
if with_permissions_for is not None:
d["view_only"] = (
db.session.query(DataSourceGroup.view_only)
.filter(
DataSourceGroup.group == with_permissions_for,
DataSourceGroup.data_source == self,
)
.one()[0]
)
return d
def __str__(self):
return str(self.name)
@classmethod
def create_with_group(cls, *args, **kwargs):
data_source = cls(*args, **kwargs)
data_source_group = DataSourceGroup(
data_source=data_source, group=data_source.org.default_group
)
db.session.add_all([data_source, data_source_group])
return data_source
@classmethod
def all(cls, org, group_ids=None):
data_sources = cls.query.filter(cls.org == org).order_by(cls.id.asc())
if group_ids:
data_sources = data_sources.join(DataSourceGroup).filter(
DataSourceGroup.group_id.in_(group_ids)
)
return data_sources.distinct()
@classmethod
def get_by_id(cls, _id):
return cls.query.filter(cls.id == _id).one()
def delete(self):
Query.query.filter(Query.data_source == self).update(
dict(data_source_id=None, latest_query_data_id=None)
)
QueryResult.query.filter(QueryResult.data_source == self).delete()
res = db.session.delete(self)
db.session.commit()
redis_connection.delete(self._schema_key)
return res
def get_cached_schema(self):
cache = redis_connection.get(self._schema_key)
return json_loads(cache) if cache else None
def get_schema(self, refresh=False):
out_schema = None
if not refresh:
out_schema = self.get_cached_schema()
if out_schema is None:
query_runner = self.query_runner
schema = query_runner.get_schema(get_stats=refresh)
try:
out_schema = self._sort_schema(schema)
except Exception:
logging.exception(
"Error sorting schema columns for data_source {}".format(self.id)
)
out_schema = schema
finally:
redis_connection.set(self._schema_key, json_dumps(out_schema))
return out_schema
def _sort_schema(self, schema):
return [
{"name": i["name"], "columns": sorted(i["columns"], key=lambda x: x["name"] if isinstance(x, dict) else x)}
for i in sorted(schema, key=lambda x: x["name"])
]
@property
def _schema_key(self):
return "data_source:schema:{}".format(self.id)
@property
def _pause_key(self):
return "ds:{}:pause".format(self.id)
@property
def paused(self):
return redis_connection.exists(self._pause_key)
@property
def pause_reason(self):
return redis_connection.get(self._pause_key)
def pause(self, reason=None):
redis_connection.set(self._pause_key, reason or "")
def resume(self):
redis_connection.delete(self._pause_key)
def add_group(self, group, view_only=False):
dsg = DataSourceGroup(group=group, data_source=self, view_only=view_only)
db.session.add(dsg)
return dsg
def remove_group(self, group):
DataSourceGroup.query.filter(
DataSourceGroup.group == group, DataSourceGroup.data_source == self
).delete()
db.session.commit()
def update_group_permission(self, group, view_only):
dsg = DataSourceGroup.query.filter(
DataSourceGroup.group == group, DataSourceGroup.data_source == self
).one()
dsg.view_only = view_only
db.session.add(dsg)
return dsg
@property
def uses_ssh_tunnel(self):
return "ssh_tunnel" in self.options
@property
def query_runner(self):
query_runner = get_query_runner(self.type, self.options)
if self.uses_ssh_tunnel:
query_runner = with_ssh_tunnel(query_runner, self.options.get("ssh_tunnel"))
return query_runner
@classmethod
def get_by_name(cls, name):
return cls.query.filter(cls.name == name).one()
# XXX examine call sites to see if a regular SQLA collection would work better
@property
def groups(self):
groups = DataSourceGroup.query.filter(DataSourceGroup.data_source == self)
return dict([(group.group_id, group.view_only) for group in groups])
@generic_repr("id", "data_source_id", "group_id", "view_only")
class DataSourceGroup(db.Model):
# XXX drop id, use datasource/group as PK
id = primary_key("DataSourceGroup")
data_source_id = Column(key_type("DataSource"), db.ForeignKey("data_sources.id"))
data_source = db.relationship(DataSource, back_populates="data_source_groups")
group_id = Column(key_type("Group"), db.ForeignKey("groups.id"))
group = db.relationship(Group, back_populates="data_sources")
view_only = Column(db.Boolean, default=False)
__tablename__ = "data_source_groups"
DESERIALIZED_DATA_ATTR = "_deserialized_data"
class DBPersistence(object):
@property
def data(self):
if self._data is None:
return None
if not hasattr(self, DESERIALIZED_DATA_ATTR):
setattr(self, DESERIALIZED_DATA_ATTR, json_loads(self._data))
return self._deserialized_data
@data.setter
def data(self, data):
if hasattr(self, DESERIALIZED_DATA_ATTR):
delattr(self, DESERIALIZED_DATA_ATTR)
self._data = data
QueryResultPersistence = (
settings.dynamic_settings.QueryResultPersistence or DBPersistence
)
@generic_repr("id", "org_id", "data_source_id", "query_hash", "runtime", "retrieved_at")
class QueryResult(db.Model, QueryResultPersistence, BelongsToOrgMixin):
id = primary_key("QueryResult")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization)
data_source_id = Column(key_type("DataSource"), db.ForeignKey("data_sources.id"))
data_source = db.relationship(DataSource, backref=backref("query_results"))
query_hash = Column(db.String(32), index=True)
query_text = Column("query", db.Text)
_data = Column("data", db.Text)
runtime = Column(postgresql.DOUBLE_PRECISION)
retrieved_at = Column(db.DateTime(True))
__tablename__ = "query_results"
def __str__(self):
return "%d | %s | %s" % (self.id, self.query_hash, self.retrieved_at)
def to_dict(self):
return {
"id": self.id,
"query_hash": self.query_hash,
"query": self.query_text,
"data": self.data,
"data_source_id": self.data_source_id,
"runtime": self.runtime,
"retrieved_at": self.retrieved_at,
}
@classmethod
def unused(cls, days=7):
age_threshold = datetime.datetime.now() - datetime.timedelta(days=days)
return (
cls.query.filter(
Query.id.is_(None), cls.retrieved_at < age_threshold
).outerjoin(Query)
).options(load_only("id"))
@classmethod
def get_latest(cls, data_source, query, max_age=0):
query_hash = gen_query_hash(query)
if max_age == -1:
query = cls.query.filter(
cls.query_hash == query_hash, cls.data_source == data_source
)
else:
query = cls.query.filter(
cls.query_hash == query_hash,
cls.data_source == data_source,
(
db.func.timezone("utc", cls.retrieved_at)
+ datetime.timedelta(seconds=max_age)
>= db.func.timezone("utc", db.func.now())
),
)
return query.order_by(cls.retrieved_at.desc()).first()
@classmethod
def store_result(
cls, org, data_source, query_hash, query, data, run_time, retrieved_at
):
query_result = cls(
org_id=org,
query_hash=query_hash,
query_text=query,
runtime=run_time,
data_source=data_source,
retrieved_at=retrieved_at,
data=data,
)
db.session.add(query_result)
logging.info("Inserted query (%s) data; id=%s", query_hash, query_result.id)
return query_result
@property
def groups(self):
return self.data_source.groups
def should_schedule_next(
previous_iteration, now, interval, time=None, day_of_week=None, failures=0
):
# if time exists then interval > 23 hours (82800s)
# if day_of_week exists then interval > 6 days (518400s)
if time is None:
ttl = int(interval)
next_iteration = previous_iteration + datetime.timedelta(seconds=ttl)
else:
hour, minute = time.split(":")
hour, minute = int(hour), int(minute)
# The following logic is needed for cases like the following:
# - The query scheduled to run at 23:59.
# - The scheduler wakes up at 00:01.
# - Using naive implementation of comparing timestamps, it will skip the execution.
normalized_previous_iteration = previous_iteration.replace(
hour=hour, minute=minute
)
if normalized_previous_iteration > previous_iteration:
previous_iteration = normalized_previous_iteration - datetime.timedelta(
days=1
)
days_delay = int(interval) / 60 / 60 / 24
days_to_add = 0
if day_of_week is not None:
days_to_add = (
list(calendar.day_name).index(day_of_week)
- normalized_previous_iteration.weekday()
)
next_iteration = (
previous_iteration
+ datetime.timedelta(days=days_delay)
+ datetime.timedelta(days=days_to_add)
).replace(hour=hour, minute=minute)
if failures:
try:
next_iteration += datetime.timedelta(minutes=2 ** failures)
except OverflowError:
return False
return now > next_iteration
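# Worked example (added; hypothetical values, not from the original code). With a
# plain hourly interval and no fixed time/day, the next run is simply the previous
# run plus the interval, and an exponential backoff of 2**failures minutes is added
# after failed runs:
#
#   >>> import datetime
#   >>> prev = datetime.datetime(2020, 1, 1, 10, 0)
#   >>> should_schedule_next(prev, datetime.datetime(2020, 1, 1, 10, 30), 3600)
#   False
#   >>> should_schedule_next(prev, datetime.datetime(2020, 1, 1, 11, 1), 3600)
#   True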
@gfk_type
@generic_repr(
"id",
"name",
"query_hash",
"version",
"user_id",
"org_id",
"data_source_id",
"query_hash",
"last_modified_by_id",
"is_archived",
"is_draft",
"schedule",
"schedule_failures",
)
class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
id = primary_key("Query")
version = Column(db.Integer, default=1)
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization, backref="queries")
data_source_id = Column(key_type("DataSource"), db.ForeignKey("data_sources.id"), nullable=True)
data_source = db.relationship(DataSource, backref="queries")
latest_query_data_id = Column(
key_type("QueryResult"), db.ForeignKey("query_results.id"), nullable=True
)
latest_query_data = db.relationship(QueryResult)
name = Column(db.String(255))
description = Column(db.String(4096), nullable=True)
query_text = Column("query", db.Text)
query_hash = Column(db.String(32))
api_key = Column(db.String(40), default=lambda: generate_token(40))
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User, foreign_keys=[user_id])
last_modified_by_id = Column(key_type("User"), db.ForeignKey("users.id"), nullable=True)
last_modified_by = db.relationship(
User, backref="modified_queries", foreign_keys=[last_modified_by_id]
)
is_archived = Column(db.Boolean, default=False, index=True)
is_draft = Column(db.Boolean, default=True, index=True)
schedule = Column(MutableDict.as_mutable(PseudoJSON), nullable=True)
interval = pseudo_json_cast_property(db.Integer, "schedule", "interval", default=0)
schedule_failures = Column(db.Integer, default=0)
visualizations = db.relationship("Visualization", cascade="all, delete-orphan")
options = Column(MutableDict.as_mutable(PseudoJSON), default={})
search_vector = Column(
TSVectorType(
"id",
"name",
"description",
"query",
weights={"name": "A", "id": "B", "description": "C", "query": "D"},
),
nullable=True,
)
tags = Column(
"tags", MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True
)
query_class = SearchBaseQuery
__tablename__ = "queries"
__mapper_args__ = {"version_id_col": version, "version_id_generator": False}
def __str__(self):
return str(self.id)
def archive(self, user=None):
db.session.add(self)
self.is_archived = True
self.schedule = None
for vis in self.visualizations:
for w in vis.widgets:
db.session.delete(w)
for a in self.alerts:
db.session.delete(a)
if user:
self.record_changes(user)
def regenerate_api_key(self):
self.api_key = generate_token(40)
@classmethod
def create(cls, **kwargs):
query = cls(**kwargs)
db.session.add(
Visualization(
query_rel=query,
name="Table",
description="",
type="TABLE",
options="{}",
)
)
return query
@classmethod
def all_queries(
cls, group_ids, user_id=None, include_drafts=False, include_archived=False
):
query_ids = (
db.session.query(distinct(cls.id))
.join(
DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id
)
.filter(Query.is_archived.is_(include_archived))
.filter(DataSourceGroup.group_id.in_(group_ids))
)
queries = (
cls.query.options(
joinedload(Query.user),
joinedload(Query.latest_query_data).load_only(
"runtime", "retrieved_at"
),
)
.filter(cls.id.in_(query_ids))
# Adding outer joins to be able to order by relationship
.outerjoin(User, User.id == Query.user_id)
.outerjoin(QueryResult, QueryResult.id == Query.latest_query_data_id)
.options(
contains_eager(Query.user), contains_eager(Query.latest_query_data)
)
)
if not include_drafts:
queries = queries.filter(
or_(Query.is_draft.is_(False), Query.user_id == user_id)
)
return queries
@classmethod
def favorites(cls, user, base_query=None):
if base_query is None:
base_query = cls.all_queries(user.group_ids, user.id, include_drafts=True)
return base_query.join(
(
Favorite,
and_(Favorite.object_type == "Query", Favorite.object_id == Query.id),
)
).filter(Favorite.user_id == user.id)
@classmethod
def all_tags(cls, user, include_drafts=False):
queries = cls.all_queries(
group_ids=user.group_ids, user_id=user.id, include_drafts=include_drafts
)
tag_column = func.unnest(cls.tags).label("tag")
usage_count = func.count(1).label("usage_count")
query = (
db.session.query(tag_column, usage_count)
.group_by(tag_column)
.filter(Query.id.in_(queries.options(load_only("id"))))
.order_by(usage_count.desc())
)
return query
@classmethod
def by_user(cls, user):
return cls.all_queries(user.group_ids, user.id).filter(Query.user == user)
@classmethod
def by_api_key(cls, api_key):
return cls.query.filter(cls.api_key == api_key).one()
@classmethod
def past_scheduled_queries(cls):
now = utils.utcnow()
queries = Query.query.filter(Query.schedule.isnot(None)).order_by(Query.id)
return [
query
for query in queries
if query.schedule["until"] is not None
and pytz.utc.localize(
datetime.datetime.strptime(query.schedule["until"], "%Y-%m-%d")
)
<= now
]
@classmethod
def outdated_queries(cls):
queries = (
Query.query.options(
joinedload(Query.latest_query_data).load_only("retrieved_at")
)
.filter(Query.schedule.isnot(None))
.order_by(Query.id)
.all()
)
now = utils.utcnow()
outdated_queries = {}
scheduled_queries_executions.refresh()
for query in queries:
try:
if query.schedule.get("disabled"):
continue
if query.schedule["until"]:
schedule_until = pytz.utc.localize(
datetime.datetime.strptime(query.schedule["until"], "%Y-%m-%d")
)
if schedule_until <= now:
continue
retrieved_at = scheduled_queries_executions.get(query.id) or (
query.latest_query_data and query.latest_query_data.retrieved_at
)
if should_schedule_next(
retrieved_at or now,
now,
query.schedule["interval"],
query.schedule["time"],
query.schedule["day_of_week"],
query.schedule_failures,
):
key = "{}:{}".format(query.query_hash, query.data_source_id)
outdated_queries[key] = query
except Exception as e:
query.schedule["disabled"] = True
db.session.commit()
message = (
"Could not determine why query %d timed out (%s); automatic refresh has been disabled."
% (query.id, repr(e))
)
logging.info(message)
sentry.capture_exception(
type(e)(message).with_traceback(e.__traceback__)
)
return list(outdated_queries.values())
@classmethod
def search(
cls,
term,
group_ids,
user_id=None,
include_drafts=False,
limit=None,
include_archived=False,
multi_byte_search=False,
):
all_queries = cls.all_queries(
group_ids,
user_id=user_id,
include_drafts=include_drafts,
include_archived=include_archived,
)
if multi_byte_search:
# Since tsvector doesn't work well with CJK languages, use `ilike` too
pattern = "%{}%".format(term)
return (
all_queries.filter(
or_(cls.name.ilike(pattern), cls.description.ilike(pattern))
)
.order_by(Query.id)
.limit(limit)
)
# sort the result using the weight as defined in the search vector column
return all_queries.search(term, sort=True).limit(limit)
@classmethod
def search_by_user(cls, term, user, limit=None):
return cls.by_user(user).search(term, sort=True).limit(limit)
@classmethod
def recent(cls, group_ids, user_id=None, limit=20):
query = (
cls.query.filter(Event.created_at > (db.func.current_date() - 7))
.join(Event, Query.id == Event.object_id.cast(db.Integer))
.join(
DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id
)
.filter(
Event.action.in_(
["edit", "execute", "edit_name", "edit_description", "view_source"]
),
Event.object_id != None,
Event.object_type == "query",
DataSourceGroup.group_id.in_(group_ids),
or_(Query.is_draft == False, Query.user_id == user_id),
Query.is_archived == False,
)
.group_by(Event.object_id, Query.id)
.order_by(db.desc(db.func.count(0)))
)
if user_id:
query = query.filter(Event.user_id == user_id)
query = query.limit(limit)
return query
@classmethod
def get_by_id(cls, _id):
return cls.query.filter(cls.id == _id).one()
@classmethod
def all_groups_for_query_ids(cls, query_ids):
query = """SELECT group_id, view_only
FROM queries
JOIN data_source_groups ON queries.data_source_id = data_source_groups.data_source_id
WHERE queries.id in :ids"""
return db.session.execute(query, {"ids": tuple(query_ids)}).fetchall()
@classmethod
def update_latest_result(cls, query_result):
# TODO: Investigate how big an impact this select-before-update makes.
queries = Query.query.filter(
Query.query_hash == query_result.query_hash,
Query.data_source == query_result.data_source,
)
for q in queries:
q.latest_query_data = query_result
# don't auto-update the updated_at timestamp
q.skip_updated_at = True
db.session.add(q)
query_ids = [q.id for q in queries]
logging.info(
"Updated %s queries with result (%s).",
len(query_ids),
query_result.query_hash,
)
return query_ids
def fork(self, user):
forked_list = [
"org",
"data_source",
"latest_query_data",
"description",
"query_text",
"query_hash",
"options",
"tags",
]
kwargs = {a: getattr(self, a) for a in forked_list}
# Query.create will add default TABLE visualization, so use constructor to create bare copy of query
forked_query = Query(
name="Copy of (#{}) {}".format(self.id, self.name), user=user, **kwargs
)
for v in sorted(self.visualizations, key=lambda v: v.id):
forked_v = v.copy()
forked_v["query_rel"] = forked_query
fv = Visualization(
**forked_v
) # it will magically add it to `forked_query.visualizations`
db.session.add(fv)
db.session.add(forked_query)
return forked_query
@property
def runtime(self):
return self.latest_query_data.runtime
@property
def retrieved_at(self):
return self.latest_query_data.retrieved_at
@property
def groups(self):
if self.data_source is None:
return {}
return self.data_source.groups
@hybrid_property
def lowercase_name(self):
"Optional property useful for sorting purposes."
return self.name.lower()
@lowercase_name.expression
def lowercase_name(cls):
"The SQLAlchemy expression for the property above."
return func.lower(cls.name)
@property
def parameters(self):
return self.options.get("parameters", [])
@property
def parameterized(self):
return ParameterizedQuery(self.query_text, self.parameters, self.org)
@property
def dashboard_api_keys(self):
query = """SELECT api_keys.api_key
FROM api_keys
JOIN dashboards ON object_id = dashboards.id
JOIN widgets ON dashboards.id = widgets.dashboard_id
JOIN visualizations ON widgets.visualization_id = visualizations.id
WHERE object_type='dashboards'
AND active=true
AND visualizations.query_id = :id"""
api_keys = db.session.execute(query, {"id": self.id}).fetchall()
return [api_key[0] for api_key in api_keys]
def update_query_hash(self):
should_apply_auto_limit = self.options.get("apply_auto_limit", False) if self.options else False
query_runner = self.data_source.query_runner if self.data_source else BaseQueryRunner({})
self.query_hash = query_runner.gen_query_hash(self.query_text, should_apply_auto_limit)
@listens_for(Query, "before_insert")
@listens_for(Query, "before_update")
def receive_before_insert_update(mapper, connection, target):
target.update_query_hash()
@listens_for(Query.user_id, "set")
def query_last_modified_by(target, val, oldval, initiator):
target.last_modified_by_id = val
@generic_repr("id", "object_type", "object_id", "user_id", "org_id")
class Favorite(TimestampMixin, db.Model):
id = primary_key("Favorite")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
object_type = Column(db.Unicode(255))
object_id = Column(key_type("Favorite"))
object = generic_relationship(object_type, object_id)
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User, backref="favorites")
__tablename__ = "favorites"
__table_args__ = (
UniqueConstraint("object_type", "object_id", "user_id", name="unique_favorite"),
)
@classmethod
def is_favorite(cls, user, object):
return cls.query.filter(cls.object == object, cls.user_id == user).count() > 0
@classmethod
def are_favorites(cls, user, objects):
objects = list(objects)
if not objects:
return []
object_type = str(objects[0].__class__.__name__)
return [
fav.object_id
for fav in cls.query.filter(
cls.object_id.in_([o.id for o in objects]),
cls.object_type == object_type,
cls.user_id == user,
)
]
OPERATORS = {
">": lambda v, t: v > t,
">=": lambda v, t: v >= t,
"<": lambda v, t: v < t,
"<=": lambda v, t: v <= t,
"==": lambda v, t: v == t,
"!=": lambda v, t: v != t,
# backward compatibility
"greater than": lambda v, t: v > t,
"less than": lambda v, t: v < t,
"equals": lambda v, t: v == t,
}
def next_state(op, value, threshold):
if isinstance(value, bool):
# If it's a boolean cast to string and lower case, because upper cased
# boolean value is Python specific and most likely will be confusing to
# users.
value = str(value).lower()
else:
try:
value = float(value)
value_is_number = True
except ValueError:
value_is_number = isinstance(value, numbers.Number)
if value_is_number:
try:
threshold = float(threshold)
except ValueError:
return Alert.UNKNOWN_STATE
else:
value = str(value)
if op(value, threshold):
new_state = Alert.TRIGGERED_STATE
else:
new_state = Alert.OK_STATE
return new_state
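# Illustrative sketch (added; not part of the original code): OPERATORS plus
# next_state turn an alert condition into one of the Alert states defined below.
#
#   >>> next_state(OPERATORS[">"], 12, "10")
#   'triggered'
#   >>> next_state(OPERATORS[">"], 7, "10")
#   'ok'
#   >>> next_state(OPERATORS[">"], 12, "n/a")   # numeric value, non-numeric threshold
#   'unknown'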
@generic_repr(
"id", "name", "query_id", "user_id", "state", "last_triggered_at", "rearm"
)
class Alert(TimestampMixin, BelongsToOrgMixin, db.Model):
UNKNOWN_STATE = "unknown"
OK_STATE = "ok"
TRIGGERED_STATE = "triggered"
id = primary_key("Alert")
name = Column(db.String(255))
query_id = Column(key_type("Query"), db.ForeignKey("queries.id"))
query_rel = db.relationship(Query, backref=backref("alerts", cascade="all"))
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User, backref="alerts")
options = Column(MutableDict.as_mutable(PseudoJSON))
state = Column(db.String(255), default=UNKNOWN_STATE)
subscriptions = db.relationship("AlertSubscription", cascade="all, delete-orphan")
last_triggered_at = Column(db.DateTime(True), nullable=True)
rearm = Column(db.Integer, nullable=True)
__tablename__ = "alerts"
@classmethod
def all(cls, group_ids):
return (
cls.query.options(joinedload(Alert.user), joinedload(Alert.query_rel))
.join(Query)
.join(
DataSourceGroup, DataSourceGroup.data_source_id == Query.data_source_id
)
.filter(DataSourceGroup.group_id.in_(group_ids))
)
@classmethod
def get_by_id_and_org(cls, object_id, org):
return super(Alert, cls).get_by_id_and_org(object_id, org, Query)
def evaluate(self):
data = self.query_rel.latest_query_data.data
if data["rows"] and self.options["column"] in data["rows"][0]:
op = OPERATORS.get(self.options["op"], lambda v, t: False)
value = data["rows"][0][self.options["column"]]
threshold = self.options["value"]
new_state = next_state(op, value, threshold)
else:
new_state = self.UNKNOWN_STATE
return new_state
def subscribers(self):
return User.query.join(AlertSubscription).filter(
AlertSubscription.alert == self
)
def render_template(self, template):
if template is None:
return ""
data = self.query_rel.latest_query_data.data
host = base_url(self.query_rel.org)
col_name = self.options["column"]
if data["rows"] and col_name in data["rows"][0]:
result_value = data["rows"][0][col_name]
else:
result_value = None
context = {
"ALERT_NAME": self.name,
"ALERT_URL": "{host}/alerts/{alert_id}".format(host=host, alert_id=self.id),
"ALERT_STATUS": self.state.upper(),
"ALERT_CONDITION": self.options["op"],
"ALERT_THRESHOLD": self.options["value"],
"QUERY_NAME": self.query_rel.name,
"QUERY_URL": "{host}/queries/{query_id}".format(
host=host, query_id=self.query_rel.id
),
"QUERY_RESULT_VALUE": result_value,
"QUERY_RESULT_ROWS": data["rows"],
"QUERY_RESULT_COLS": data["columns"],
}
return mustache_render(template, context)
@property
def custom_body(self):
template = self.options.get("custom_body", self.options.get("template"))
return self.render_template(template)
@property
def custom_subject(self):
template = self.options.get("custom_subject")
return self.render_template(template)
@property
def groups(self):
return self.query_rel.groups
@property
def muted(self):
return self.options.get("muted", False)
def generate_slug(ctx):
slug = utils.slugify(ctx.current_parameters["name"])
tries = 1
while Dashboard.query.filter(Dashboard.slug == slug).first() is not None:
slug = utils.slugify(ctx.current_parameters["name"]) + "_" + str(tries)
tries += 1
return slug
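# Illustrative note (added; not from the original code): generate_slug is used as
# the column default for Dashboard.slug below, so repeatedly creating dashboards
# named "Sales KPIs" would yield slugs such as "sales-kpis", "sales-kpis_1",
# "sales-kpis_2", ... (the exact form depends on utils.slugify).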
@gfk_type
@generic_repr(
"id", "name", "slug", "user_id", "org_id", "version", "is_archived", "is_draft"
)
class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
id = primary_key("Dashboard")
version = Column(db.Integer)
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization, backref="dashboards")
slug = Column(db.String(140), index=True, default=generate_slug)
name = Column(db.String(100))
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User)
# layout is no longer used, but kept so we know how to render old dashboards.
layout = Column(db.Text)
dashboard_filters_enabled = Column(db.Boolean, default=False)
is_archived = Column(db.Boolean, default=False, index=True)
is_draft = Column(db.Boolean, default=True, index=True)
widgets = db.relationship("Widget", backref="dashboard", lazy="dynamic")
tags = Column(
"tags", MutableList.as_mutable(postgresql.ARRAY(db.Unicode)), nullable=True
)
options = Column(
MutableDict.as_mutable(postgresql.JSON), server_default="{}", default={}
)
__tablename__ = "dashboards"
__mapper_args__ = {"version_id_col": version}
def __str__(self):
return "%s=%s" % (self.id, self.name)
@property
def name_as_slug(self):
return utils.slugify(self.name)
@classmethod
def all(cls, org, group_ids, user_id):
query = (
Dashboard.query.options(
joinedload(Dashboard.user).load_only(
"id", "name", "details", "email"
)
).distinct(Dashboard.created_at, Dashboard.slug)
.outerjoin(Widget)
.outerjoin(Visualization)
.outerjoin(Query)
.outerjoin(
DataSourceGroup, Query.data_source_id == DataSourceGroup.data_source_id
)
.filter(
Dashboard.is_archived == False,
(
DataSourceGroup.group_id.in_(group_ids)
| (Dashboard.user_id == user_id)
),
Dashboard.org == org,
)
)
query = query.filter(
or_(Dashboard.user_id == user_id, Dashboard.is_draft == False)
)
return query
@classmethod
def search(cls, org, groups_ids, user_id, search_term):
# TODO: switch to FTS
return cls.all(org, groups_ids, user_id).filter(
cls.name.ilike("%{}%".format(search_term))
)
@classmethod
def search_by_user(cls, term, user, limit=None):
return cls.by_user(user).filter(cls.name.ilike("%{}%".format(term))).limit(limit)
@classmethod
def all_tags(cls, org, user):
dashboards = cls.all(org, user.group_ids, user.id)
tag_column = func.unnest(cls.tags).label("tag")
usage_count = func.count(1).label("usage_count")
query = (
db.session.query(tag_column, usage_count)
.group_by(tag_column)
.filter(Dashboard.id.in_(dashboards.options(load_only("id"))))
.order_by(usage_count.desc())
)
return query
@classmethod
def favorites(cls, user, base_query=None):
if base_query is None:
base_query = cls.all(user.org, user.group_ids, user.id)
return base_query.join(
(
Favorite,
and_(
Favorite.object_type == "Dashboard",
Favorite.object_id == Dashboard.id,
),
)
).filter(Favorite.user_id == user.id)
@classmethod
def by_user(cls, user):
return cls.all(user.org, user.group_ids, user.id).filter(Dashboard.user == user)
@classmethod
def get_by_slug_and_org(cls, slug, org):
return cls.query.filter(cls.slug == slug, cls.org == org).one()
@hybrid_property
def lowercase_name(self):
"Optional property useful for sorting purposes."
return self.name.lower()
@lowercase_name.expression
def lowercase_name(cls):
"The SQLAlchemy expression for the property above."
return func.lower(cls.name)
@generic_repr("id", "name", "type", "query_id")
class Visualization(TimestampMixin, BelongsToOrgMixin, db.Model):
id = primary_key("Visualization")
type = Column(db.String(100))
query_id = Column(key_type("Query"), db.ForeignKey("queries.id"))
# query_rel and not query, because db.Model already has query defined.
query_rel = db.relationship(Query, back_populates="visualizations")
name = Column(db.String(255))
description = Column(db.String(4096), nullable=True)
options = Column(db.Text)
__tablename__ = "visualizations"
def __str__(self):
return "%s %s" % (self.id, self.type)
@classmethod
def get_by_id_and_org(cls, object_id, org):
return super(Visualization, cls).get_by_id_and_org(object_id, org, Query)
def copy(self):
return {
"type": self.type,
"name": self.name,
"description": self.description,
"options": self.options,
}
@generic_repr("id", "visualization_id", "dashboard_id")
class Widget(TimestampMixin, BelongsToOrgMixin, db.Model):
id = primary_key("Widget")
visualization_id = Column(
key_type("Visualization"), db.ForeignKey("visualizations.id"), nullable=True
)
visualization = db.relationship(
Visualization, backref=backref("widgets", cascade="delete")
)
text = Column(db.Text, nullable=True)
width = Column(db.Integer)
options = Column(db.Text)
dashboard_id = Column(key_type("Dashboard"), db.ForeignKey("dashboards.id"), index=True)
__tablename__ = "widgets"
def __str__(self):
return "%s" % self.id
@classmethod
def get_by_id_and_org(cls, object_id, org):
return super(Widget, cls).get_by_id_and_org(object_id, org, Dashboard)
@generic_repr(
"id", "object_type", "object_id", "action", "user_id", "org_id", "created_at"
)
class Event(db.Model):
id = primary_key("Event")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization, back_populates="events")
user_id = Column(key_type("User"), db.ForeignKey("users.id"), nullable=True)
user = db.relationship(User, backref="events")
action = Column(db.String(255))
object_type = Column(db.String(255))
object_id = Column(db.String(255), nullable=True)
additional_properties = Column(
MutableDict.as_mutable(PseudoJSON), nullable=True, default={}
)
created_at = Column(db.DateTime(True), default=db.func.now())
__tablename__ = "events"
def __str__(self):
return "%s,%s,%s,%s" % (
self.user_id,
self.action,
self.object_type,
self.object_id,
)
def to_dict(self):
return {
"org_id": self.org_id,
"user_id": self.user_id,
"action": self.action,
"object_type": self.object_type,
"object_id": self.object_id,
"additional_properties": self.additional_properties,
"created_at": self.created_at.isoformat(),
}
@classmethod
def record(cls, event):
org_id = event.pop("org_id")
user_id = event.pop("user_id", None)
action = event.pop("action")
object_type = event.pop("object_type")
object_id = event.pop("object_id", None)
created_at = datetime.datetime.utcfromtimestamp(event.pop("timestamp"))
event = cls(
org_id=org_id,
user_id=user_id,
action=action,
object_type=object_type,
object_id=object_id,
additional_properties=event,
created_at=created_at,
)
db.session.add(event)
return event
@generic_repr("id", "created_by_id", "org_id", "active")
class ApiKey(TimestampMixin, GFKBase, db.Model):
id = primary_key("ApiKey")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization)
api_key = Column(db.String(255), index=True, default=lambda: generate_token(40))
active = Column(db.Boolean, default=True)
# 'object' provided by GFKBase
object_id = Column(key_type("ApiKey"))
created_by_id = Column(key_type("User"), db.ForeignKey("users.id"), nullable=True)
created_by = db.relationship(User)
__tablename__ = "api_keys"
__table_args__ = (
db.Index("api_keys_object_type_object_id", "object_type", "object_id"),
)
@classmethod
def get_by_api_key(cls, api_key):
return cls.query.filter(cls.api_key == api_key, cls.active == True).one()
@classmethod
def get_by_object(cls, object):
return cls.query.filter(
cls.object_type == object.__class__.__tablename__,
cls.object_id == object.id,
cls.active == True,
).first()
@classmethod
def create_for_object(cls, object, user):
k = cls(org=user.org, object=object, created_by=user)
db.session.add(k)
return k
@generic_repr("id", "name", "type", "user_id", "org_id", "created_at")
class NotificationDestination(BelongsToOrgMixin, db.Model):
id = primary_key("NotificationDestination")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization, backref="notification_destinations")
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User, backref="notification_destinations")
name = Column(db.String(255))
type = Column(db.String(255))
options = Column(
"encrypted_options",
ConfigurationContainer.as_mutable(
EncryptedConfiguration(
db.Text, settings.DATASOURCE_SECRET_KEY, FernetEngine
)
),
)
created_at = Column(db.DateTime(True), default=db.func.now())
__tablename__ = "notification_destinations"
__table_args__ = (
db.Index(
"notification_destinations_org_id_name", "org_id", "name", unique=True
),
)
def __str__(self):
return str(self.name)
def to_dict(self, all=False):
d = {
"id": self.id,
"name": self.name,
"type": self.type,
"icon": self.destination.icon(),
}
if all:
schema = get_configuration_schema_for_destination_type(self.type)
self.options.set_schema(schema)
d["options"] = self.options.to_dict(mask_secrets=True)
return d
@property
def destination(self):
return get_destination(self.type, self.options)
@classmethod
def all(cls, org):
notification_destinations = cls.query.filter(cls.org == org).order_by(
cls.id.asc()
)
return notification_destinations
def notify(self, alert, query, user, new_state, app, host):
schema = get_configuration_schema_for_destination_type(self.type)
self.options.set_schema(schema)
return self.destination.notify(
alert, query, user, new_state, app, host, self.options
)
@generic_repr("id", "user_id", "destination_id", "alert_id")
class AlertSubscription(TimestampMixin, db.Model):
id = primary_key("AlertSubscription")
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User)
destination_id = Column(
key_type("NotificationDestination"), db.ForeignKey("notification_destinations.id"), nullable=True
)
destination = db.relationship(NotificationDestination)
alert_id = Column(key_type("Alert"), db.ForeignKey("alerts.id"))
alert = db.relationship(Alert, back_populates="subscriptions")
__tablename__ = "alert_subscriptions"
__table_args__ = (
db.Index(
"alert_subscriptions_destination_id_alert_id",
"destination_id",
"alert_id",
unique=True,
),
)
def to_dict(self):
d = {"id": self.id, "user": self.user.to_dict(), "alert_id": self.alert_id}
if self.destination:
d["destination"] = self.destination.to_dict()
return d
@classmethod
def all(cls, alert_id):
return AlertSubscription.query.join(User).filter(
AlertSubscription.alert_id == alert_id
)
def notify(self, alert, query, user, new_state, app, host):
if self.destination:
return self.destination.notify(alert, query, user, new_state, app, host)
else:
# User email subscription, so create an email destination object
config = {"addresses": self.user.email}
schema = get_configuration_schema_for_destination_type("email")
options = ConfigurationContainer(config, schema)
destination = get_destination("email", options)
return destination.notify(alert, query, user, new_state, app, host, options)
@generic_repr("id", "trigger", "user_id", "org_id")
class QuerySnippet(TimestampMixin, db.Model, BelongsToOrgMixin):
id = primary_key("QuerySnippet")
org_id = Column(key_type("Organization"), db.ForeignKey("organizations.id"))
org = db.relationship(Organization, backref="query_snippets")
trigger = Column(db.String(255), unique=True)
description = Column(db.Text)
user_id = Column(key_type("User"), db.ForeignKey("users.id"))
user = db.relationship(User, backref="query_snippets")
snippet = Column(db.Text)
__tablename__ = "query_snippets"
@classmethod
def all(cls, org):
return cls.query.filter(cls.org == org)
def to_dict(self):
d = {
"id": self.id,
"trigger": self.trigger,
"description": self.description,
"snippet": self.snippet,
"user": self.user.to_dict(),
"updated_at": self.updated_at,
"created_at": self.created_at,
}
return d
def init_db():
default_org = Organization(name="Default", slug="default", settings={})
admin_group = Group(
name="admin",
permissions=["admin", "super_admin"],
org=default_org,
type=Group.BUILTIN_GROUP,
)
default_group = Group(
name="default",
permissions=Group.DEFAULT_PERMISSIONS,
org=default_org,
type=Group.BUILTIN_GROUP,
)
db.session.add_all([default_org, admin_group, default_group])
# XXX remove after fixing User.group_ids
db.session.commit()
return default_org, admin_group, default_group
|
the-stack_106_13425
|
"""
This example shows how we can utilise a plugin/factory pattern to
make it easy to read file contents easily without having to hard code
readers into the core of our system.
Instead our core contains a factory, and we add plugins which are capable
of handling different file types. We can then ask each plugin whether the
plugin is able to parse the file - the first plugin to say yes is used and the
resulting data is returned.
You can try this example by running:
.. code-block:: python
>>> from factories.examples import reader
>>>
>>> # -- Instance a reader
>>> data_reader = reader.DataReader()
>>>
>>> # -- Read some data from an ini file
>>> data = data_reader.read(reader.ini_path)
>>> print(data)
>>>
>>> # -- Now read some json data
>>> data = data_reader.read(reader.json_path)
>>> print(data)
"""
import os
from .core import(
DataReader,
ReaderPlugin,
)
# -- Some convience variables for testing this example
ini_path = os.path.join(
os.path.dirname(__file__),
'data',
'data.ini',
)
json_path = os.path.join(
os.path.dirname(__file__),
'data',
'data.json',
)
|
the-stack_106_13432
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <[email protected]>
# Date: 2019-06-13
# Desc:
from utils.math_tools import INCH
class CNCRouter:
def __init__(self, name, unit):
self.name = name
self.control = "arduino grbl"
self.unit = unit
self.z_axis_safety_height = 5.0
self.max_x = None
self.max_y = None
self.spindle_speed = None
self.feed_rate = None
self.drilling_speed = None
# milling depth per pass (layer thickness)
self.layer_thickness = 2
# stepover per pass
def __repr__(self):
return "<CNCRouter>"
class RouterBits:
def __init__(self, diameter, desc=""):
if diameter[-2:] == "in":
self.diameter = self.inch_to_mm(self.to_float(diameter[:-2]))
elif diameter[-4:] == "inch":
self.diameter = self.inch_to_mm(self.to_float(diameter[:-4]))
elif diameter[-2:] == "mm":
self.diameter = self.to_float(diameter[:-2])
elif diameter[-2:] == "cm":
self.diameter = self.to_float(diameter[:-2]) * 10
else:
raise ValueError("diameter must specify a unit (supported: inch, in, cm, mm).")
self.description = desc
# blade / flute related attributes
self.blade_length = None
self.blade_number = 1
@property
def radius(self):
return self.diameter / 2
@staticmethod
def to_float(value):
return float(eval(value))
@staticmethod
def inch_to_mm(inch_value):
"""
:param inch_value:
:return: mm
"""
return inch_value * INCH
def __repr__(self):
return "bit: ⌀{}mm.".format(self.diameter)
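# Illustrative usage sketch (added; not part of the original module). Assuming
# INCH == 25.4 (mm per inch), fractional imperial sizes are accepted as strings:
#
#   >>> bit = RouterBits("1/4in", desc="upcut spiral")
#   >>> bit.diameter      # 1/4 inch converted to mm
#   6.35
#   >>> bit.radius
#   3.175
#   >>> RouterBits("6mm").diameter
#   6.0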
|
the-stack_106_13434
|
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
import ast
import astor
import numbers
# pylint: disable=unidiomatic-typecheck
# list of functions related to search space generating
_ss_funcs = [
'choice',
'randint',
'uniform',
'quniform',
'loguniform',
'qloguniform',
'normal',
'qnormal',
'lognormal',
'qlognormal',
'function_choice',
'mutable_layer'
]
class SearchSpaceGenerator(ast.NodeTransformer):
"""Generate search space from smart parater APIs"""
def __init__(self, module_name):
self.module_name = module_name
self.search_space = {}
self.last_line = 0 # last parsed line, useful for error reporting
def generate_mutable_layer_search_space(self, args):
mutable_block = args[0].s
mutable_layer = args[1].s
key = self.module_name + '/' + mutable_block
args[0].s = key
if key not in self.search_space:
self.search_space[key] = {'_type': 'mutable_layer', '_value': {}}
self.search_space[key]['_value'][mutable_layer] = {
'layer_choice': [k.s for k in args[2].keys],
'optional_inputs': [k.s for k in args[5].keys],
'optional_input_size': args[6].n if isinstance(args[6], ast.Num) else [args[6].elts[0].n, args[6].elts[1].n]
}
def visit_Call(self, node): # pylint: disable=invalid-name
self.generic_visit(node)
# ignore if the function is not 'nni.*'
if type(node.func) is not ast.Attribute:
return node
if type(node.func.value) is not ast.Name:
return node
if node.func.value.id != 'nni':
return node
# ignore if its not a search space function (e.g. `report_final_result`)
func = node.func.attr
if func not in _ss_funcs:
return node
self.last_line = node.lineno
if func == 'mutable_layer':
self.generate_mutable_layer_search_space(node.args)
return node
if node.keywords:
# there is a `name` argument
assert len(node.keywords) == 1, 'Smart parameter has keyword argument other than "name"'
assert node.keywords[0].arg == 'name', 'Smart paramater\'s keyword argument is not "name"'
assert type(node.keywords[0].value) is ast.Str, 'Smart parameter\'s name must be string literal'
name = node.keywords[0].value.s
specified_name = True
else:
# generate the missing name automatically
name = '__line' + str(node.args[-1].lineno)
specified_name = False
node.keywords = list()
if func in ('choice', 'function_choice'):
# we will use keys in the dict as the choices, which is generated by code_generator according to the args given by user
assert len(node.args) == 1, 'Smart parameter has arguments other than dict'
# check if it is a number or a string and get its value accordingly
args = [key.n if type(key) is ast.Num else key.s for key in node.args[0].keys]
else:
# arguments of other functions must be literal number
assert all(isinstance(ast.literal_eval(astor.to_source(arg)), numbers.Real) for arg in node.args), \
'Smart parameter\'s arguments must be number literals'
args = [ast.literal_eval(astor.to_source(arg)) for arg in node.args]
key = self.module_name + '/' + name + '/' + func
# store key in ast.Call
node.keywords.append(ast.keyword(arg='key', value=ast.Str(s=key)))
if func == 'function_choice':
func = 'choice'
value = {'_type': func, '_value': args}
if specified_name:
# multiple functions with same name must have identical arguments
old = self.search_space.get(key)
assert old is None or old == value, 'Different smart parameters have same name'
else:
# generated name must not duplicate
assert key not in self.search_space, 'Only one smart parameter is allowed in a line'
self.search_space[key] = value
return node
def generate(module_name, code):
"""Generate search space.
Return a serializable search space object.
module_name: name of the module (str)
code: user code (str)
"""
try:
ast_tree = ast.parse(code)
except Exception:
raise RuntimeError('Bad Python code')
visitor = SearchSpaceGenerator(module_name)
try:
visitor.visit(ast_tree)
except AssertionError as exc:
raise RuntimeError('%d: %s' % (visitor.last_line, exc.args[0]))
return visitor.search_space, astor.to_source(ast_tree)
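# Illustrative usage sketch (added; not part of the original module):
#
#   >>> code = "lr = nni.uniform(0.0001, 0.1, name='lr')"
#   >>> space, new_code = generate('model.py', code)
#   >>> space
#   {'model.py/lr/uniform': {'_type': 'uniform', '_value': [0.0001, 0.1]}}
#   >>> # new_code is the same source with a generated key= argument injected
#   >>> # into the nni.uniform(...) call.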
|
the-stack_106_13435
|
from discord.ext import commands
from cogs.utils.dataIO import fileIO
from .utils.chat_formatting import *
from __main__ import send_cmd_help
import os
from random import choice as randchoice
class Quotes:
def __init__(self, bot):
self.bot = bot
self.quotes = fileIO("data/quotes/quotes.json", "load")
def _get_random_quote(self):
if len(self.quotes) == 0:
send_cmd_help(self.quote)
return "There are no saved quotes!"
return randchoice(self.quotes)
def _get_quote(self, num):
if num > 0 and num <= len(self.quotes):
return self.quotes[num - 1]
else:
return "That quote doesn't exist!"
def _add_quote(self, message):
self.quotes.append(message)
fileIO("data/quotes/quotes.json", "save", self.quotes)
def _fmt_quotes(self):
ret = "```"
for num, quote in enumerate(self.quotes):
ret += str(num + 1) + ") " + quote + "\n"
ret += "```"
return ret
@commands.command()
async def delquote(self, num: int):
"""Deletes a quote by its number
Use !allquotes to find quote numbers
Example: !delquote 3"""
if num > 0 and num <= len(self.quotes):
quotes = []
for i in range(len(self.quotes)):
if num - 1 == i:
await self.bot.say("Quote number " + str(num) +
" has been deleted.")
else:
quotes.append(self.quotes[i])
self.quotes = quotes
fileIO("data/quotes/quotes.json", "save", self.quotes)
else:
await self.bot.say("Quote " + str(num) + " does not exist.")
@commands.command(pass_context=True)
async def allquotes(self, ctx):
"""Gets a list of all quotes"""
strbuffer = self._fmt_quotes().split("\n")
mess = ""
for line in strbuffer:
if len(mess) + len(line) + 1 < 2000:
mess += "\n" + line
else:
await self.bot.send_message(ctx.message.author, mess)
mess = line
if mess != "":
await self.bot.send_message(ctx.message.author, mess)
@commands.command()
async def quote(self, *message):
"""Adds quote, retrieves random one, or a numbered one.
Use !allquotes to get a list of all quotes.
Example: !quote The quick brown fox -> adds quote
!quote -> gets random quote
!quote 4 -> gets quote #4"""
try:
if len(message) == 1:
message = int(message[0])
await self.bot.say(self._get_quote(message))
return
except:
pass
message = " ".join(message)
if message.lstrip() == "":
await self.bot.say(self._get_random_quote())
else:
self._add_quote(escape_mass_mentions(message))
await self.bot.say("Quote added.")
def check_folder():
if not os.path.exists("data/quotes"):
print("Creating data/quotes folder...")
os.makedirs("data/quotes")
def check_file():
quotes = []
f = "data/quotes/quotes.json"
if not fileIO(f, "check"):
print("Creating default quotes's quotes.json...")
fileIO(f, "save", quotes)
def setup(bot):
check_folder()
check_file()
n = Quotes(bot)
bot.add_cog(n)
|
the-stack_106_13436
|
# Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl import app, logging
import neurst.utils.flags_core as flags_core
from neurst.data.data_pipelines.data_pipeline import lowercase_and_remove_punctuations
from neurst.data.text import Tokenizer, build_tokenizer
FLAG_LIST = [
flags_core.Flag("input", dtype=flags_core.Flag.TYPE.STRING, default=None,
help="The path to the input text file."),
flags_core.Flag("output", dtype=flags_core.Flag.TYPE.STRING, default=None,
help="The path to the output text file."),
flags_core.Flag("lowercase", dtype=flags_core.Flag.TYPE.BOOLEAN, default=None,
help="Whether to lowercase."),
flags_core.Flag("remove_punctuation", dtype=flags_core.Flag.TYPE.BOOLEAN, default=None,
help="Whether to remove the punctuations."),
flags_core.Flag("language", dtype=flags_core.Flag.TYPE.STRING, default="en",
help="The text language."),
flags_core.ModuleFlag(Tokenizer.REGISTRY_NAME, help="The tokenizer."),
]
def _main(_):
arg_parser = flags_core.define_flags(FLAG_LIST, with_config_file=False)
args, remaining_argv = flags_core.intelligent_parse_flags(FLAG_LIST, arg_parser)
flags_core.verbose_flags(FLAG_LIST, args, remaining_argv)
tokenizer = build_tokenizer(args)
with tf.io.gfile.GFile(args["input"]) as fp:
with tf.io.gfile.GFile(args["output"], "w") as fw:
for line in fp:
line = lowercase_and_remove_punctuations(args["language"], line.strip(),
args["lowercase"], args["remove_punctuation"])
if tokenizer is None:
fw.write(line + "\n")
else:
fw.write(tokenizer.tokenize(line, return_str=True) + "\n")
if __name__ == "__main__":
logging.set_verbosity(logging.INFO)
app.run(_main, argv=["pseudo.py"])
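# Illustrative invocation (added; a sketch only -- file names are hypothetical and
# the tokenizer-related flags depend on what is registered under
# Tokenizer.REGISTRY_NAME):
#
#   python3 tokenize_corpus.py --input raw.txt --output tokenized.txt \
#       --language en --lowercase --remove_punctuation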
|
the-stack_106_13437
|
from typing import Any, Callable, Dict, Optional, Type, Union
from fugue.execution.execution_engine import ExecutionEngine, SQLEngine
from fugue.execution.native_execution_engine import NativeExecutionEngine
from triad.utils.convert import to_instance
from triad import assert_or_throw, ParamDict
class _ExecutionEngineFactory(object):
def __init__(self):
self._funcs: Dict[str, Callable] = {}
self._type_funcs: Dict[Type, Callable] = {}
self._sql_funcs: Dict[str, Callable] = {}
self.register_default(lambda conf, **kwargs: NativeExecutionEngine(conf=conf))
self.register_default_sql_engine(lambda engine, **kwargs: engine.sql_engine)
def register(
self, name_or_type: Union[str, Type], func: Callable, on_dup="overwrite"
) -> None:
if isinstance(name_or_type, str):
self._register(self._funcs, name=name_or_type, func=func, on_dup=on_dup)
else:
self._register(
self._type_funcs, name=name_or_type, func=func, on_dup=on_dup
)
def register_default(self, func: Callable, on_dup="overwrite") -> None:
self.register("", func, on_dup)
def register_sql_engine(
self, name: str, func: Callable, on_dup="overwrite"
) -> None:
self._register(self._sql_funcs, name=name, func=func, on_dup=on_dup)
def register_default_sql_engine(self, func: Callable, on_dup="overwrite") -> None:
self.register_sql_engine("", func, on_dup)
def make(
self, engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
if isinstance(engine, tuple):
execution_engine = self.make_execution_engine(
engine[0], conf=conf, **kwargs
)
sql_engine = self.make_sql_engine(engine[1], execution_engine)
execution_engine.set_sql_engine(sql_engine)
return execution_engine
else:
return self.make((engine, None), conf=conf, **kwargs)
def make_execution_engine(
self, engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
# Apply this function to an Execution Engine instance can
# make sure the compile conf is a superset of conf
# TODO: it's a mess here, can we make the logic more intuitive?
def make_engine(engine: Any) -> ExecutionEngine:
if isinstance(engine, str) and engine in self._funcs:
return self._funcs[engine](conf, **kwargs)
for k, f in self._type_funcs.items():
if isinstance(engine, k):
return f(engine, conf, **kwargs)
if isinstance(engine, ExecutionEngine):
if conf is not None:
engine.compile_conf.update(conf)
engine.compile_conf.update(kwargs)
return engine
return to_instance(
engine, ExecutionEngine, kwargs=dict(conf=conf, **kwargs)
)
result = make_engine(engine or "")
result.compile_conf.update(result.conf, on_dup=ParamDict.IGNORE)
result.compile_conf.update(conf, on_dup=ParamDict.OVERWRITE)
result.compile_conf.update(kwargs, on_dup=ParamDict.OVERWRITE)
return result
def make_sql_engine(
self,
engine: Any = None,
execution_engine: Optional[ExecutionEngine] = None,
**kwargs: Any,
) -> SQLEngine:
if engine is None:
engine = ""
if isinstance(engine, str) and engine in self._sql_funcs:
return self._sql_funcs[engine](execution_engine, **kwargs)
if isinstance(engine, SQLEngine):
assert_or_throw(
execution_engine is None and len(kwargs) == 0,
lambda: ValueError(
f"{engine} is an instance, can't take arguments "
f"execution_engine={execution_engine}, kwargs={kwargs}"
),
)
return engine
return to_instance(
engine, SQLEngine, kwargs=dict(execution_engine=execution_engine, **kwargs)
)
def _register(
self,
callables: Dict[Any, Callable],
name: Any,
func: Callable,
on_dup="overwrite",
) -> None:
if name not in callables:
callables[name] = func
return
if on_dup in ["raise", "throw"]:
raise KeyError(f"{name} is already registered")
if on_dup == "overwrite":
callables[name] = func
return
if on_dup == "ignore":
return
raise ValueError(on_dup)
_EXECUTION_ENGINE_FACTORY = _ExecutionEngineFactory()
def register_execution_engine(
name_or_type: Union[str, Type], func: Callable, on_dup="overwrite"
) -> None:
"""Register :class:`~fugue.execution.execution_engine.ExecutionEngine` with
a given name.
:param name_or_type: alias of the execution engine, or type of an object that
can be converted to an execution engine
:param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
Alias registration examples:
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_execution_engine("my", lambda conf: MyExecutionEngine(conf))
# 0
make_execution_engine("my")
make_execution_engine("my", {"myconfig":"value})
# 1
with FugueWorkflow("my") as dag:
dag.create([[0]],"a:int").show()
# 2
dag = FugueWorkflow()
dag.create([[0]],"a:int").show()
dag.run("my", {"myconfig":"value})
# 3
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run("my")
Type registration examples:
.. code-block:: python
from pyspark.sql import SparkSession
from fugue_spark import SparkExecutionEngine
from fugue_sql import fsql
register_execution_engine(
SparkSession,
lambda session, conf: SparkExecutionEngine(session, conf))
spark_session = SparkSession.builder.getOrCreate()
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run(spark_session)
"""
_EXECUTION_ENGINE_FACTORY.register(name_or_type, func, on_dup)
def register_default_execution_engine(func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.ExecutionEngine` as the
default engine.
:param func: a callable taking |ParamsLikeObject| and ``**kwargs`` and returning an
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_default_execution_engine(lambda conf: MyExecutionEngine(conf))
# the following examples will use MyExecutionEngine
# 0
make_execution_engine()
            make_execution_engine(None, {"myconfig":"value"})
# 1
with FugueWorkflow() as dag:
dag.create([[0]],"a:int").show()
# 2
dag = FugueWorkflow()
dag.create([[0]],"a:int").show()
            dag.run(None, {"myconfig":"value"})
# 3
fsql('''
CREATE [[0]] SCHEMA a:int
PRINT
''').run("", {"myconfig":"value})
"""
_EXECUTION_ENGINE_FACTORY.register_default(func, on_dup)
def register_sql_engine(name: str, func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.SQLEngine` with
a given name.
:param name: name of the SQL engine
:param func: a callable taking
:class:`~fugue.execution.execution_engine.ExecutionEngine`
and ``**kwargs`` and returning a
:class:`~fugue.execution.execution_engine.SQLEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_sql_engine("mysql", lambda engine: MySQLEngine(engine))
# create execution engine with MySQLEngine as the default
make_execution_engine(("", "mysql"))
# create DaskExecutionEngine with MySQLEngine as the default
make_execution_engine(("dask", "mysql"))
# default execution engine + MySQLEngine
with FugueWorkflow(("","mysql")) as dag:
dag.create([[0]],"a:int").show()
"""
_EXECUTION_ENGINE_FACTORY.register_sql_engine(name, func, on_dup)
def register_default_sql_engine(func: Callable, on_dup="overwrite") -> None:
"""Register :class:`~fugue.execution.execution_engine.SQLEngine` as the
default engine
:param func: a callable taking
:class:`~fugue.execution.execution_engine.ExecutionEngine`
and ``**kwargs`` and returning a
:class:`~fugue.execution.execution_engine.SQLEngine` instance
:param on_dup: action on duplicated ``name``. It can be "overwrite", "ignore"
(not overwriting) or "throw" (throw exception), defaults to "overwrite".
:raises KeyError: if ``on_dup`` is ``throw`` and the ``name`` already exists
.. note::
You should be careful to use this function, because when you set a custom
SQL engine as default, all execution engines you create will use this SQL
engine unless you are explicit. For example if you set the default SQL engine
to be a Spark specific one, then if you start a NativeExecutionEngine, it will
try to use it and will throw exceptions.
So it's always a better idea to use ``register_sql_engine`` instead
.. admonition:: Examples
.. code-block:: python
# create a new engine with name my (overwrites if existed)
register_default_sql_engine(lambda engine: MySQLEngine(engine))
# create NativeExecutionEngine with MySQLEngine as the default
make_execution_engine()
# create SparkExecutionEngine with MySQLEngine instead of SparkSQLEngine
make_execution_engine("spark")
# NativeExecutionEngine with MySQLEngine
with FugueWorkflow() as dag:
dag.create([[0]],"a:int").show()
"""
_EXECUTION_ENGINE_FACTORY.register_default_sql_engine(func, on_dup)
def make_execution_engine(
engine: Any = None, conf: Any = None, **kwargs: Any
) -> ExecutionEngine:
"""Create :class:`~fugue.execution.execution_engine.ExecutionEngine`
with specified ``engine``
:param engine: it can be empty string or null (use the default execution
engine), a string (use the registered execution engine), an
:class:`~fugue.execution.execution_engine.ExecutionEngine` type, or
the :class:`~fugue.execution.execution_engine.ExecutionEngine` instance
, or a tuple of two values where the first value represents execution
engine and the second value represents the sql engine (you can use ``None``
for either of them to use the default one), defaults to None
:param conf: |ParamsLikeObject|, defaults to None
:param kwargs: additional parameters to initialize the execution engine
:return: the :class:`~fugue.execution.execution_engine.ExecutionEngine`
instance
.. admonition:: Examples
.. code-block:: python
register_default_execution_engine(lambda conf: E1(conf))
register_execution_engine("e2", lambda conf, **kwargs: E2(conf, **kwargs))
register_sql_engine("s", lambda conf: S2(conf))
# E1 + E1.default_sql_engine
make_execution_engine()
# E2 + E2.default_sql_engine
            make_execution_engine("e2")
# E1 + S2
make_execution_engine((None, "s"))
# E2(conf, a=1, b=2) + S2
make_execution_engine(("e2", "s"), conf, a=1, b=2)
# SparkExecutionEngine + SparkSQLEngine
make_execution_engine(SparkExecutionEngine)
make_execution_engine(SparkExecutionEngine(spark_session, conf))
# SparkExecutionEngine + S2
make_execution_engine((SparkExecutionEngine, "s"))
"""
import fugue._utils.register # pylint: disable=W0611 # noqa: F401
return _EXECUTION_ENGINE_FACTORY.make(engine, conf, **kwargs)
def make_sql_engine(
engine: Any = None,
execution_engine: Optional[ExecutionEngine] = None,
**kwargs: Any,
) -> SQLEngine:
"""Create :class:`~fugue.execution.execution_engine.SQLEngine`
with specified ``engine``
:param engine: it can be empty string or null (use the default SQL
engine), a string (use the registered SQL engine), an
:class:`~fugue.execution.execution_engine.SQLEngine` type, or
the :class:`~fugue.execution.execution_engine.SQLEngine` instance
(you can use ``None`` to use the default one), defaults to None
:param execution_engine: the
:class:`~fugue.execution.execution_engine.ExecutionEngine` instance
to create
the :class:`~fugue.execution.execution_engine.SQLEngine`. Normally you
should always provide this value.
:param kwargs: additional parameters to initialize the sql engine
:return: the :class:`~fugue.execution.execution_engine.SQLEngine`
instance
.. note::
For users, you normally don't need to call this function directly.
Use ``make_execution_engine`` instead
.. admonition:: Examples
.. code-block:: python
register_default_sql_engine(lambda conf: S1(conf))
register_sql_engine("s2", lambda conf: S2(conf))
engine = NativeExecutionEngine()
# S1(engine)
make_sql_engine(None, engine)
# S1(engine, a=1)
make_sql_engine(None, engine, a=1)
# S2(engine)
make_sql_engine("s2", engine)
# SqliteEngine(engine)
make_sql_engine(SqliteEngine)
"""
import fugue._utils.register # pylint: disable=W0611 # noqa: F401
return _EXECUTION_ENGINE_FACTORY.make_sql_engine(engine, execution_engine, **kwargs)
|
the-stack_106_13439
|
##-------------------------------------------------------------------
"""
Hosoya triangle (originally Fibonacci triangle) is a triangular arrangement
of numbers in which each number is the sum of two numbers above it.
The first line is always 1, and the second line is always {1 1}.
The print_hosoya function below takes an argument n, which is the height of
the triangle (number of lines).
For example:
print_hosoya(6) would print:
1
1 1
2 1 2
3 2 2 3
5 3 4 3 5
8 5 6 6 5 8
As written (plain recursion, no memoization) each entry takes exponential time
to compute; with memoization the whole triangle can be built in O(n^2)
(see the cached variant at the end of this file).
##-------------------------------------------------------------------
"""
def hosoya(n, m):
if (
(n == 0 and m == 0)
or (n == 1 and m == 0)
or (n == 1 and m == 1)
or (n == 2 and m == 1)
):
return 1
if n > m:
return hosoya(n - 1, m) + hosoya(n - 2, m)
elif m == n:
return hosoya(n - 1, m - 1) + hosoya(n - 2, m - 2)
else:
return 0
def print_hosoya(n):
for i in range(n):
for j in range(i + 1):
print(hosoya(i, j), end=" ")
print("\n", end="")
def hosoya_testing(n):
x = []
for i in range(n):
for j in range(i + 1):
x.append(hosoya(i, j))
return x
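# ---------------------------------------------------------------------------
# Hedged addition (not in the original file): the plain recursion above
# recomputes subproblems, so a memoized variant is sketched here for
# illustration, together with the demo from the module docstring.
from functools import lru_cache
@lru_cache(maxsize=None)
def hosoya_cached(n, m):
    """Same recurrence as hosoya(), but memoized: the triangle builds in O(n^2)."""
    if (
        (n == 0 and m == 0)
        or (n == 1 and m == 0)
        or (n == 1 and m == 1)
        or (n == 2 and m == 1)
    ):
        return 1
    if n > m:
        return hosoya_cached(n - 1, m) + hosoya_cached(n - 2, m)
    if m == n:
        return hosoya_cached(n - 1, m - 1) + hosoya_cached(n - 2, m - 2)
    return 0
if __name__ == "__main__":
    # Prints the 6-line triangle shown in the module docstring.
    print_hosoya(6)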
|
the-stack_106_13440
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
import matplotlib.pyplot as plt
df_wine = pd.read_csv('wine.data', header=None)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
'Alcalinity of ash', 'Magnesium', 'Total phenols',
'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue',
'OD280/OD315 of diluted wines', 'Proline']
# Splitting the data into 70% training and 30% test subsets.
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=0)
# Standardizing the data.
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
cov_mat = np.cov(X_train_std.T)
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
for i in range(len(eigen_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eigen_pairs.sort(key=lambda k: k[0], reverse=True)
w = np.hstack((eigen_pairs[0][1][:, np.newaxis], eigen_pairs[1][1][:, np.newaxis]))
print('Matrix W:\n', w)
print()
print(X_train_std[0].dot(w))
X_train_pca = X_train_std.dot(w)
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
plt.scatter(X_train_pca[y_train == l, 0],
X_train_pca[y_train == l, 1],
c=c, label=l, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
plt.show()
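# Hedged addition (not in the original script): how much variance the two
# selected principal components capture, from the eigenvalues computed above.
tot = np.sum(np.abs(eigen_vals))
var_exp = np.sort(np.abs(eigen_vals))[::-1] / tot
print('Variance explained by PC1 and PC2: {:.1%}'.format(var_exp[0] + var_exp[1]))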
|
the-stack_106_13446
|
from pyspark.sql.tests import ReusedPySparkTestCase
from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from semisupervised import LP_Graph
import timeit
class TestCreate_complete_graph(ReusedPySparkTestCase):
def setUp(self):
helix_path = '/home/svanhmic/workspace/data/DABAI/sparkdata/csv/double_helix3.csv'
big_data_path = '/home/svanhmic/workspace/data/DABAI/test.csv'
self.spark_session = SparkSession(sparkContext=self.sc)
self.helix_df = self.spark_session.read.csv(helix_path, header=True, inferSchema=True)
self.big_data_df = (self.spark_session.read
.csv(big_data_path, header=True, inferSchema=True)
.drop('_c0')
.withColumnRenamed('index','id')
.withColumn('label', F.when(F.rand()>0.01, None).otherwise(1)))
def test_create_complete_graph(self):
result = LP_Graph.create_complete_graph(self.helix_df, feature_columns='x y z'.split(),
id_column='id', label_column='unknown_label')
print('Number of data points {}. Final number of points should be {}'.format(self.helix_df.count(),result.count()))
print(result.rdd.getNumPartitions())
timeit.timeit()
self.assertEqual(self.helix_df.count()**2, result.count() )
# def test_create_rdd_graph(self):
# result_df = LP_Graph.create_rdd_graph(
# self.helix_df, id_column='id', label_column='unknown_label',
# feature_columns='x y z'.split())
# # for i in result_df.take(5):
# # print(i)
# result_df.show(5)
# result_df.printSchema()
# self.fail()
|
the-stack_106_13447
|
# Copyright (c) 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base_bs_erf
import numpy as np
from numpy import log, exp
from base_bs_erf import erf, invsqrt
def black_scholes ( nopt, price, strike, t, rate, vol ):
mr = -rate
sig_sig_two = vol * vol * 2
P = price
S = strike
T = t
a = log(P / S)
b = T * mr
z = T * sig_sig_two
c = 0.25 * z
y = invsqrt(z)
w1 = (a - b + c) * y
w2 = (a - b - c) * y
d1 = 0.5 + 0.5 * erf(w1)
d2 = 0.5 + 0.5 * erf(w2)
Se = exp(b) * S
call = P * d1 - Se * d2
put = call - P + Se
return (call, put)
base_bs_erf.run("Numpy", black_scholes)
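# Hedged usage sketch (not in the original file): assumes base_bs_erf.erf and
# base_bs_erf.invsqrt are elementwise, NumPy-compatible callables, which is
# what the vectorized expressions above rely on. The input numbers are made up.
#
#     nopt = 3
#     price = np.full(nopt, 49.81566406)
#     strike = np.full(nopt, 40.0)
#     t = np.full(nopt, 1.884)
#     call, put = black_scholes(nopt, price, strike, t, rate=0.1, vol=0.2)
#     print(call, put)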
|
the-stack_106_13448
|
import os
import sys
import csv
import math
import datetime
import shutil
import pickle
import argparse
import importlib
import numpy as np
import tensorflow as tf
import tensorflow.keras.callbacks as cbks
import matplotlib.pyplot as plt
from models.unet_2d import UNet2D
from utils import GLOBAL_TYPE, class_sums_from_generator, Config
from losses import (
CustomSmoothedWeightedCCE,
fixed_uniform_smoothing,
fixed_adjacent_smoothing,
weighted_uniform_smoothing,
weighted_adjacent_smoothing,
)
tf.keras.backend.set_floatx(GLOBAL_TYPE)
def load_data(
loc="data",
combined_nm="combined.npy",
segmented_nm="segmented.npy",
number=-1,
):
if number == "sample":
loc = "sample_data"
combined_nm = "combined_sample.npy"
segmented_nm = "segmented_sample.npy"
xs = np.load(os.path.join(loc, combined_nm)).astype(np.float32)
ys = np.load(os.path.join(loc, segmented_nm)).astype(np.int32)
else:
full_xs = np.load(os.path.join(loc, combined_nm)).astype(np.float32)
full_ys = np.load(os.path.join(loc, segmented_nm)).astype(np.int32)
if number > 0:
xs = full_xs[:number]
ys = full_ys[:number]
del full_xs, full_ys
elif number < 0:
xs = full_xs
ys = full_ys
else:
raise ValueError(
f"Cannot run with {number} data, try \"sample\""
)
n_classes = ys.shape[-1]
print("LOADED DATA", xs.shape)
print("LOADED LABELS", ys.shape)
print("Classes", n_classes)
return xs, ys, n_classes
def make_generator(
train_xs,
train_ys,
train_split,
val_split,
train_batch_size,
val_batch_size,
):
generator = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
validation_split=(val_split / train_split),
rotation_range=10.0,
width_shift_range=2.,
height_shift_range=2.,
shear_range=0.0,
zoom_range=0.1,
# Defaults
featurewise_center=False, samplewise_center=False,
featurewise_std_normalization=False, samplewise_std_normalization=False,
zca_whitening=False, zca_epsilon=1e-06,
brightness_range=None,
channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=False,
vertical_flip=False, preprocessing_function=None,
data_format=None, dtype=None,
)
training_generator = generator.flow(
x=train_xs, y=train_ys, batch_size=train_batch_size,
subset="training", shuffle=False,
# Defaults
sample_weight=None, seed=None,
save_to_dir=None, save_prefix='',
save_format='png',
)
validation_generator = generator.flow(
x=train_xs, y=train_ys, batch_size=val_batch_size,
subset="validation", shuffle=False,
# Defaults
sample_weight=None, seed=None,
save_to_dir=None, save_prefix='',
save_format='png',
)
return training_generator, validation_generator
def split_data(dset, split=(0.6, 0.2, 0.2), shuffle=True):
if shuffle:
np.random.shuffle(dset)
assert sum(split) == 1
lens = [math.floor(s * dset.shape[0]) for s in split]
lens[-1] = lens[-1] + (dset.shape[0] - sum(lens))
sets = []
start_from = 0
for set_len in lens:
sets.append(dset[start_from:start_from+set_len])
start_from += set_len
print(
"Split into sets", [s.shape[0] for s in sets],
"from", dset.shape[0]
)
return sets
def get_class_weights(
n_classes, training_generator,
mode="uniform",
generator_length=-1,
background_idx=10,
drop_background=0.10
):
"""Iterate training data to get balancing weights
Args:
n_classes: total number of classes
training_generator: when iterated for
num_training_batches (or completion, if non-repeating),
returns the whole set in batches of (x, labs)
generator_length: break the generator iteration, if
generator repeats (e.g. is infinite).
mode: Select mode from:
uniform: 1s
            drop_background: background_idx gets the drop_background
                argument as its weight (default 0.10), all others get 1.
balance_off_max: max value will be set to 1. Rest will be
max(a) / a for each value a (count of labels)
balance_off_min: min value will be set to 1. Rest will be
1 / a for each value a (count of labels)
balance_off_median: min value will be set to 1. Rest will be
1 / a for each value a (count of labels)
"""
if mode == "uniform":
class_weights = {c: 1. for c in range(n_classes)}
elif mode == "drop_background":
        # background weight comes from the drop_background argument (default 0.10)
class_weight_list = [1. for _ in range(n_classes)]
class_weight_list[background_idx] = drop_background
class_weights = {
c: class_weight_list[c] for c in range(n_classes)
}
else:
class_sums = class_sums_from_generator(
n_classes, training_generator, generator_length
)
balanced_weights = [
np.sum(class_sums) / class_sums[i]
for i in range(n_classes)
]
if mode == "balance_off_max":
# "Max" class (background) has weight 1 so no divis.
class_weights = {
i: balanced_weights[i] for i in range(n_classes)
}
elif mode == "balance_off_min":
class_weights = {
i: balanced_weights[i] / np.max(balanced_weights)
for i in range(n_classes)
}
elif mode == "balance_off_med":
class_weights = {
i: balanced_weights[i] / np.median(balanced_weights)
for i in range(n_classes)
}
return class_weights
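# Hedged illustration (not in the original file): for get_class_weights with
# 3 classes and label counts [10, 30, 60], balanced_weights is
# [100/10, 100/30, 100/60] = [10.0, 3.33, 1.67]; "balance_off_min" divides by
# the largest of these, giving {0: 1.0, 1: 0.33, 2: 0.17}, and "uniform"
# simply returns {0: 1.0, 1: 1.0, 2: 1.0}.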
def define_callbacks(
exp_dir,
es_delta=0.0001, es_patience=8,
rlr_factor=0.33, rlr_patience=4,
rlr_delta=0.001, rlr_min=0.00001,
):
early_stop = cbks.EarlyStopping(
monitor='val_loss', min_delta=es_delta,
patience=es_patience, verbose=2,
restore_best_weights=True,
# Defaults
mode='auto', baseline=None
)
reduce_plateau = cbks.ReduceLROnPlateau(
monitor='val_loss', factor=rlr_factor,
patience=rlr_patience, min_delta=rlr_delta,
min_lr=rlr_min, verbose=2,
# Defaults
cooldown=0, mode='auto',
)
log_dir = (
os.path.join(
"tb_logs",
f"{exp_dir}-{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}"
)
)
tensorboard = cbks.TensorBoard(
log_dir=log_dir, update_freq='epoch', # profile_batch=0,
histogram_freq=1,
# Defaults
# Bug reported elsewhere: can't do histograms with generators
write_graph=True, write_images=False,
embeddings_freq=0, embeddings_metadata=None
)
return [early_stop, reduce_plateau, tensorboard]
def display_multi(xs, ys, model, args):
"""Display random n images, save to exp dir
"""
plt.axis('off')
fig, axes = plt.subplots(nrows=args.display, ncols=3, figsize=(15, 15))
title = ['Input Image', 'True Mask', 'Predicted Mask']
rand_idxs = np.random.choice(len(xs), size=args.display, replace=False)
for n, (rand_idx, plot_row) in enumerate(zip(rand_idxs, axes)):
# gather data
rand_x = xs[rand_idx]
rand_y = tf.argmax(ys[rand_idx], axis=-1)
rand_pred = tf.argmax(
model(np.expand_dims(rand_x, axis=0) / 255),
axis=-1
)[0]
display_list = [rand_x, rand_y, rand_pred]
# Plot subplots
for i, plot_col in enumerate(plot_row):
if n == 0:
plot_col.set_title(title[i])
try:
disp = tf.keras.preprocessing.image.array_to_img(
display_list[i]
)
except:
disp = display_list[i]
plot_col.imshow(disp)
# Save and show
fig_file = os.path.join(args.exp_dir, "example_segment.png")
if overwrite(fig_file, args):
fig.savefig(fig_file)
plt.axis('on') # reset
def parse_my_args(arg_list):
parser = argparse.ArgumentParser()
parser.add_argument(
"exp_dir", type=str,
help="A path to an experiment directory, optionally including a "
"config.py file which contains a variable called config = Config(). "
"See defaults/baseline_config.py for example."
)
parser.add_argument(
"--data-num", "-n", default=-1,
help="Number of datapoints to train on, integer or \"sample\""
"to use sample data in repo"
)
parser.add_argument(
"--display", "-d", default=0, type=int,
help="Display n example images of the network at the end with pyplot"
)
parser.add_argument("--force", "-f", action="store_true",
help="Overwrites current weights and graphs"
)
args = parser.parse_args(arg_list)
if args.data_num != "sample":
args.data_num = int(args.data_num)
return args
def overwrite(filename, args):
"""Whether to overwrite a file given arg config
Written out for clear logic
"""
file_exists = os.path.exists(filename)
sample_run = args.data_num == "sample"
if sample_run or (file_exists and not args.force):
return False
else:
return True
def main(cmdline_args):
args = parse_my_args(cmdline_args)
result_csv = "results.csv"
# FILES
os.makedirs(args.exp_dir, exist_ok=True)
weights_file = os.path.join(args.exp_dir, "weights.h5")
history_file = os.path.join(args.exp_dir, "history.p")
train = not os.path.exists(weights_file) or args.force
print("Weights", weights_file, "training? :", train)
# CONFIGURE
config_file = os.path.join(args.exp_dir, "config.py")
if not os.path.exists(config_file):
print("No config, using default")
config = Config() # defaults
else:
# Import with importlib so it's executable from other files
print("Reading config", config_file)
# First arg sets __name__ of the namespace. Not needed.
config_spec = importlib.util.spec_from_file_location(
"config_file", config_file
)
config_module = importlib.util.module_from_spec(config_spec)
config_spec.loader.exec_module(config_module)
config = config_module.config
if not isinstance(config, Config):
raise ValueError(
f"Config {config_file} is not valid."
f"\"Lacks config = Config()\" object.\n{config}"
)
# Fail early if invalid
if train:
callbacks = define_callbacks(args.exp_dir, **config.callback_args)
# SELECT DATA
xs, ys, n_classes = load_data(number=args.data_num)
xs = np.expand_dims(xs, axis=-1).astype(np.float32) # Indicate grayscale
input_shape = (None, *xs.shape[1:])
print("Input data shape", input_shape)
split = (config.train_split, 1.-config.train_split)
train_xs, test_xs = split_data(xs, split, shuffle=False)
train_ys, test_ys = split_data(ys, split, shuffle=False)
num_training_batches = (
len(train_xs) * config.train_split // config.train_batch_size
)
del xs, ys
# MAKE MODEL
print("Making model")
model = UNet2D(
input_shape[1:], # exclude batch dimension
n_classes,
encoding=config.encoding,
decoding=config.decoding,
central=config.central,
)
model.build(input_shape=input_shape)
model_summary = model.summary()
# Skip slow step if pretrained
if train:
print("Making dataset generator")
training_generator, validation_generator = make_generator(
train_xs,
train_ys,
config.train_split,
config.val_split,
config.train_batch_size,
config.val_batch_size,
)
train_samples = len(train_xs)
del train_xs, train_ys
# TRAIN (or load)
if not train:
        model.load_weights(weights_file)
        # Smoothing is only computed during training, so report it as absent
        # when loading pretrained weights.
        smoothing_matrix = None
        history = None
if os.path.exists(history_file):
with open(history_file, "rb") as f:
history = pickle.load(f)
print("Loaded history", history_file)
else:
print("Could not find history file", history_file)
else:
print(f"Getting class weights {config.class_weight_mode}...")
class_weights = get_class_weights(
n_classes,
training_generator,
mode=config.class_weight_mode,
generator_length=num_training_batches,
drop_background=config.drop_background,
)
print(f"Using class weights:\n{class_weights}")
print(f"Getting smoothing matrix {config.smoothing_function.__name__}")
if config.smoothing_function is None:
smoothing_matrix = None
else:
smoothing_matrix = config.smoothing_function(
n_classes, training_generator, num_training_batches
)
print(f"Using smoothing:\n{smoothing_matrix}")
weighted_cce = CustomSmoothedWeightedCCE(
class_weights=list(class_weights.values()),
label_smoothing=smoothing_matrix,
from_logits=True,
**config.loss_args
)
model.compile(
optimizer=config.optim,
loss=weighted_cce,
metrics=['accuracy'],
# Defaults to consider
loss_weights=None,
weighted_metrics=None,
run_eagerly=None,
)
train_report = model.fit(
x=training_generator,
validation_data=validation_generator,
epochs=config.max_epochs,
callbacks=callbacks,
shuffle=False, # already shuffled
verbose=1,
# Defaults. Ignore steps and batches;
# generators handle more cleanly
# (and with repeat data)
sample_weight=None,
class_weight=None, # handled with customLF
initial_epoch=0,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False
)
history = train_report.history
if overwrite(weights_file, args):
model.save_weights(weights_file)
if overwrite(history_file, args):
with open(history_file, "wb") as f:
pickle.dump(history, f, protocol=pickle.HIGHEST_PROTOCOL)
# LOG TRAINING
loss_graph_file = os.path.join(args.exp_dir, "losses.png")
if history is not None and overwrite(loss_graph_file, args):
plt.figure()
epochs = list(range(len(history["loss"])))
plt.plot(epochs, history["loss"], label="Training Loss")
plt.plot(epochs, history["val_loss"], label="Val Loss")
plt.title("Losses")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.savefig(loss_graph_file)
else:
print("Loss graph not (re)saved")
# TEST. TODO - batch
print("Evaluating...")
cm = np.zeros((n_classes, n_classes), dtype=np.float64)
for (x, y) in zip(test_xs, test_ys):
logits = model(np.expand_dims(x, axis=0) / 255) # rescale
prediction = tf.argmax(logits, axis=-1, output_type=tf.int64)
flat_y = tf.argmax(
np.expand_dims(y, axis=0), axis=-1, output_type=tf.int64
)
img_confusion = tf.math.confusion_matrix(
labels=tf.reshape(flat_y, [-1]),
predictions=tf.reshape(prediction, [-1]),
num_classes=n_classes, dtype=tf.int64,
).numpy()
cm += img_confusion
print("Complete")
# Process CM and save raw
cm_file = os.path.join(args.exp_dir, "confusion.csv")
if overwrite(cm_file, args):
np.savetxt(cm_file, cm, delimiter=",")
cm = cm.astype('double') # Cast, for calculations
total_accuracy = np.sum(np.diagonal(cm)) / np.sum(cm)
cm = cm / cm.sum(axis=1)[:, np.newaxis]
accuracy_per_class = np.diagonal(cm) # TP accuracy
avg_accuracy_per_class = np.mean(accuracy_per_class)
avg_accuracy_per_target_class = np.mean(accuracy_per_class[:-1])
# Create report
result_string = "Test set accuracy: {:.3%}".format(total_accuracy)
if history is not None:
result_string += "\n\nFinal val loss: {:.3f}".format(history["val_loss"][-1])
result_string += "\n\nAvg accuracy per class: {:.3%}".format(
avg_accuracy_per_class
)
result_string += "\n\nAvg accuracy per class exc background: {:.3%}".format(
avg_accuracy_per_target_class
)
# Beautify the output cm
np.set_printoptions(precision=3, suppress=True)
result_string += "\n\nConfusion:\n" + str(cm)
if smoothing_matrix is not None:
result_string += f"\n\nSmoothing:\n{smoothing_matrix}"
else:
result_string += f"\n\nSmoothing:\nNo smoothing applied."
result_string += f"\n\nModel summary:\n{model_summary}"
# Write to file and display
result_file = os.path.join(args.exp_dir, "results.txt")
if overwrite(result_file, args):
with open(result_file, "w") as rf:
rf.write(result_string)
else:
print("WARNING, force false, did not write results")
# Write csv to track experiments
if args.data_num != "sample": # Only track full runs
# Add titles
print("Writing to", result_csv)
if not os.path.exists(result_csv):
with open(result_csv, "w") as f:
csv_writer = csv.writer(f)
csv_writer.writerow([
"exp", "acc", "avg_cls_acc", "avg_cls_acc_exc_bg",
"val_loss"
])
csv_line = [
args.exp_dir, total_accuracy, avg_accuracy_per_class,
avg_accuracy_per_target_class, history["val_loss"][-1]
]
# Append result
with open(result_csv, "a") as f:
csv_writer = csv.writer(f)
csv_writer.writerow(csv_line)
print(result_string)
# Display (and save in-function) n random images
if args.display:
display_multi(test_xs, test_ys, model, args)
plt.show()
return True
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_106_13449
|
from ..connections import stats
class StatsMixin:
_stats = []
_default_stats = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stats = {}
self.do_stats()
def do_stats(self):
        # copy, so the shared class-level default list is not mutated in place
        stats_list = list(getattr(self, '_default_stats', []))
stats_list.extend(getattr(self, '_stats', []))
for key, attribute, historical in stats_list:
if not self.pk:
continue
self.stats[key] = stats.create_stat(
obj=self,
key=key,
historical=historical,
obj_attr=attribute)
def increment_stat(self, stat):
self.stats[stat].incr(1)
def decrement_stat(self, stat):
self.stats[stat].decr(1)
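# Hedged usage sketch (not part of the original module): the model, stat keys
# and attribute below are hypothetical. StatsMixin expects to be mixed into an
# object that has a ``pk`` attribute, with _stats entries shaped as
# (key, attribute passed to the stats backend as obj_attr, historical flag).
#
#   class Article(StatsMixin, models.Model):
#       _stats = [
#           ("views", "pk", True),    # keep a history of view counts
#           ("likes", "pk", False),   # plain counter
#       ]
#
#   article = Article.objects.get(pk=1)
#   article.increment_stat("views")
#   article.decrement_stat("likes")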
|
the-stack_106_13456
|
# Copyright (c) 2012 Terence Honles <[email protected]> (maintainer)
# Copyright (c) 2008 Giorgos Verigakis <[email protected]> (author)
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from ctypes import *
from ctypes.util import find_library
from errno import *
from os import strerror
from platform import machine, system
from signal import signal, SIGINT, SIG_DFL
from stat import S_IFDIR
from traceback import print_exc
import logging
try:
from functools import partial
except ImportError:
# http://docs.python.org/library/functools.html#functools.partial
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
try:
basestring
except NameError:
basestring = str
class c_timespec(Structure):
_fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
_fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
pass # Platform dependent
_system = system()
_machine = machine()
if _system == 'Darwin':
_libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency
_libfuse_path = (find_library('fuse4x') or find_library('osxfuse') or
find_library('fuse'))
else:
_libfuse_path = find_library('fuse')
if not _libfuse_path:
raise EnvironmentError('Unable to find libfuse')
else:
_libfuse = CDLL(_libfuse_path)
if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'):
_system = 'Darwin-MacFuse'
if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
c_fsfilcnt_t = c_ulong
c_gid_t = c_uint32
c_mode_t = c_uint16
c_off_t = c_int64
c_pid_t = c_int32
c_uid_t = c_uint32
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_uint32)
if _system == 'Darwin':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_ino', c_uint64),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_birthtimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32),
('st_flags', c_int32),
('st_gen', c_int32),
('st_lspare', c_int32),
('st_qspare', c_int64)]
else:
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
c_fsblkcnt_t = c_ulonglong
c_fsfilcnt_t = c_ulonglong
c_gid_t = c_uint
c_mode_t = c_uint
c_off_t = c_longlong
c_pid_t = c_int
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t)
if _machine == 'x86_64':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_nlink', c_ulong),
('st_mode', c_mode_t),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('__pad0', c_int),
('st_rdev', c_dev_t),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'ppc':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulonglong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
else:
# i686, use as fallback for everything else
c_stat._fields_ = [
('st_dev', c_dev_t),
('__pad1', c_ushort),
('__st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_ino', c_ulonglong)]
else:
raise NotImplementedError('%s is not supported.' % _system)
class c_statvfs(Structure):
_fields_ = [
('f_bsize', c_ulong),
('f_frsize', c_ulong),
('f_blocks', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_bavail', c_fsblkcnt_t),
('f_files', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_favail', c_fsfilcnt_t),
('f_fsid', c_ulong),
#('unused', c_int),
('f_flag', c_ulong),
('f_namemax', c_ulong)
]
if _system == 'FreeBSD':
c_fsblkcnt_t = c_uint64
c_fsfilcnt_t = c_uint64
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t)
class c_statvfs(Structure):
_fields_ = [
('f_bavail', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_blocks', c_fsblkcnt_t),
('f_favail', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_files', c_fsfilcnt_t),
('f_bsize', c_ulong),
('f_flag', c_ulong),
('f_frsize', c_ulong)]
class fuse_file_info(Structure):
_fields_ = [
('flags', c_int),
('fh_old', c_ulong),
('writepage', c_int),
('direct_io', c_uint, 1),
('keep_cache', c_uint, 1),
('flush', c_uint, 1),
('padding', c_uint, 29),
('fh', c_uint64),
('lock_owner', c_uint64)]
class fuse_context(Structure):
_fields_ = [
('fuse', c_voidp),
('uid', c_uid_t),
('gid', c_gid_t),
('pid', c_pid_t),
('private_data', c_voidp)]
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
class fuse_operations(Structure):
_fields_ = [
('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('getdir', c_voidp), # Deprecated, use readdir
('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('unlink', CFUNCTYPE(c_int, c_char_p)),
('rmdir', CFUNCTYPE(c_int, c_char_p)),
('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
('utime', c_voidp), # Deprecated, use utimens
('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
c_off_t, POINTER(fuse_file_info))),
('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
c_off_t, POINTER(fuse_file_info))),
('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
('setxattr', setxattr_t),
('getxattr', getxattr_t),
('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp,
CFUNCTYPE(c_int, c_voidp, c_char_p,
POINTER(c_stat), c_off_t),
c_off_t, POINTER(fuse_file_info))),
('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int,
POINTER(fuse_file_info))),
('init', CFUNCTYPE(c_voidp, c_voidp)),
('destroy', CFUNCTYPE(c_voidp, c_voidp)),
('access', CFUNCTYPE(c_int, c_char_p, c_int)),
('create', CFUNCTYPE(c_int, c_char_p, c_mode_t,
POINTER(fuse_file_info))),
('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t,
POINTER(fuse_file_info))),
('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
POINTER(fuse_file_info))),
('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info),
c_int, c_voidp)),
('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong))),
]
def time_of_timespec(ts):
return ts.tv_sec + ts.tv_nsec / 10 ** 9
def set_st_attrs(st, attrs):
for key, val in attrs.items():
if key in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'):
timespec = getattr(st, key + 'spec')
timespec.tv_sec = int(val)
timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
elif hasattr(st, key):
setattr(st, key, val)
def fuse_get_context():
'Returns a (uid, gid, pid) tuple'
ctxp = _libfuse.fuse_get_context()
ctx = ctxp.contents
return ctx.uid, ctx.gid, ctx.pid
class FuseOSError(OSError):
def __init__(self, errno):
super(FuseOSError, self).__init__(errno, strerror(errno))
class FUSE(object):
'''
This class is the lower level interface and should not be subclassed under
normal use. Its methods are called by fuse.
Assumes API version 2.6 or later.
'''
OPTIONS = (
('foreground', '-f'),
('debug', '-d'),
('nothreads', '-s'),
)
def __init__(self, operations, mountpoint, raw_fi=False, encoding='utf-8',
**kwargs):
'''
Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc.
'''
self.operations = operations
self.raw_fi = raw_fi
self.encoding = encoding
args = ['fuse']
args.extend(flag for arg, flag in self.OPTIONS
if kwargs.pop(arg, False))
kwargs.setdefault('fsname', operations.__class__.__name__)
args.append('-o')
args.append(','.join(self._normalize_fuse_options(**kwargs)))
args.append(mountpoint)
args = [arg.encode(encoding) for arg in args]
argv = (c_char_p * len(args))(*args)
fuse_ops = fuse_operations()
for name, prototype in fuse_operations._fields_:
if prototype != c_voidp and getattr(operations, name, None):
op = partial(self._wrapper, getattr(self, name))
setattr(fuse_ops, name, prototype(op))
try:
old_handler = signal(SIGINT, SIG_DFL)
except ValueError:
old_handler = SIG_DFL
err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
sizeof(fuse_ops), None)
try:
signal(SIGINT, old_handler)
except ValueError:
pass
del self.operations # Invoke the destructor
if err:
raise RuntimeError(err)
@staticmethod
def _normalize_fuse_options(**kargs):
for key, value in kargs.items():
if isinstance(value, bool):
if value is True: yield key
else:
yield '%s=%s' % (key, value)
@staticmethod
def _wrapper(func, *args, **kwargs):
'Decorator for the methods that follow'
try:
return func(*args, **kwargs) or 0
except OSError as e:
return -(e.errno or EFAULT)
except:
print_exc()
return -EFAULT
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path.decode(self.encoding)) \
.encode(self.encoding)
# copies a string into the given buffer
# (null terminated and truncated if necessary)
if not isinstance(ret, bytes):
ret = ret.encode('utf-8')
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
def mknod(self, path, mode, dev):
return self.operations('mknod', path.decode(self.encoding), mode, dev)
def mkdir(self, path, mode):
return self.operations('mkdir', path.decode(self.encoding), mode)
def unlink(self, path):
return self.operations('unlink', path.decode(self.encoding))
def rmdir(self, path):
return self.operations('rmdir', path.decode(self.encoding))
def symlink(self, source, target):
'creates a symlink `target -> source` (e.g. ln -s source target)'
return self.operations('symlink', target.decode(self.encoding),
source.decode(self.encoding))
def rename(self, old, new):
return self.operations('rename', old.decode(self.encoding),
new.decode(self.encoding))
def link(self, source, target):
'creates a hard link `target -> source` (e.g. ln source target)'
return self.operations('link', target.decode(self.encoding),
source.decode(self.encoding))
def chmod(self, path, mode):
return self.operations('chmod', path.decode(self.encoding), mode)
def chown(self, path, uid, gid):
# Check if any of the arguments is a -1 that has overflowed
if c_uid_t(uid + 1).value == 0:
uid = -1
if c_gid_t(gid + 1).value == 0:
gid = -1
return self.operations('chown', path.decode(self.encoding), uid, gid)
def truncate(self, path, length):
return self.operations('truncate', path.decode(self.encoding), length)
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('open', path.decode(self.encoding), fi)
else:
fi.fh = self.operations('open', path.decode(self.encoding),
fi.flags)
return 0
def read(self, path, buf, size, offset, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
ret = self.operations('read', path.decode(self.encoding), size,
offset, fh)
if not ret: return 0
retsize = len(ret)
assert retsize <= size, \
'actual amount read %d greater than expected %d' % (retsize, size)
if not isinstance(ret, bytes):
ret = ret.encode('utf-8')
data = create_string_buffer(ret, retsize)
memmove(buf, ret, retsize)
return retsize
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('write', path.decode(self.encoding), data,
offset, fh)
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path.decode(self.encoding))
for key, val in attrs.items():
if hasattr(stv, key):
setattr(stv, key, val)
return 0
def flush(self, path, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('flush', path.decode(self.encoding), fh)
def release(self, path, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('release', path.decode(self.encoding), fh)
def fsync(self, path, datasync, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('fsync', path.decode(self.encoding), datasync,
fh)
def setxattr(self, path, name, value, size, options, *args):
return self.operations('setxattr', path.decode(self.encoding),
name.decode(self.encoding),
string_at(value, size), options, *args)
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path.decode(self.encoding),
name.decode(self.encoding), *args)
retsize = len(ret)
# allow size queries
if not value: return retsize
# do not truncate
if retsize > size: return -ERANGE
if not isinstance(ret, bytes):
ret = ret.encode('utf-8')
buf = create_string_buffer(ret, retsize) # Does not add trailing 0
memmove(value, buf, retsize)
return retsize
def listxattr(self, path, namebuf, size):
attrs = self.operations('listxattr', path.decode(self.encoding)) or ''
        ret = '\x00'.join(attrs).encode(self.encoding) + b'\x00'
retsize = len(ret)
# allow size queries
if not namebuf: return retsize
# do not truncate
if retsize > size: return -ERANGE
if not isinstance(ret, bytes):
ret = ret.encode('utf-8')
buf = create_string_buffer(ret, retsize)
memmove(namebuf, buf, retsize)
return retsize
def removexattr(self, path, name):
return self.operations('removexattr', path.decode(self.encoding),
name.decode(self.encoding))
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir',
path.decode(self.encoding))
return 0
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', path.decode(self.encoding),
fip.contents.fh):
if isinstance(item, basestring):
name, st, offset = item, None, 0
else:
name, attrs, offset = item
if attrs:
st = c_stat()
set_st_attrs(st, attrs)
else:
st = None
if filler(buf, name.encode(self.encoding), st, offset) != 0:
break
return 0
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', path.decode(self.encoding),
fip.contents.fh)
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', path.decode(self.encoding),
datasync, fip.contents.fh)
def init(self, conn):
return self.operations('init', '/')
def destroy(self, private_data):
return self.operations('destroy', '/')
def access(self, path, amode):
return self.operations('access', path.decode(self.encoding), amode)
def create(self, path, mode, fip):
fi = fip.contents
path = path.decode(self.encoding)
if self.raw_fi:
return self.operations('create', path, mode, fi)
else:
fi.fh = self.operations('create', path, mode)
return 0
def ftruncate(self, path, length, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('truncate', path.decode(self.encoding),
length, fh)
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
if not fip:
fh = fip
elif self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
attrs = self.operations('getattr', path.decode(self.encoding), fh)
set_st_attrs(st, attrs)
return 0
def lock(self, path, fip, cmd, lock):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('lock', path.decode(self.encoding), fh, cmd,
lock)
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
mtime = time_of_timespec(buf.contents.modtime)
times = (atime, mtime)
else:
times = None
return self.operations('utimens', path.decode(self.encoding), times)
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path.decode(self.encoding), blocksize,
idx)
class Operations(object):
'''
This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise a FuseOSError exception on
error.
When in doubt of what an operation should do, check the FUSE header file
or the corresponding system call man page.
'''
def __call__(self, op, *args):
if not hasattr(self, op):
raise FuseOSError(EFAULT)
return getattr(self, op)(*args)
def access(self, path, amode):
return 0
bmap = None
def chmod(self, path, mode):
raise FuseOSError(EROFS)
def chown(self, path, uid, gid):
raise FuseOSError(EROFS)
def create(self, path, mode, fi=None):
'''
When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0.
'''
raise FuseOSError(EROFS)
def destroy(self, path):
'Called on filesystem destruction. Path is always /'
pass
def flush(self, path, fh):
return 0
def fsync(self, path, datasync, fh):
return 0
def fsyncdir(self, path, datasync, fh):
return 0
def getattr(self, path, fh=None):
'''
Returns a dictionary with keys identical to the stat C structure of
stat(2).
st_atime, st_mtime and st_ctime should be floats.
NOTE: There is an incombatibility between Linux and Mac OS X
concerning st_nlink of directories. Mac OS X counts all files inside
the directory, while Linux counts only the subdirectories.
'''
if path != '/':
raise FuseOSError(ENOENT)
return dict(st_mode=(S_IFDIR | 0o0755), st_nlink=2)
def getxattr(self, path, name, position=0):
raise FuseOSError(ENOTSUP)
def init(self, path):
'''
Called on filesystem initialization. (Path is always /)
Use it instead of __init__ if you start threads on initialization.
'''
pass
def link(self, target, source):
'creates a hard link `target -> source` (e.g. ln source target)'
raise FuseOSError(EROFS)
def listxattr(self, path):
return []
lock = None
def mkdir(self, path, mode):
raise FuseOSError(EROFS)
def mknod(self, path, mode, dev):
raise FuseOSError(EROFS)
def open(self, path, flags):
'''
When raw_fi is False (default case), open should return a numerical
file handle.
When raw_fi is True the signature of open becomes:
open(self, path, fi)
and the file handle should be set directly.
'''
return 0
def opendir(self, path):
'Returns a numerical file handle.'
return 0
def read(self, path, size, offset, fh):
'Returns a string containing the data requested.'
raise FuseOSError(EIO)
def readdir(self, path, fh):
'''
Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr.
'''
return ['.', '..']
def readlink(self, path):
raise FuseOSError(ENOENT)
def release(self, path, fh):
return 0
def releasedir(self, path, fh):
return 0
def removexattr(self, path, name):
raise FuseOSError(ENOTSUP)
def rename(self, old, new):
raise FuseOSError(EROFS)
def rmdir(self, path):
raise FuseOSError(EROFS)
def setxattr(self, path, name, value, options, position=0):
raise FuseOSError(ENOTSUP)
def statfs(self, path):
'''
Returns a dictionary with keys identical to the statvfs C structure of
statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2
(minimum 512).
'''
return {}
def symlink(self, target, source):
'creates a symlink `target -> source` (e.g. ln -s source target)'
raise FuseOSError(EROFS)
def truncate(self, path, length, fh=None):
raise FuseOSError(EROFS)
def unlink(self, path):
raise FuseOSError(EROFS)
def utimens(self, path, times=None):
'Times is a (atime, mtime) tuple. If None use current time.'
return 0
def write(self, path, data, offset, fh):
raise FuseOSError(EROFS)
class LoggingMixIn:
log = logging.getLogger('fuse.log-mixin')
def __call__(self, op, path, *args):
self.log.debug('-> %s %s %s', op, path, repr(args))
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError as e:
ret = str(e)
raise
finally:
self.log.debug('<- %s %s', op, repr(ret))
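# ---------------------------------------------------------------------------
# Hedged example (not part of the original module): a minimal read-only
# filesystem built on Operations/LoggingMixIn above. The mountpoint and the
# file contents are made up; the FUSE(...) call is left commented out because
# mounting blocks the process and needs an existing, empty directory.
from stat import S_IFREG
class _HelloExampleFS(LoggingMixIn, Operations):
    'Serves a single read-only file, /hello.'
    DATA = b'hello world\n'
    def getattr(self, path, fh=None):
        if path == '/':
            return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)
        if path == '/hello':
            return dict(st_mode=(S_IFREG | 0o444), st_nlink=1,
                        st_size=len(self.DATA))
        raise FuseOSError(ENOENT)
    def readdir(self, path, fh):
        return ['.', '..', 'hello']
    def read(self, path, size, offset, fh):
        return self.DATA[offset:offset + size]
# if __name__ == '__main__':
#     FUSE(_HelloExampleFS(), '/tmp/hello_mount', foreground=True)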
|
the-stack_106_13461
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import logging.config
import yaml
import argparse
LOG = logging.getLogger(__name__)
class Config:
def __init__(self, root):
self.root = root
self._modules = {}
self.data = {}
self._parse_args()
self.args = self.parser.parse_args()
filename = self.args.filename
with open(filename, "rb") as cf:
self.raw_data = yaml.safe_load(cf)
for item in self.raw_data:
if len(item.keys()) > 1:
raise ValueError("Invalid config entry %s" % item)
key = list(item.keys())[0]
value = list(item.values())[0]
name = value.get("name")
if name:
self.data.setdefault(key, {})
if name in self.data[key]:
raise ValueError("Duplicate name %s" % name)
self.data[key][name] = value
else:
self.data.setdefault(key, [])
self.data[key].append(value)
self._configure_logging()
def get_instance(self, cfg, *args, **kwargs):
return self._get_module(cfg["module"]).Class(cfg, *args, **kwargs)
def iter_instances(self, section):
section = self.data.get(section, {})
for config in section.values():
cls = self._get_module(config["module"]).Class
yield cls(self.root, **config)
def iter_providers(self):
for cfg in self.data.get("provider", {}).values():
yield self._get_module(cfg["module"]).Provider(self.root, cfg)
def _get_module(self, name):
"""Get module by name.
Import module if it is not imported.
"""
module = self._modules.get(name)
if not module:
module = importlib.import_module(name)
self._modules[name] = module
return module
def _parse_args(self):
self.parser = argparse.ArgumentParser()
group = self.parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", help="verbose mode",
action="store_true")
group.add_argument("-q", "--quiet", help="quiet mode",
action="store_true")
self.parser.add_argument("filename", type=str, help="config file")
def _configure_logging(self):
LOGGING = {
"version": 1,
"formatters": {
"standard": {
"format": "%(asctime)s %(name)s:"
"%(levelname)s: %(message)s "
"(%(filename)s:%(lineno)d)",
"datefmt": "%Y-%m-%d %H:%M:%S",
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "INFO",
"formatter": "standard",
"stream": "ext://sys.stdout",
},
},
"loggers": {
"": {
"handlers": ["console"],
"level": "DEBUG"
}
}
}
if self.args.quiet:
LOGGING["loggers"][""]["handlers"].remove("console")
elif self.args.verbose:
LOGGING["handlers"]["console"]["level"] = "DEBUG"
def _get_handler(key, value):
return {
"level": key.upper(),
"filename": value,
"class": "logging.handlers.RotatingFileHandler",
"formatter": "standard"
}
default_log = {
"debug": _get_handler,
"error": _get_handler,
"info": _get_handler,
}
if self.data.get("logging"):
section = self.data.get("logging")[0]
for key in section:
if default_log.get(key):
LOGGING["handlers"][key] = default_log[key](key, section[key])
LOGGING["loggers"][""]["handlers"].append(key)
else:
raise ValueError("Unknown logging level")
logging.config.dictConfig(LOGGING)
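# Hedged illustration (not part of the original module): a hypothetical config
# file for this loader. Module paths and section names are placeholders; the
# only structural requirements enforced above are one key per list item, an
# optional "name" per entry, and a "module" that exposes Class/Provider.
#
#   - logging:
#       debug: /var/log/tool/debug.log
#   - provider:
#       name: static
#       module: myproject.providers.static
#   - runner:
#       name: default
#       module: myproject.runners.simple
#
# Hypothetical usage, e.g. `python tool.py -v config.yaml`:
#
#   cfg = Config(root)                        # root is whatever object the app passes in
#   for provider in cfg.iter_providers():     # imports myproject.providers.static
#       ...                                   # and builds its Provider(root, cfg)
#   for runner in cfg.iter_instances("runner"):
#       ...                                   # builds module.Class(root, **entry)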
|
the-stack_106_13463
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import random
import fixtures
import mock
import six
import testtools
from sahara import context
from sahara import exceptions as ex
from sahara.tests.unit import base as test_base
rnd = random.Random()
class ContextTest(testtools.TestCase):
def setUp(self):
super(ContextTest, self).setUp()
self.useFixture(fixtures.FakeLogger('sahara'))
ctx = context.Context('test_user', 'tenant_1', 'test_auth_token', {},
remote_semaphore='123')
context.set_ctx(ctx)
def _add_element(self, lst, i):
context.sleep(rnd.uniform(0, 0.1))
lst.append(i)
def _raise_test_exc(self, exc_msg):
raise TestException(exc_msg)
def test_thread_group_waits_threads(self):
# That can fail with some probability, so making 5 attempts
# Actually it takes around 1 second, so maybe we should
# just remove it
for _ in six.moves.range(5):
lst = []
with context.ThreadGroup() as tg:
for i in six.moves.range(400):
tg.spawn('add %i' % i, self._add_element, lst, i)
self.assertEqual(len(lst), 400)
def test_thread_group_waits_threads_if_spawning_exception(self):
lst = []
with testtools.ExpectedException(RuntimeError):
with context.ThreadGroup() as tg:
for i in six.moves.range(400):
tg.spawn('add %i' % i, self._add_element, lst, i)
raise RuntimeError()
self.assertEqual(len(lst), 400)
def test_thread_group_waits_threads_if_child_exception(self):
lst = []
with testtools.ExpectedException(ex.ThreadException):
with context.ThreadGroup() as tg:
tg.spawn('raiser', self._raise_test_exc, 'exc')
for i in six.moves.range(400):
tg.spawn('add %i' % i, self._add_element, lst, i)
self.assertEqual(len(lst), 400)
def test_thread_group_handles_spawning_exception(self):
with testtools.ExpectedException(TestException):
with context.ThreadGroup():
raise TestException()
def test_thread_group_handles_child_exception(self):
try:
with context.ThreadGroup() as tg:
tg.spawn('raiser1', self._raise_test_exc, 'exc1')
except ex.ThreadException as te:
self.assertIn('exc1', six.text_type(te))
self.assertIn('raiser1', six.text_type(te))
def test_thread_group_prefers_spawning_exception(self):
with testtools.ExpectedException(RuntimeError):
with context.ThreadGroup() as tg:
tg.spawn('raiser1', self._raise_test_exc, 'exc1')
raise RuntimeError()
def test_wrapper_does_not_set_exception(self):
func = mock.MagicMock()
tg = mock.MagicMock(exc=None, failed_thread=None)
context._wrapper(None, 'test thread', tg, func)
self.assertIsNone(tg.exc)
self.assertIsNone(tg.failed_thread)
def test_wrapper_catches_base_exception(self):
func = mock.MagicMock()
func.side_effect = BaseException()
tg = mock.MagicMock(exc=None, failed_thread=None)
context._wrapper(None, 'test thread', tg, func)
self.assertIsNotNone(tg.exc)
self.assertEqual(tg.failed_thread, 'test thread')
def test_is_auth_capable_for_admin_ctx(self):
ctx = context.ctx()
self.assertFalse(ctx.is_auth_capable())
def test_is_auth_capable_for_user_ctx(self):
existing_ctx = context.ctx()
try:
ctx = context.Context('test_user', 'tenant_1', 'test_auth_token',
{"network": "aURL"}, remote_semaphore='123')
self.assertTrue(ctx.is_auth_capable())
finally:
context.set_ctx(existing_ctx)
class TestException(Exception):
pass
class GetAuthURITest(test_base.SaharaTestCase):
def setUp(self):
super(GetAuthURITest, self).setUp()
self.override_auth_config = functools.partial(
self.override_config, group='keystone_authtoken')
def test_get_auth_url_from_auth_uri_param(self):
self.override_auth_config('auth_uri', 'http://pony:5000/v2.0')
self.assertEqual('http://pony:5000/v2.0', context._get_auth_uri())
def test_get_auth_uri_from_identity_uri(self):
self.override_auth_config('identity_uri', 'http://spam:35357')
self.assertEqual('http://spam:35357/v3', context._get_auth_uri())
self.override_config('use_identity_api_v3', False)
self.assertEqual('http://spam:35357/v2.0', context._get_auth_uri())
def test_get_auth_uri_from_auth_params(self):
self.override_auth_config('auth_host', 'eggs')
self.override_auth_config('auth_port', 12345)
self.override_auth_config('auth_protocol', 'http')
self.assertEqual('http://eggs:12345/v3', context._get_auth_uri())
self.override_config('use_identity_api_v3', False)
self.assertEqual('http://eggs:12345/v2.0', context._get_auth_uri())
|
the-stack_106_13475
|
"""
MIT License
Copyright (c) 2020 Brandon Edgren
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import argparse
from collections import defaultdict, namedtuple
import csv
from datetime import datetime
import os
from os import path
# from pprint import pprint, pformat
import re
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.pyplot as plt
# import numpy
STAT_DIR_ENV = "KOVAAK_STAT_DIR"
IMG_DIR_ENV = "KOVAAK_STAT_IMG_DIR"
STAT_FILE_RE = re.compile(r"(?P<name>.+) - Challenge - (?P<date>.+) Stats\.csv")
class SessionStat(object):
"""Stats for a single session. """
Summary = namedtuple("Summary", ["kills", "deaths", "fight_time", "avg_ttk", "damage_done",
"damage_taken", "score", "game_version"])
Kill = namedtuple("Kill", ["kill", "timestamp", "bot", "weapon", "ttk", "shots", "hits",
"accuracy", "damage_done", "damage_possible", "efficiency",
"cheated"])
Weapon = namedtuple("Weapon", ["weapon", "shots", "hits", "damage_done", "damage_possible"])
def __init__(self, date, summary, kills, weapons):
self.date = date
self.summary = summary
self.kills = kills
self.weapons = weapons
@property
def accuracy(self):
total_shots, total_hits = 0, 0
for kill in self.kills:
total_shots += kill.shots
total_hits += kill.hits
if total_shots == 0:
return 0
return total_hits / total_shots
@property
def ttk(self):
total_ttk = 0
for kill1, kill2 in zip(self.kills, self.kills[1:]):
total_ttk += (kill2.timestamp - kill1.timestamp).total_seconds()
if len(self.kills) <= 1:
return 0
return total_ttk / (len(self.kills) - 1)
@staticmethod
def from_file(fname):
m = STAT_FILE_RE.match(fname)
date = datetime.strptime(m.group("date"), "%Y.%m.%d-%H.%M.%S")
with open(fname, "r") as f:
kill_csv, weapon_csv, summary_csv, settings_csv = f.read().split("\n\n")
summary_info = {row[0].strip(":"): row[1] for row in csv.reader(summary_csv.splitlines())}
score_offset = -99000 if "Ground Plaza NO UFO" in m.group("name") else 0
summary = SessionStat.Summary(
int(summary_info["Kills"]),
int(summary_info["Deaths"]),
float(summary_info["Fight Time"]),
float(summary_info["Avg TTK"]),
float(summary_info["Damage Done"]),
float(summary_info["Damage Taken"]),
float(summary_info["Score"]) + score_offset,
tuple(map(int, summary_info["Game Version"].split("."))))
timestamp_format = "%H:%M:%S:%f" if summary.game_version < (2, 0, 1, 0) else "%H:%M:%S.%f"
kills = []
reader = csv.DictReader(kill_csv.splitlines())
for row in reader:
kills.append(SessionStat.Kill(
int(row["Kill #"]),
datetime.strptime(row["Timestamp"], timestamp_format),
row["Bot"],
row["Weapon"],
float(row["TTK"][:-1]),
int(row["Shots"]),
int(row["Hits"]),
float(row["Accuracy"]),
float(row["Damage Done"]),
float(row["Damage Possible"]),
float(row["Efficiency"]),
bool(row["Cheated"])))
weapons = []
reader = csv.DictReader(weapon_csv.splitlines())
for row in reader:
weapons.append(SessionStat.Weapon(
row["Weapon"],
int(row["Shots"]),
int(row["Hits"]),
float(row["Damage Done"]),
float(row["Damage Possible"])))
# TODO: Skipping this for now. Not sure if it's useful.
_ = settings_csv
return SessionStat(date, summary, kills, weapons)
def daily_stats(stats, get_stat):
days, values = [], []
stats_by_day = defaultdict(list)
for stat in sorted(stats, key=lambda s: s.date):
day = stat.date.strftime("%Y-%m-%d")
if day not in days:
days.append(day)
stats_by_day[day].append(stat)
for day in days:
avg = sum(get_stat(s) for s in stats_by_day[day]) / len(stats_by_day[day])
values.append(avg)
return days, values
# For making box plots.
# def daily_stats2(stats, get_stat):
# days, values = [], []
# stats_by_day = defaultdict(list)
# for stat in sorted(stats, key=lambda s: s.date):
# day = stat.date.strftime("%Y-%m-%d")
# if day not in days:
# days.append(day)
# stats_by_day[day].append(get_stat(stat))
# for day in days:
# values.append(stats_by_day[day])
# return days, values
def parse_stats(stats_dir, stat_files):
return [SessionStat.from_file(path.join(stats_dir, f)) for f in stat_files]
# def _trendline(ax, x, y):
# z = numpy.polyfit(x, y, 1)
# p = numpy.poly1d(z)
# ax.plot(x,p(x), "r--")
def _conf_axes(axes, title, rotate):
axes.set_title(title)
axes.grid(True)
if rotate:
plt.setp(axes.get_xticklabels(), rotation=40, ha="right", rotation_mode="anchor")
def plot_tracking(challenge_name, fig_id, stats, img_dir):
width, height = 1, 2
fig = plt.figure(fig_id, figsize=(20, 12))
fig.suptitle(challenge_name)
ax = plt.subplot(height, width, 1)
_conf_axes(ax, "Score", False)
x, y = zip(*enumerate(s.summary.score for s in stats))
ax.plot(x, y, ".")
# _trendline(ax, x, y)
ax = plt.subplot(height, width, 2)
_conf_axes(ax, "Avg Score Per Day", True)
ax.scatter(*daily_stats(stats, lambda s: s.summary.score))
# ax.set_title("Avg Score Per Day")
# days, values = daily_stats2(stats, lambda s: s.summary.score)
# for _, value in zip(days, values):
# plt.boxplot(values)
# ax.set_xticklabels(days, rotation=45)
# plt.show()
canvas = FigureCanvasAgg(fig)
canvas.print_figure(path.join(img_dir, challenge_name))
print("Saved", path.join(img_dir, challenge_name) + ".png")
plt.close(fig)
def plot_click_timing(challenge_name, fig_id, stats, img_dir):
width, height = 2, 4
fig = plt.figure(fig_id, figsize=(20, 12))
fig.suptitle(challenge_name)
ax = plt.subplot(height, width, 1)
_conf_axes(ax, "Score", False)
x, y = zip(*enumerate(s.summary.score for s in stats))
ax.plot(x, y, ".")
# _trendline(ax, x, y)
ax = plt.subplot(height, width, 3)
_conf_axes(ax, "Avg Score Per Day", True)
days, values = daily_stats(stats, lambda s: s.summary.score)
ax.scatter(days, values)
# _trendline(ax, range(len(days)), values)
ax = plt.subplot(height, width, 2)
_conf_axes(ax, "Kills", False)
ax.plot([s.summary.kills for s in stats], ".")
ax = plt.subplot(height, width, 4)
_conf_axes(ax, "Avg Kills Per Day", True)
ax.scatter(*daily_stats(stats, lambda s: s.summary.kills))
ax = plt.subplot(height, width, 6)
_conf_axes(ax, "Accuracy", False)
ax.plot([s.accuracy for s in stats], ".")
ax = plt.subplot(height, width, 8)
_conf_axes(ax, "Avg Accuracy Per Day", True)
ax.scatter(*daily_stats(stats, lambda s: s.accuracy))
ax = plt.subplot(height, width, 5)
_conf_axes(ax, "TTK", False)
ax.plot([s.ttk for s in stats], ".")
ax = plt.subplot(height, width, 7)
_conf_axes(ax, "Avg TTK Per Day", True)
ax.scatter(*daily_stats(stats, lambda s: s.ttk))
plt.subplots_adjust(top=0.95, bottom=0.08, left=0.05, right=0.95, hspace=0.5, wspace=0.2)
# plt.show()
canvas = FigureCanvasAgg(fig)
canvas.print_figure(path.join(img_dir, challenge_name))
print("Saved", path.join(img_dir, challenge_name) + ".png")
plt.close(fig)
def main():
parser = argparse.ArgumentParser(description="Generate graphs from Kovaak data.")
parser.add_argument(
"--statsdir", type=str, default=os.environ.get(STAT_DIR_ENV, None),
help="File path to where the stat files are. This should be in "
".../SteamLibrary/steamapps/common/FPSAimTrainer/FPSAimTrainer/stats. Defaults to the "
"{} environment variable (currently: %(default)s)".format(STAT_DIR_ENV))
parser.add_argument(
"--imgdir", type=str, default=os.environ.get(IMG_DIR_ENV, None),
help="File path to save the generated images at. Defaults to the "
"{} environment variable (currently: %(default)s)".format(IMG_DIR_ENV))
args = parser.parse_args()
if not args.statsdir:
print("Please use the --statdir option or set the %s environment variable." % STAT_DIR_ENV)
exit(1)
if not args.imgdir:
print("Please use the --imgdir option or set the %s environment variable." % IMG_DIR_ENV)
exit(1)
all_stat_files = []
for _, _, files in os.walk(args.statsdir):
all_stat_files += files
files_by_challenge = defaultdict(list)
for fname in all_stat_files:
m = STAT_FILE_RE.match(fname)
if not m:
continue
files_by_challenge[m.group("name")].append(fname)
# challenges = sorted(files_by_challenge.keys())
# print("\n".join("%d: %s" % (i + 1, c) for i, c in enumerate(challenges)))
# def _request_digit():
# choice = input("Select challenge: ")
# if not choice.isdigit() or not 1 <= int(choice) <= len(challenges):
# print("Please enter a number in range 1-%d" % len(challenges))
# return None
# return choice
# choice = _request_digit()
# while not choice:
# choice = _request_digit()
# challenge_name = challenges[int(choice) - 1]
# stats = parse_stats(stats_dir, files_by_challenge[challenge_name])
# if stats[0].summary.kills > 0:
# plot_click_timing(challenge_name, stats, img_dir)
# else:
# plot_tracking(challenge_name, stats, img_dir)
fig_id = 1
for challenge_name, files in files_by_challenge.items():
stats = parse_stats(args.statsdir, files)
if stats[0].summary.kills > 0:
plot_click_timing(challenge_name, fig_id, stats, args.imgdir)
else:
plot_tracking(challenge_name, fig_id, stats, args.imgdir)
fig_id += 1
if __name__ == "__main__":
main()
|
the-stack_106_13476
|
class TrieNode:
''' A node in trie '''
def __init__(self, char):
# The character that the node holds
self.value = char
# To check whether the word has ended
self.stop = False
# dictionary
self.children = {}
#if __name__ == "__main__" :
# trying out the class
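# Illustrative sketch (example only): a minimal Trie built on top of TrieNode,
# shown to demonstrate how the node class above is typically used. The Trie
# class and the demo words are assumptions for illustration.
class Trie:
    ''' A trie built from TrieNode objects '''
    def __init__(self):
        # The root node holds no character of its own
        self.root = TrieNode("")
    def insert(self, word):
        node = self.root
        for char in word:
            # Reuse an existing child or create a new node for this character
            if char not in node.children:
                node.children[char] = TrieNode(char)
            node = node.children[char]
        # Mark that a complete word ends at this node
        node.stop = True
    def search(self, word):
        node = self.root
        for char in word:
            if char not in node.children:
                return False
            node = node.children[char]
        return node.stop
if __name__ == "__main__":
    # trying out the class
    trie = Trie()
    trie.insert("hello")
    print(trie.search("hello"))  # True
    print(trie.search("hell"))   # False: stored only as a prefix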
|
the-stack_106_13477
|
#
# Copyright 2013-2022 The Foundry Visionmongers Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A test suite that always fails
"""
# pylint: disable=invalid-name
# pylint: disable=missing-class-docstring,missing-function-docstring
from openassetio.test.manager.harness import FixtureAugmentedTestCase
__all__ = []
class Test_failingSuite(FixtureAugmentedTestCase):
def test_that_will_always_fail(self):
# pylint: disable=redundant-unittest-assert
self.assertTrue(False)
|
the-stack_106_13479
|
import pickle
import sys
import argparse
import numpy as np
import pandas as pd
from os.path import join, dirname, abspath, expanduser
from ast import literal_eval as make_tuple
import matplotlib.pyplot as plt
from pyrieef.geometry.workspace import SignedDistanceWorkspaceMap
from pyrieef.geometry.pixel_map import sdf
ROOT_DIR = join(dirname(abspath(__file__)), '..')
DATA_DIR = join(ROOT_DIR, 'data', 'experiments')
MODEL_DIR = join(expanduser("~"), '.qibullet', '1.4.3')
DATASET_DIR = join(ROOT_DIR, 'datasets', 'mogaze')
sys.path.append(ROOT_DIR)
sys.path.append(join(ROOT_DIR, "../humoro"))
robot_model_file = join(MODEL_DIR, 'pepper.urdf')
from lgp.utils.helpers import draw_numpy_trajectory
from lgp.geometry.workspace import HumoroWorkspace
from examples.prediction.hmp_interface import HumanRollout
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Example run: python process_data.py data.p')
parser.add_argument('--name', help='Name of the pickled result file in the experiments data directory', type=str, default='ground_truth.p')
parser.add_argument('-s', help='Segment tuple to plot: (session, start_frame, end_frame)', type=str, default="(\'p5_1\', 100648, 108344)")
parser.add_argument('-m', help='Mode: 1 draws the signed distance field, otherwise the recorded trajectories', type=int, default=0)
args = parser.parse_args()
data_file = join(DATA_DIR, args.name)
segment = make_tuple(args.s)
with open(data_file, 'rb') as f:
data = pickle.load(f)
hr = HumanRollout(path_to_mogaze=DATASET_DIR)
ws = HumoroWorkspace(hr, robot_model_file=robot_model_file)
ws.initialize_workspace_from_humoro(segment=segment, objects=[])
if args.m == 1:
meshgrid = ws.box.stacked_meshgrid(100)
sdf_map = np.asarray(SignedDistanceWorkspaceMap(ws)(meshgrid))
sdf_map = (sdf_map < 0).astype(float)
signed_dist_field = np.asarray(sdf(sdf_map))
signed_dist_field = np.flip(signed_dist_field, axis=0)
signed_dist_field = np.interp(signed_dist_field, (signed_dist_field.min(), signed_dist_field.max()), (0, max(ws.box.dim)))
fig = plt.figure(figsize=(8, 8))
extents = ws.box.box_extent()
ax = fig.add_subplot(111)
im = ax.imshow(signed_dist_field, cmap='inferno', interpolation='nearest', extent=extents)
fig.colorbar(im)
else:
ax = ws.draw_workspace(show=False)
single_traj = data[segment]['single_actual_path']
dynamic_traj = data[segment]['dynamic_actual_path']
human_traj = data[segment]['human_path']
draw_numpy_trajectory(ax, single_traj, 'green')
draw_numpy_trajectory(ax, dynamic_traj, 'blue')
draw_numpy_trajectory(ax, human_traj, 'red')
plt.show()
|
the-stack_106_13481
|
# Print function may not work if proper GUI is not selected
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
print("Using OpenCV backend")
class OCR:
"""
OCR class
"""
__mode = ""
__codename = "pyimageocr"
__ver = 1.0
__image = []
__height, __width, __channels = 0, 0, 0
rowSegment = []
colSegment = []
def __init__(self, mode = "en"):
self.classes = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
self.cache_file = "__pycache__"
if not os.path.exists(self.cache_file):
os.makedirs(self.cache_file)
self.__mode = mode
def train(self, folder, save="train.bin"):
sub_folder = os.listdir(folder)
training_images = []
training_label = []
i = 0
knn = cv2.ml.KNearest_create()
for class_lable in sub_folder:
i += 1
cu_folder = folder + os.sep + class_lable
imgs = os.listdir(cu_folder)
tmp = []
print(cu_folder)
for img in imgs:
char_image = cv2.imread(cu_folder+os.sep+img)
char_image = 255 - cv2.cvtColor(char_image, cv2.COLOR_BGR2GRAY)
char_image = cv2.resize(char_image, (64, 64)).astype(np.float32)
char_image = char_image.flatten()
training_images.append(char_image)
training_label.append([i])
training_images = np.array(training_images, dtype=np.float32)
training_label = np.array(training_label, dtype=np.float32)
# print("training_images : ", training_images.shape)
# print("training_label", training_label.shape)
train = knn.train(training_images, cv2.ml.ROW_SAMPLE, training_label)
np.savez(save,train=training_images, train_labels=training_label)
    def predict(self, filename):
        self.getImageFromFile(filename)
        self.thresholdImage()
return self.__Segment()
    def getImageFromFile(self, filename):
try:
img = cv2.imread(filename)
self.__image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
self.__height, self.__width, self.__channels = img.shape
except Exception:
print("File Read Error... (Line 24)")
    def thresholdImage(self):
try:
sum = 0
for i in range(0, self.__height):
for j in range(0, self.__width):
sum = sum+ self.__image[i, j]
thresh = sum/(self.__width*self.__height)
self.__image = cv2.threshold(self.__image, thresh, 255, cv2.THRESH_BINARY)[1]
except Exception:
print("Unknown Execption at line 34")
def imageShow(self):
try:
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', self.__image)
cv2.waitKey(0)
cv2.destroyAllWindows()
except Exception:
print("System can't detect any compatible version of OpenCV")
def compressInt(self, number):
remove = []
for i in range(0, len(number)-1):
if abs(number[i] - number[i+1]) < 3:
remove.append(number[i+1])
for i in range(0, len(remove)):
number.remove(remove[i])
return number
def getNoOfFile(self, path):
path, dirs, files = os.walk(path).__next__()
return len(files)
def save(self, image, path):
num = str(self.getNoOfFile(path)+1)
cv2.imwrite(self.cache_file+"/tmp.jpg",image)
main = cv2.imread(self.cache_file+"/tmp.jpg")
main = cv2.resize(main, (64, 64))
cv2.imwrite(path+"/"+num+".jpg",main)
def pattern_match(self, image=None, file=None):
if file:
main = cv2.imread(file)
else:
cv2.imwrite(self.cache_file+"/tmp.jpg",image)
main = cv2.imread(self.cache_file+"/tmp.jpg")
main = cv2.resize(main, (64, 64))
main = cv2.cvtColor(main, cv2.COLOR_BGR2GRAY)
main = np.array([main.flatten()], np.float32)
#Load the kNN Model
with np.load('train.bin.npz') as data:
train = data['train']
train_labels = data['train_labels']
knn = cv2.ml.KNearest_create()
knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
ret, result, neighbours, dist = knn.findNearest(main,k=1)
return self.classes[int(result)-1]
def __getRows(self, bit_factor=5):
strip = []
start = False
tmp = 0
loop = 0
shaped = cv2.resize(self.__image, (bit_factor, self.__height))
for i in shaped:
loop += 1
if sum(i) < bit_factor*255:
if not start:
start = True
tmp = loop
if sum(i) == bit_factor*255:
if start:
start = False
strip.append((tmp,loop))
return strip
def __getWord(self, image, bit_factor = 10):
height, width = image.shape
strip = []
start = False
tmp = 0
loop = 0
        shaped = cv2.resize(image, (self.__width, bit_factor))
for i in zip(*shaped):
loop += 1
if sum(i) < bit_factor*255:
if not start:
start = True
tmp = loop
if sum(i) == bit_factor*255:
if start:
start = False
strip.append((tmp, loop))
buff = ""
for i, j in strip:
buff = buff + self.pattern_match(image=image[0:height, i:j])
return buff
def __Segment(self):
line = []
self.rowSegment = self.__getRows()
for i in range(len(self.rowSegment)):
line.append(self.__getWord(self.__image[self.rowSegment[i][0]:self.rowSegment[i][1], 0:self.__width]))
return line
def main():
ocr = OCR(mode='en')
ocr.train("../Training")
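# Illustrative usage sketch: prediction with a previously trained model.
# This assumes training has already produced "train.bin.npz" (the default
# file loaded by pattern_match) and that "sample_page.jpg" is a placeholder
# path to an image of printed uppercase text.
def predict_example(image_path="sample_page.jpg"):
    ocr = OCR(mode='en')
    # predict() thresholds the image, segments it into rows and characters,
    # and returns one recognized string per line.
    for line in ocr.predict(image_path):
        print(line)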
if __name__ == '__main__':
main()
|