the-stack_106_18842
import datetime
import os
import copy
from dataclasses import dataclass, astuple
from typing import Optional
import numpy
import torch
from colorama import Back
try:
from abstract_game import AbstractGame
except ImportError:
from .abstract_game import AbstractGame
try:
from models import MuZeroResidualNetwork
except ImportError:
from ..models import MuZeroResidualNetwork
BOARD_SIZE_X = 3
BOARD_SIZE_Y = 4
UNIT_KIND_NUM = 5 # Lion, Elephant, Giraffe, Chick (Piyo), Chicken (promoted Chick)
CAPTURABLE_KIND_NUM = 3 # Elephant, Giraffe, Chick
ACTION_SPACE_SIZE = (
(BOARD_SIZE_X * BOARD_SIZE_Y + CAPTURABLE_KIND_NUM) * # FROM
(BOARD_SIZE_X * BOARD_SIZE_Y) * # TO
2 # Promote
)
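# = (3*4 + 3) * (3*4) * 2 = 360 possible action indices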
P1_COLOR = Back.BLUE
P2_COLOR = Back.RED
RESET = Back.RESET
class MuZeroConfig:
def __init__(self):
# More information is available here: https://github.com/werner-duvaud/muzero-general/wiki/Hyperparameter-Optimization
self.seed = 0 # Seed for numpy, torch and the game
self.max_num_gpus = None # Fix the maximum number of GPUs to use. It's usually faster to use a single GPU (set it to 1) if it has enough memory. None will use every GPU available
### Game
self.observation_shape = ((UNIT_KIND_NUM+CAPTURABLE_KIND_NUM)*2 + 1, BOARD_SIZE_Y, BOARD_SIZE_X) # Dimensions of the game observation, must be 3D (channel, height, width). For a 1D array, please reshape it to (1, 1, length of array)
self.action_space = list(range(ACTION_SPACE_SIZE)) # Fixed list of all possible actions. You should only edit the length
self.players = list(range(2)) # List of players. You should only edit the length
self.stacked_observations = 0 # Number of previous observations and previous actions to add to the current observation
# Evaluate
self.muzero_player = 0 # Turn Muzero begins to play (0: MuZero plays first, 1: MuZero plays second)
self.opponent = "expert" # Hard-coded agent that MuZero faces to assess its progress in multiplayer games. It doesn't influence training. None, "random" or "expert" if implemented in the Game class
### Self-Play
self.num_workers = 5 # Number of simultaneous threads/workers self-playing to feed the replay buffer
self.selfplay_on_gpu = False
self.max_moves = 100 # Maximum number of moves if game is not finished before
self.num_simulations = 30 # Number of future moves self-simulated
self.discount = 1 # Chronological discount of the reward
self.temperature_threshold = None # Number of moves before dropping the temperature given by visit_softmax_temperature_fn to 0 (ie selecting the best action). If None, visit_softmax_temperature_fn is used every time
# Root prior exploration noise
self.root_dirichlet_alpha = 0.2 # Larger values spread the exploration noise thinner and wider
self.root_exploration_fraction = 0.25
# UCB formula
self.pb_c_base = 19652
self.pb_c_init = 1.25
### Network
self.network = "animal_shogi" # "resnet" / "fullyconnected"
self.support_size = 1 # Value and reward are scaled (with almost sqrt) and encoded on a vector with a range of -support_size to support_size. Choose it so that support_size <= sqrt(max(abs(discounted reward)))
# Residual Network and animal_shogi Network
self.downsample = False # Downsample observations before representation network, False / "CNN" (lighter) / "resnet" (See paper appendix Network Architecture)
self.blocks = 3 # Number of blocks in the ResNet
self.channels = 64 # Number of channels in the ResNet
self.reduced_channels_reward = 16 # Number of channels in reward head
self.reduced_channels_value = 16 # Number of channels in value head
self.reduced_channels_policy = 32 # Number of channels in policy head
self.resnet_fc_reward_layers = [8] # Define the hidden layers in the reward head of the dynamic network
self.resnet_fc_value_layers = [8] # Define the hidden layers in the value head of the prediction network
self.resnet_fc_policy_layers = [64] # Define the hidden layers in the policy head of the prediction network
# Fully Connected Network
self.encoding_size = 32
self.fc_representation_layers = [] # Define the hidden layers in the representation network
self.fc_dynamics_layers = [16] # Define the hidden layers in the dynamics network
self.fc_reward_layers = [16] # Define the hidden layers in the reward network
self.fc_value_layers = [] # Define the hidden layers in the value network
self.fc_policy_layers = [] # Define the hidden layers in the policy network
### Training
self.results_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../results", os.path.basename(__file__)[:-3], datetime.datetime.now().strftime("%Y-%m-%d--%H-%M-%S")) # Path to store the model weights and TensorBoard logs
self.save_model = True # Save the checkpoint in results_path as model.checkpoint
self.training_steps = 1000000 # Total number of training steps (ie weights update according to a batch)
self.batch_size = 256 # Number of parts of games to train on at each training step
self.checkpoint_interval = 10 # Number of training steps before using the model for self-playing
self.value_loss_weight = 0.25 # Scale the value loss to avoid overfitting of the value function, paper recommends 0.25 (See paper appendix Reanalyze)
self.train_on_gpu = torch.cuda.is_available() # Train on GPU if available
self.optimizer = "Adam" # "Adam" or "SGD". Paper uses SGD
self.weight_decay = 1e-5 # L2 weights regularization
self.momentum = 0.9 # Used only if optimizer is SGD
# Exponential learning rate schedule
self.lr_init = 0.003 # Initial learning rate
self.lr_decay_rate = 1 # Set it to 1 to use a constant learning rate
self.lr_decay_steps = 10000
### Replay Buffer
self.replay_buffer_size = 10000 # Number of self-play games to keep in the replay buffer
self.num_unroll_steps = 5 # Number of game moves to keep for every batch element
self.td_steps = self.max_moves # Number of steps in the future to take into account for calculating the target value
self.PER = False # Prioritized Replay (See paper appendix Training), select in priority the elements in the replay buffer which are unexpected for the network
self.PER_alpha = 0.5 # How much prioritization is used, 0 corresponding to the uniform case, paper suggests 1
# Reanalyze (See paper appendix Reanalyse)
self.use_last_model_value = False # Use the last model to provide a fresher, stable n-step value (See paper appendix Reanalyze)
self.reanalyse_on_gpu = False
### Adjust the self play / training ratio to avoid over/underfitting
self.self_play_delay = 0 # Number of seconds to wait after each played game
self.training_delay = 0 # Number of seconds to wait after each training step
self.ratio = None # Desired training steps per self played step ratio. Equivalent to a synchronous version, training can take much longer. Set it to None to disable it
@property
def random_move_till_n_action_in_self_play(self):
return numpy.random.choice([0, 2, 2, 4, 4, 4, 4, 4, 4, 4, 6])
def visit_softmax_temperature_fn(self, trained_steps):
"""
Parameter to alter the visit count distribution to ensure that the action selection becomes greedier as training progresses.
The smaller it is, the more likely the best action (ie with the highest visit count) is chosen.
Returns:
Positive float.
"""
if trained_steps < self.training_steps * 0.5:
return 1
else:
return 0.5
def num_simulations_fn(self, num_played_games):
rate = num_played_games / (self.replay_buffer_size * 10)
n = numpy.clip(self.num_simulations * rate, 20, self.num_simulations)
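# With the defaults (replay_buffer_size=10000, num_simulations=30) this keeps a floor of
# 20 simulations per move early on and reaches the full 30 once about 100k games have been played.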
return int(n)
class Game(AbstractGame):
"""
Game wrapper.
"""
def __init__(self, seed=None):
self.env = AnimalShogi()
def step(self, action):
"""
Apply action to the game.
Args:
action : action of the action_space to take.
Returns:
The new observation, the reward and a boolean if the game has ended.
"""
observation, reward, done = self.env.step(action)
return observation, reward, done
def to_play(self):
"""
Return the current player.
Returns:
The current player, it should be an element of the players list in the config.
"""
return self.env.to_play()
def legal_actions(self):
"""
Should return the legal actions at each turn. If they are not available, it can return
the whole action space. At each turn, the game has to be able to handle one of the returned actions.
For complex games where computing legal moves takes too long, the idea is to define the legal actions
as equal to the action space but to return a negative reward if the action is illegal.
Returns:
An array of integers, subset of the action space.
"""
return self.env.legal_actions()
def reset(self):
"""
Reset the game for a new game.
Returns:
Initial observation of the game.
"""
return self.env.reset()
def render(self):
"""
Display the game observation.
"""
self.env.render()
input("Press enter to take a step ")
def human_to_action(self):
"""
For multiplayer games, ask the user for a legal action
and return the corresponding action number.
Returns:
An integer from the action space.
"""
return self.env.human_to_action()
def action_to_string(self, action_number):
"""
Convert an action number to a string representing the action.
Args:
action_number: an integer from the action space.
Returns:
String representing the action.
"""
return self.env.action_to_string(action_number)
def expert_agent(self):
"""
Hard-coded agent that MuZero faces to assess its progress in multiplayer games.
It doesn't influence training.
Returns:
Action as an integer to take in the current game state
"""
return self.env.expert_action()
@dataclass
class Move:
from_board: Optional[int] # (y*3 + x) or None
from_stock: Optional[int] # (E=0, G=1, P=2) or None
to_board: int # (y*3 + x)
promotion: int # 0 or 1(promote)
@classmethod
def decode_from_action_index(cls, action: int):
"""
:param action:
ActionSpace: combination of below
From H*W + 3(E G P stock) (15)
To H*W (12)
Promote 2 (2)
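Illustrative round-trip of the encoding (board squares are indexed as y*3 + x):
>>> m = Move.decode_from_action_index(176)
>>> m
Move(from_board=7, from_stock=None, to_board=4, promotion=0)
>>> m.encode_to_action_index()
176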
"""
board_size = BOARD_SIZE_Y * BOARD_SIZE_X
assert 0 <= action < (board_size+3) * board_size * 2
promote = action % 2
action //= 2
to_board = action % board_size
action //= board_size
if action < board_size:
from_board = action
from_stock = None
else:
from_board = None
from_stock = action - board_size # (E=0, G=1, P=2)
return cls(from_board, from_stock, to_board, promote)
def encode_to_action_index(self) -> int:
board_size = BOARD_SIZE_Y * BOARD_SIZE_X
if self.from_stock is None:
action = self.from_board
else:
action = board_size + self.from_stock
action *= board_size * 2
action += self.to_board * 2
action += self.promotion
assert 0 <= action < (board_size+3) * board_size * 2
return action
def from_pos(self):
assert self.from_board is not None
return self.from_board // BOARD_SIZE_X, self.from_board % BOARD_SIZE_X
def to_pos(self):
assert self.to_board is not None
return self.to_board // BOARD_SIZE_X, self.to_board % BOARD_SIZE_X
def clone(self):
return Move(*astuple(self))
class AnimalShogi:
board = None
stocks = None
player = 0
_legal_actions = None
def __init__(self):
self.init_game()
def clone(self):
obj = AnimalShogi()
obj.board = numpy.copy(self.board)
obj.stocks = numpy.copy(self.stocks)
obj.player = self.player
obj._legal_actions = copy.copy(self._legal_actions)
return obj
def init_game(self):
# Board(H=4, W=3)
# player-0: L=1, E=2, G=3, P=4, C=5
# player-1: L=6, E=7, G=8, P=9, C=10
# stocks for p0 = (E, G, P)
# stocks for p1 = (E, G, P)
self.board = numpy.array([
[G2, L2, E2],
[0 , P2, 0],
[0 , P1, 0],
[E1, L1, G1],
], dtype="int32")
self.stocks = numpy.zeros((2, CAPTURABLE_KIND_NUM), dtype="int32")
self.player = 0
self._legal_actions = None
def reset(self):
self.init_game()
return self.get_observation()
def to_play(self):
return self.player
def step(self, action):
move = Move.decode_from_action_index(action)
if not self.is_legal(move):
return self.get_observation(), -1, True
win, lose, done = self.do_move(move)
self.player = 1 - self.player
reward = 0
if win:
reward = 1
elif lose:
reward = -1
return self.get_observation(), reward, done
def do_move(self, move: Move):
self._legal_actions = None
player = self.to_play()
win = False
lose = False
done = False
if move.from_stock is not None: # drop
self.stocks[player][move.from_stock] -= 1
unit_kind = move.from_stock + 2 + player * 5 # (2,3,4 or 7,8,9)
self.board[move.to_pos()] = unit_kind
else:
unit_kind = self.board[move.from_pos()]
self.board[move.from_pos()] = 0
if self.board[move.to_pos()] > 0: # capture
captured_unit_kind = self.board[move.to_pos()] % 5
if captured_unit_kind == 1: # Lion
done = win = True
else:
stock_kind = [2, None, 0, 1, 2][captured_unit_kind] # board:E, G, P, C -> stock:E, G, P, P
self.stocks[player][stock_kind] += 1
self.board[move.to_pos()] = unit_kind + move.promotion
if player == 0 and numpy.any(self.board[BOARD_SIZE_Y-1] == L2): # Player1 Lion Try!
lose = done = True
elif player == 1 and numpy.any(self.board[0] == L1): # Player0 Lion Try!
lose = done = True
return win, lose, done
@staticmethod
def is_legal_move_direction(unit_kind, from_pos, to_pos):
diff = (to_pos[0]-from_pos[0], to_pos[1]-from_pos[1])
return diff in ALLOWED_MOVES[unit_kind]
def is_legal(self, move: Move):
player = self.to_play()
if move.from_stock is not None:
remain_num = self.stocks[self.to_play()][move.from_stock]
if remain_num < 1:
return False
if move.promotion == 1:
return False
else:
unit_kind = self.board[move.from_pos()]
if unit_kind == 0: # no unit there
return False
elif unit_kind < 6 and self.to_play() == 1: # opponent unit
return False
elif unit_kind > 5 and self.to_play() == 0: # opponent unit
return False
if move.promotion == 1:
if player == 0 and (unit_kind != P1 or move.to_pos()[0] != 0):
return False
elif player == 1 and (unit_kind != P2 or move.to_pos()[0] != BOARD_SIZE_Y-1):
return False
if not self.is_legal_move_direction(unit_kind, move.from_pos(), move.to_pos()):
return False
captured = self.board[move.to_pos()]
if captured:
if move.from_stock is not None:
return False # drop on the unit directly
if captured < 6 and self.to_play() == 0: # capture my team0
return False
if captured > 5 and self.to_play() == 1: # capture my team1
return False
return True
def get_observation(self):
channels = []
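# 10 one-hot board planes (unit kinds 1-10) + 2*3 constant stock planes + 1 to-play plane
# = (UNIT_KIND_NUM + CAPTURABLE_KIND_NUM) * 2 + 1 = 17 channels, matching observation_shape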
# board
for kind in range(1, 11):
ch = numpy.where(self.board == kind, 1, 0)
channels.append(ch)
# stock
for player in [0, 1]:
for kind in range(CAPTURABLE_KIND_NUM):
ch = numpy.full_like(channels[0], self.stocks[player][kind] / 2.)
channels.append(ch)
# to_play
ch = numpy.full_like(channels[0], 1 - self.to_play() * 2)
channels.append(ch)
return numpy.array(channels, dtype="int32")
def legal_actions(self):
if self._legal_actions is None:
ret = []
for action in range(ACTION_SPACE_SIZE):
if self.is_legal(Move.decode_from_action_index(action)):
ret.append(action)
self._legal_actions = ret
return copy.copy(self._legal_actions)
def human_to_action(self):
stock_kinds = {"E": 0, "G": 1, "C": 2}
if self.to_play() == 0:
print(P1_COLOR + f"Player1" + RESET)
else:
print(P2_COLOR + f"Player2" + RESET)
def convert_position_string_to_pos_index(pos_str):
try:
pos_str = pos_str.lower()
col = int(pos_str[0]) - 1
row = "abcd".index(pos_str[1])
return row * BOARD_SIZE_X + col
except:
return None
# input from
from_stock = None
from_board = None
to_board = None
player = self.to_play()
while True:
while True:
try:
from_str = input(f"From(ex: '1a', '2d', or 'E' 'G' 'C' from stock): ").strip()
if from_str == "random":
return numpy.random.choice(self.legal_actions())
if from_str.upper() in stock_kinds:
from_stock = stock_kinds[from_str.upper()]
if self.stocks[player][from_stock] > 0:
break
else:
print(f"You do not have {from_str}")
elif len(from_str) == 2:
from_board = convert_position_string_to_pos_index(from_str)
if from_board is None:
print(f"illegal position {from_str}")
else:
break
except:
pass
print("Wrong input, try again")
while True:
try:
to_str = input(f"To(ex: '1a', '2d'): ").strip()
if to_str == "random":
return numpy.random.choice(self.legal_actions())
if len(to_str) == 2:
to_board = convert_position_string_to_pos_index(to_str)
if to_board is None:
print(f"illegal position {to_str}")
else:
break
except:
pass
print("Wrong input, try again")
move = Move(from_board, from_stock, to_board, 0)
if self.is_legal(move) and move.from_board is not None:
m2 = move.clone()
m2.promotion = 1
if self.is_legal(m2):
pr_str = input("Promotion? [Y]/[n]: ").lower()
if pr_str != "n":
move.promotion = 1
if self.is_legal(move):
break
else:
print("Illegal Move, try again")
return move.encode_to_action_index()
def expert_action(self):
best_actions, _ = self.search_moves(self.clone(), 2, self.to_play())
return numpy.random.choice(best_actions)
def search_moves(self, state, search_depth: int, for_player: int):
"""
:param AnimalShogi state:
:param search_depth:
:param for_player:
:return:
"""
action_results = {}
for action in state.legal_actions():
s = state.clone()
_, reward, done = s.step(action)
if done or search_depth == 0:
action_results[action] = reward
else:
_, best_reward = self.search_moves(s, search_depth-1, for_player)
action_results[action] = -best_reward * 0.99
best_reward = numpy.max(list(action_results.values()))
best_actions = [a for a, r in action_results.items() if r == best_reward]
return best_actions, best_reward
def render(self):
chars = {
0: " ",
L1: P1_COLOR + "🐯" + RESET,
E1: P1_COLOR + "🐘" + RESET,
G1: P1_COLOR + "🐴" + RESET,
P1: P1_COLOR + "🐥" + RESET,
C1: P1_COLOR + "🐔" + RESET,
L2: P2_COLOR + "🐯" + RESET,
E2: P2_COLOR + "🐘" + RESET,
G2: P2_COLOR + "🐴" + RESET,
P2: P2_COLOR + "🐥" + RESET,
C2: P2_COLOR + "🐔" + RESET,
}
lines = []
for line in self.board:
line_ch_list = []
for kind in line:
line_ch_list.append(chars[kind])
lines.append("".join(line_ch_list))
stock_lines = []
for stocks in self.stocks:
stock = ""
for i, num in enumerate(stocks):
stock += "🐘🐴🐥"[i] * num
stock_lines.append(stock)
print(P2_COLOR + f"stock: {stock_lines[1]}" + RESET)
print(" | 1 2 3|")
print("-+------+-")
print("\n".join([f"{m}|{line}|" for m, line in zip("abcd", lines)]))
print("-+------+-")
print(P1_COLOR + f"stock: {stock_lines[0]}" + RESET)
def action_to_string(self, action_number):
move = Move.decode_from_action_index(action_number)
if move.from_board is not None:
from_pos, to_pos = move.from_pos(), move.to_pos()
kind = self.board[to_pos]
if kind == 0:
ch = " "
else:
ch = "🐯🐘🐴🐥🐔"[(kind-1) % 5]
pos_from = "123"[from_pos[1]] + "abcd"[from_pos[0]]
pos_to = "123"[to_pos[1]] + "abcd"[to_pos[0]]
return f"{pos_from}{pos_to}{ch}"
else:
to_pos = move.to_pos()
pos_to = "123"[to_pos[1]] + "abcd"[to_pos[0]]
ch = "🐘🐴🐥"[move.from_stock]
return f"->{pos_to}{ch}"
# first player
L1 = 1 # Lion
E1 = 2 # Elephant
G1 = 3 # Giraffe
P1 = 4 # Chick (Piyo Piyo! or Pawn)
C1 = 5 # Chicken
# second player
L2 = 6
E2 = 7
G2 = 8
P2 = 9
C2 = 10
# move direction
UL = (-1, -1) # Y, X
UU = (-1, 0)
UR = (-1, 1)
ML = ( 0, -1)
MR = ( 0, 1)
DL = ( 1, -1)
DD = ( 1, 0)
DR = ( 1, 1)
ALLOWED_MOVES = {
L1: [UL, UU, UR, ML, MR, DL, DD, DR],
L2: [UL, UU, UR, ML, MR, DL, DD, DR],
E1: [UL, UR, DL, DR],
E2: [UL, UR, DL, DR],
G1: [UU, ML, MR, DD],
G2: [UU, ML, MR, DD],
P1: [UU],
P2: [DD],
C1: [UL, UU, UR, ML, MR, DD],
C2: [DL, DD, DR, ML, MR, UU],
}
class AnimalShogiNetwork(MuZeroResidualNetwork):
def get_action_channel_size(self):
return 6
def encode_hidden_and_action(self, encoded_state, action):
"""
:param encoded_state: [batch, ch, Height, Width]
:param action: [batch, 1]
:return:
"""
channels = self.encode_action(encoded_state.shape, action)
return torch.cat([encoded_state] + channels, dim=1)
@staticmethod
def encode_action(shape, action):
"""
:param shape: tuple(batch, ch, h, w)
:param action: [batch, 1]
>>> sh = (2, 8, 4, 3)
>>> moves = [Move(5, None, 0, 1), Move(None, 1, 11, 0)]
>>> action = torch.tensor([[m.encode_to_action_index()] for m in moves])
>>> channels = torch.cat(AnimalShogiNetwork.encode_action(sh, action), dim=1)
>>> channels.shape
torch.Size([2, 6, 4, 3])
>>> assert channels[0, 0, 1, 2] == 1. # From
>>> assert torch.sum(channels[0, 0, :, :]) == 1
>>> assert torch.sum(channels[0, 1:4, :, :]) == 0 # Stocks
>>> assert channels[0, 4, 0, 0] == 1 # To
>>> assert torch.sum(channels[0, 4, :, :]) == 1 # To
>>> assert torch.sum(channels[0, 5, :, :]) == 12 # Promotion
>>> #
>>> assert torch.sum(channels[1, 0, :, :]) == 0 # From Board
>>> assert torch.sum(channels[1, 1, :, :]) == 0 # Stock
>>> assert torch.sum(channels[1, 2, :, :]) == 12
>>> assert torch.sum(channels[1, 3, :, :]) == 0
>>> assert channels[1, 4, 3, 2] == 1 # To
>>> assert torch.sum(channels[1, 4, :, :]) == 1
>>> assert torch.sum(channels[1, 5, :, :]) == 0 # Promotion
"""
def ones(i):
sh = shape[0], i, shape[2], shape[3]
return torch.ones(sh).to(action.device).float()
def zeros(i):
sh = shape[0], i, shape[2], shape[3]
return torch.zeros(sh).to(action.device).float()
board_size = BOARD_SIZE_Y * BOARD_SIZE_X
promote = action % 2
action //= 2
to_board = (action % board_size).long().squeeze(1)
action //= board_size
minus_1 = torch.tensor(-1).to(action.device)
from_board = torch.where(action < board_size, action, minus_1).long().squeeze(1)
from_stock = torch.where(action < board_size, minus_1, action-board_size).long().squeeze(1)
channels = []
indexes = torch.arange(len(action)).long()
# From
from_ch = zeros(1)
from_ch[indexes, :, from_board // BOARD_SIZE_X, from_board % BOARD_SIZE_X] = (
torch.where(from_board >= 0., 1., 0.)[:, None].float()
)
channels.append(from_ch)
# Stock
stocks = zeros(CAPTURABLE_KIND_NUM)
stocks[indexes, from_stock, :, :] = torch.where(from_stock >= 0., 1., 0.)[:, None, None].float()
channels.append(stocks)
# To
to_ch = zeros(1)
to_ch[indexes, :, to_board // BOARD_SIZE_X, to_board % BOARD_SIZE_X] = 1.
channels.append(to_ch)
# promote
channels.append(ones(1) * promote[:, :, None, None])
return channels
if __name__ == "__main__":
game = Game()
game.reset()
while True:
game.render()
action = game.expert_agent()
_, r, done = game.step(action)
print(f"Player{game.to_play()}: {game.action_to_string(action)}")
if done:
print(f"reward: {r}, done")
break
the-stack_106_18847
from collections import OrderedDict
from datetime import date
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import JSONField
from django.core.validators import ValidationError
from django.db import models
from django.db.models import F, Q
from django.http import HttpResponse
from django.template import Template, Context
from django.urls import reverse
import graphviz
from jinja2 import Environment
from taggit.models import TagBase, GenericTaggedItemBase
from dcim.constants import CONNECTION_STATUS_CONNECTED
from utilities.fields import ColorField
from utilities.utils import deepmerge, foreground_color, model_names_to_filter_dict
from .constants import *
from .querysets import ConfigContextQuerySet
#
# Webhooks
#
def get_webhook_models():
return model_names_to_filter_dict(WEBHOOK_MODELS)
class Webhook(models.Model):
"""
A Webhook defines a request that will be sent to a remote application when an object is created, updated, and/or
deleted in NetBox. The request will contain a representation of the object, which the remote application can act on.
Each Webhook can be limited to firing only on certain actions or certain object types.
"""
obj_type = models.ManyToManyField(
to=ContentType,
related_name='webhooks',
verbose_name='Object types',
limit_choices_to=get_webhook_models,
help_text="The object(s) to which this Webhook applies."
)
name = models.CharField(
max_length=150,
unique=True
)
type_create = models.BooleanField(
default=False,
help_text="Call this webhook when a matching object is created."
)
type_update = models.BooleanField(
default=False,
help_text="Call this webhook when a matching object is updated."
)
type_delete = models.BooleanField(
default=False,
help_text="Call this webhook when a matching object is deleted."
)
payload_url = models.CharField(
max_length=500,
verbose_name='URL',
help_text="A POST will be sent to this URL when the webhook is called."
)
http_content_type = models.PositiveSmallIntegerField(
choices=WEBHOOK_CT_CHOICES,
default=WEBHOOK_CT_JSON,
verbose_name='HTTP content type'
)
secret = models.CharField(
max_length=255,
blank=True,
help_text="When provided, the request will include a 'X-Hook-Signature' "
"header containing a HMAC hex digest of the payload body using "
"the secret as the key. The secret is not transmitted in "
"the request."
)
enabled = models.BooleanField(
default=True
)
ssl_verification = models.BooleanField(
default=True,
verbose_name='SSL verification',
help_text="Enable SSL certificate verification. Disable with caution!"
)
class Meta:
unique_together = ('payload_url', 'type_create', 'type_update', 'type_delete',)
def __str__(self):
return self.name
def clean(self):
"""
Validate model
"""
if not self.type_create and not self.type_delete and not self.type_update:
raise ValidationError(
"You must select at least one type: create, update, and/or delete."
)
#
# Custom fields
#
class CustomFieldModel(models.Model):
_cf = None
class Meta:
abstract = True
@property
def cf(self):
"""
Name-based CustomFieldValue accessor for use in templates
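e.g. {{ obj.cf.maintenance_window }} in a Django template (the field name here is illustrative)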
"""
if self._cf is None:
# Cache all custom field values for this instance
self._cf = {
field.name: value for field, value in self.get_custom_fields().items()
}
return self._cf
def get_custom_fields(self):
"""
Return a dictionary of custom fields for a single object in the form {<field>: value}.
"""
# Find all custom fields applicable to this type of object
content_type = ContentType.objects.get_for_model(self)
fields = CustomField.objects.filter(obj_type=content_type)
# If the object exists, populate its custom fields with values
if hasattr(self, 'pk'):
values = self.custom_field_values.all()
values_dict = {cfv.field_id: cfv.value for cfv in values}
return OrderedDict([(field, values_dict.get(field.pk)) for field in fields])
else:
return OrderedDict([(field, None) for field in fields])
def get_custom_field_models():
return model_names_to_filter_dict(CUSTOMFIELD_MODELS)
class CustomField(models.Model):
obj_type = models.ManyToManyField(
to=ContentType,
related_name='custom_fields',
verbose_name='Object(s)',
limit_choices_to=get_custom_field_models,
help_text='The object(s) to which this field applies.'
)
type = models.PositiveSmallIntegerField(
choices=CUSTOMFIELD_TYPE_CHOICES,
default=CF_TYPE_TEXT
)
name = models.CharField(
max_length=50,
unique=True
)
label = models.CharField(
max_length=50,
blank=True,
help_text='Name of the field as displayed to users (if not provided, '
'the field\'s name will be used)'
)
description = models.CharField(
max_length=100,
blank=True
)
required = models.BooleanField(
default=False,
help_text='If true, this field is required when creating new objects '
'or editing an existing object.'
)
filter_logic = models.PositiveSmallIntegerField(
choices=CF_FILTER_CHOICES,
default=CF_FILTER_LOOSE,
help_text='Loose matches any instance of a given string; exact '
'matches the entire field.'
)
default = models.CharField(
max_length=100,
blank=True,
help_text='Default value for the field. Use "true" or "false" for booleans.'
)
weight = models.PositiveSmallIntegerField(
default=100,
help_text='Fields with higher weights appear lower in a form.'
)
class Meta:
ordering = ['weight', 'name']
def __str__(self):
return self.label or self.name.replace('_', ' ').capitalize()
def serialize_value(self, value):
"""
Serialize the given value to a string suitable for storage as a CustomFieldValue
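(e.g. True -> '1', date(2019, 1, 31) -> '2019-01-31', and a selected choice -> its primary key as a string)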
"""
if value is None:
return ''
if self.type == CF_TYPE_BOOLEAN:
return str(int(bool(value)))
if self.type == CF_TYPE_DATE:
# Could be date/datetime object or string
try:
return value.strftime('%Y-%m-%d')
except AttributeError:
return value
if self.type == CF_TYPE_SELECT:
# Could be ModelChoiceField or TypedChoiceField
return str(value.id) if hasattr(value, 'id') else str(value)
return value
def deserialize_value(self, serialized_value):
"""
Convert a string into the object it represents depending on the type of field
"""
if serialized_value == '':
return None
if self.type == CF_TYPE_INTEGER:
return int(serialized_value)
if self.type == CF_TYPE_BOOLEAN:
return bool(int(serialized_value))
if self.type == CF_TYPE_DATE:
# Read date as YYYY-MM-DD
return date(*[int(n) for n in serialized_value.split('-')])
if self.type == CF_TYPE_SELECT:
return self.choices.get(pk=int(serialized_value))
return serialized_value
class CustomFieldValue(models.Model):
field = models.ForeignKey(
to='extras.CustomField',
on_delete=models.CASCADE,
related_name='values'
)
obj_type = models.ForeignKey(
to=ContentType,
on_delete=models.PROTECT,
related_name='+'
)
obj_id = models.PositiveIntegerField()
obj = GenericForeignKey(
ct_field='obj_type',
fk_field='obj_id'
)
serialized_value = models.CharField(
max_length=255
)
class Meta:
ordering = ['obj_type', 'obj_id']
unique_together = ['field', 'obj_type', 'obj_id']
def __str__(self):
return '{} {}'.format(self.obj, self.field)
@property
def value(self):
return self.field.deserialize_value(self.serialized_value)
@value.setter
def value(self, value):
self.serialized_value = self.field.serialize_value(value)
def save(self, *args, **kwargs):
# Delete this object if it no longer has a value to store
if self.pk and self.value is None:
self.delete()
else:
super().save(*args, **kwargs)
class CustomFieldChoice(models.Model):
field = models.ForeignKey(
to='extras.CustomField',
on_delete=models.CASCADE,
related_name='choices',
limit_choices_to={'type': CF_TYPE_SELECT}
)
value = models.CharField(
max_length=100
)
weight = models.PositiveSmallIntegerField(
default=100,
help_text='Higher weights appear lower in the list'
)
class Meta:
ordering = ['field', 'weight', 'value']
unique_together = ['field', 'value']
def __str__(self):
return self.value
def clean(self):
if self.field.type != CF_TYPE_SELECT:
raise ValidationError("Custom field choices can only be assigned to selection fields.")
def delete(self, using=None, keep_parents=False):
# When deleting a CustomFieldChoice, delete all CustomFieldValues which point to it
pk = self.pk
super().delete(using, keep_parents)
CustomFieldValue.objects.filter(field__type=CF_TYPE_SELECT, serialized_value=str(pk)).delete()
#
# Custom links
#
def get_custom_link_models():
return model_names_to_filter_dict(CUSTOMLINK_MODELS)
class CustomLink(models.Model):
"""
A custom link to an external representation of a NetBox object. The link text and URL fields accept Jinja2 template
code to be rendered with an object as context.
"""
content_type = models.ForeignKey(
to=ContentType,
on_delete=models.CASCADE,
limit_choices_to=get_custom_link_models
)
name = models.CharField(
max_length=100,
unique=True
)
text = models.CharField(
max_length=500,
help_text="Jinja2 template code for link text"
)
url = models.CharField(
max_length=500,
verbose_name='URL',
help_text="Jinja2 template code for link URL"
)
weight = models.PositiveSmallIntegerField(
default=100
)
group_name = models.CharField(
max_length=50,
blank=True,
help_text="Links with the same group will appear as a dropdown menu"
)
button_class = models.CharField(
max_length=30,
choices=BUTTON_CLASS_CHOICES,
default=BUTTON_CLASS_DEFAULT,
help_text="The class of the first link in a group will be used for the dropdown button"
)
new_window = models.BooleanField(
help_text="Force link to open in a new window"
)
class Meta:
ordering = ['group_name', 'weight', 'name']
def __str__(self):
return self.name
#
# Graphs
#
class Graph(models.Model):
type = models.PositiveSmallIntegerField(
choices=GRAPH_TYPE_CHOICES
)
weight = models.PositiveSmallIntegerField(
default=1000
)
name = models.CharField(
max_length=100,
verbose_name='Name'
)
source = models.CharField(
max_length=500,
verbose_name='Source URL'
)
link = models.URLField(
blank=True,
verbose_name='Link URL'
)
class Meta:
ordering = ['type', 'weight', 'name']
def __str__(self):
return self.name
def embed_url(self, obj):
template = Template(self.source)
return template.render(Context({'obj': obj}))
def embed_link(self, obj):
if self.link is None:
return ''
template = Template(self.link)
return template.render(Context({'obj': obj}))
#
# Export templates
#
def get_export_template_models():
return model_names_to_filter_dict(EXPORTTEMPLATE_MODELS)
class ExportTemplate(models.Model):
content_type = models.ForeignKey(
to=ContentType,
on_delete=models.CASCADE,
limit_choices_to=get_export_template_models
)
name = models.CharField(
max_length=100
)
description = models.CharField(
max_length=200,
blank=True
)
template_language = models.PositiveSmallIntegerField(
choices=TEMPLATE_LANGUAGE_CHOICES,
default=TEMPLATE_LANGUAGE_JINJA2
)
template_code = models.TextField(
help_text='The list of objects being exported is passed as a context variable named <code>queryset</code>.'
)
mime_type = models.CharField(
max_length=50,
blank=True,
verbose_name='MIME type',
help_text='Defaults to <code>text/plain</code>'
)
file_extension = models.CharField(
max_length=15,
blank=True,
help_text='Extension to append to the rendered filename'
)
class Meta:
ordering = ['content_type', 'name']
unique_together = [
['content_type', 'name']
]
def __str__(self):
return '{}: {}'.format(self.content_type, self.name)
def render(self, queryset):
"""
Render the contents of the template.
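Illustrative Jinja2 template_code (the object attribute used here is an example only):
{% for obj in queryset %}{{ obj.name }}
{% endfor %}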
"""
context = {
'queryset': queryset
}
if self.template_language == TEMPLATE_LANGUAGE_DJANGO:
template = Template(self.template_code)
output = template.render(Context(context))
elif self.template_language == TEMPLATE_LANGUAGE_JINJA2:
template = Environment().from_string(source=self.template_code)
output = template.render(**context)
else:
return None
# Replace CRLF-style line terminators
output = output.replace('\r\n', '\n')
return output
def render_to_response(self, queryset):
"""
Render the template to an HTTP response, delivered as a named file attachment
"""
output = self.render(queryset)
mime_type = 'text/plain' if not self.mime_type else self.mime_type
# Build the response
response = HttpResponse(output, content_type=mime_type)
filename = 'netbox_{}{}'.format(
queryset.model._meta.verbose_name_plural,
'.{}'.format(self.file_extension) if self.file_extension else ''
)
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
return response
#
# Topology maps
#
class TopologyMap(models.Model):
name = models.CharField(
max_length=50,
unique=True
)
slug = models.SlugField(
unique=True
)
type = models.PositiveSmallIntegerField(
choices=TOPOLOGYMAP_TYPE_CHOICES,
default=TOPOLOGYMAP_TYPE_NETWORK
)
site = models.ForeignKey(
to='dcim.Site',
on_delete=models.CASCADE,
related_name='topology_maps',
blank=True,
null=True
)
device_patterns = models.TextField(
help_text='Identify devices to include in the diagram using regular '
'expressions, one per line. Each line will result in a new '
'tier of the drawing. Separate multiple regexes within a '
'line using semicolons. Devices will be rendered in the '
'order they are defined.'
)
description = models.CharField(
max_length=100,
blank=True
)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@property
def device_sets(self):
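# Each line of device_patterns becomes one tier of the drawing; semicolons separate
# multiple regexes within a tier (illustrative value: "edge-.*\ncore-.*;agg-.*").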
if not self.device_patterns:
return None
return [line.strip() for line in self.device_patterns.split('\n')]
def render(self, img_format='png'):
from dcim.models import Device
# Construct the graph
if self.type == TOPOLOGYMAP_TYPE_NETWORK:
G = graphviz.Graph
else:
G = graphviz.Digraph
self.graph = G()
self.graph.graph_attr['ranksep'] = '1'
seen = set()
for i, device_set in enumerate(self.device_sets):
subgraph = G(name='sg{}'.format(i))
subgraph.graph_attr['rank'] = 'same'
subgraph.graph_attr['directed'] = 'true'
# Add a pseudonode for each device_set to enforce hierarchical layout
subgraph.node('set{}'.format(i), label='', shape='none', width='0')
if i:
self.graph.edge('set{}'.format(i - 1), 'set{}'.format(i), style='invis')
# Add each device to the graph
devices = []
for query in device_set.strip(';').split(';'): # Split regexes on semicolons
devices += Device.objects.filter(name__regex=query).prefetch_related('device_role')
# Remove duplicate devices
devices = [d for d in devices if d.id not in seen]
seen.update([d.id for d in devices])
for d in devices:
bg_color = '#{}'.format(d.device_role.color)
fg_color = '#{}'.format(foreground_color(d.device_role.color))
subgraph.node(d.name, style='filled', fillcolor=bg_color, fontcolor=fg_color, fontname='sans')
# Add an invisible connection to each successive device in a set to enforce horizontal order
for j in range(0, len(devices) - 1):
subgraph.edge(devices[j].name, devices[j + 1].name, style='invis')
self.graph.subgraph(subgraph)
# Compile list of all devices
device_superset = Q()
for device_set in self.device_sets:
for query in device_set.split(';'): # Split regexes on semicolons
device_superset = device_superset | Q(name__regex=query)
devices = Device.objects.filter(*(device_superset,))
# Draw edges depending on graph type
if self.type == TOPOLOGYMAP_TYPE_NETWORK:
self.add_network_connections(devices)
elif self.type == TOPOLOGYMAP_TYPE_CONSOLE:
self.add_console_connections(devices)
elif self.type == TOPOLOGYMAP_TYPE_POWER:
self.add_power_connections(devices)
return self.graph.pipe(format=img_format)
def add_network_connections(self, devices):
from circuits.models import CircuitTermination
from dcim.models import Interface
# Add all interface connections to the graph
connected_interfaces = Interface.objects.prefetch_related(
'_connected_interface__device'
).filter(
Q(device__in=devices) | Q(_connected_interface__device__in=devices),
_connected_interface__isnull=False,
pk__lt=F('_connected_interface')
)
for interface in connected_interfaces:
style = 'solid' if interface.connection_status == CONNECTION_STATUS_CONNECTED else 'dashed'
self.graph.edge(interface.device.name, interface.connected_endpoint.device.name, style=style)
# Add all circuits to the graph
for termination in CircuitTermination.objects.filter(term_side='A', connected_endpoint__device__in=devices):
peer_termination = termination.get_peer_termination()
if (peer_termination is not None and peer_termination.interface is not None and
peer_termination.interface.device in devices):
self.graph.edge(termination.interface.device.name, peer_termination.interface.device.name, color='blue')
def add_console_connections(self, devices):
from dcim.models import ConsolePort
# Add all console connections to the graph
for cp in ConsolePort.objects.filter(device__in=devices, connected_endpoint__device__in=devices):
style = 'solid' if cp.connection_status == CONNECTION_STATUS_CONNECTED else 'dashed'
self.graph.edge(cp.connected_endpoint.device.name, cp.device.name, style=style)
def add_power_connections(self, devices):
from dcim.models import PowerPort
# Add all power connections to the graph
for pp in PowerPort.objects.filter(device__in=devices, _connected_poweroutlet__device__in=devices):
style = 'solid' if pp.connection_status == CONNECTION_STATUS_CONNECTED else 'dashed'
self.graph.edge(pp.connected_endpoint.device.name, pp.device.name, style=style)
#
# Image attachments
#
def image_upload(instance, filename):
path = 'image-attachments/'
# Rename the file to the provided name, if any. Attempt to preserve the file extension.
extension = filename.rsplit('.')[-1].lower()
if instance.name and extension in ['bmp', 'gif', 'jpeg', 'jpg', 'png']:
filename = '.'.join([instance.name, extension])
elif instance.name:
filename = instance.name
return '{}{}_{}_{}'.format(path, instance.content_type.name, instance.object_id, filename)
class ImageAttachment(models.Model):
"""
An uploaded image which is associated with an object.
"""
content_type = models.ForeignKey(
to=ContentType,
on_delete=models.CASCADE
)
object_id = models.PositiveIntegerField()
parent = GenericForeignKey(
ct_field='content_type',
fk_field='object_id'
)
image = models.ImageField(
upload_to=image_upload,
height_field='image_height',
width_field='image_width'
)
image_height = models.PositiveSmallIntegerField()
image_width = models.PositiveSmallIntegerField()
name = models.CharField(
max_length=50,
blank=True
)
created = models.DateTimeField(
auto_now_add=True
)
class Meta:
ordering = ['name']
def __str__(self):
if self.name:
return self.name
filename = self.image.name.rsplit('/', 1)[-1]
return filename.split('_', 2)[2]
def delete(self, *args, **kwargs):
_name = self.image.name
super().delete(*args, **kwargs)
# Delete file from disk
self.image.delete(save=False)
# Deleting the file erases its name. We restore the image's filename here in case we still need to reference it
# before the request finishes. (For example, to display a message indicating the ImageAttachment was deleted.)
self.image.name = _name
@property
def size(self):
"""
Wrapper around `image.size` to suppress an OSError in case the file is inaccessible.
"""
try:
return self.image.size
except OSError:
return None
#
# Config contexts
#
class ConfigContext(models.Model):
"""
A ConfigContext represents a set of arbitrary data available to any Device or VirtualMachine matching its assigned
qualifiers (region, site, etc.). For example, the data stored in a ConfigContext assigned to site A and tenant B
will be available to a Device in site A assigned to tenant B. Data is stored in JSON format.
"""
name = models.CharField(
max_length=100,
unique=True
)
weight = models.PositiveSmallIntegerField(
default=1000
)
description = models.CharField(
max_length=100,
blank=True
)
is_active = models.BooleanField(
default=True,
)
regions = models.ManyToManyField(
to='dcim.Region',
related_name='+',
blank=True
)
sites = models.ManyToManyField(
to='dcim.Site',
related_name='+',
blank=True
)
roles = models.ManyToManyField(
to='dcim.DeviceRole',
related_name='+',
blank=True
)
platforms = models.ManyToManyField(
to='dcim.Platform',
related_name='+',
blank=True
)
tenant_groups = models.ManyToManyField(
to='tenancy.TenantGroup',
related_name='+',
blank=True
)
tenants = models.ManyToManyField(
to='tenancy.Tenant',
related_name='+',
blank=True
)
data = JSONField()
objects = ConfigContextQuerySet.as_manager()
class Meta:
ordering = ['weight', 'name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('extras:configcontext', kwargs={'pk': self.pk})
def clean(self):
# Verify that JSON data is provided as an object
if type(self.data) is not dict:
raise ValidationError(
{'data': 'JSON data must be in object form. Example: {"foo": 123}'}
)
class ConfigContextModel(models.Model):
local_context_data = JSONField(
blank=True,
null=True,
)
class Meta:
abstract = True
def get_config_context(self):
"""
Return the rendered configuration context for a device or VM.
"""
# Compile all config data, overwriting lower-weight values with higher-weight values where a collision occurs
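# (illustrative: a weight-1000 context {"ntp": {"server": "a"}} merged with a weight-2000
# context {"ntp": {"server": "b"}} yields {"ntp": {"server": "b"}}, because later merges win on collisions)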
data = OrderedDict()
for context in ConfigContext.objects.get_for_object(self):
data = deepmerge(data, context.data)
# If the object has local config context data defined, merge it last
if self.local_context_data:
data = deepmerge(data, self.local_context_data)
return data
#
# Custom scripts
#
class Script(models.Model):
"""
Dummy model used to generate permissions for custom scripts. Does not exist in the database.
"""
class Meta:
managed = False
permissions = (
('run_script', 'Can run script'),
)
#
# Report results
#
class ReportResult(models.Model):
"""
This model stores the results from running a user-defined report.
"""
report = models.CharField(
max_length=255,
unique=True
)
created = models.DateTimeField(
auto_now_add=True
)
user = models.ForeignKey(
to=User,
on_delete=models.SET_NULL,
related_name='+',
blank=True,
null=True
)
failed = models.BooleanField()
data = JSONField()
class Meta:
ordering = ['report']
#
# Change logging
#
class ObjectChange(models.Model):
"""
Record a change to an object and the user account associated with that change. A change record may optionally
indicate an object related to the one being changed. For example, a change to an interface may also indicate the
parent device. This will ensure changes made to component models appear in the parent model's changelog.
"""
time = models.DateTimeField(
auto_now_add=True,
editable=False,
db_index=True
)
user = models.ForeignKey(
to=User,
on_delete=models.SET_NULL,
related_name='changes',
blank=True,
null=True
)
user_name = models.CharField(
max_length=150,
editable=False
)
request_id = models.UUIDField(
editable=False
)
action = models.PositiveSmallIntegerField(
choices=OBJECTCHANGE_ACTION_CHOICES
)
changed_object_type = models.ForeignKey(
to=ContentType,
on_delete=models.PROTECT,
related_name='+'
)
changed_object_id = models.PositiveIntegerField()
changed_object = GenericForeignKey(
ct_field='changed_object_type',
fk_field='changed_object_id'
)
related_object_type = models.ForeignKey(
to=ContentType,
on_delete=models.PROTECT,
related_name='+',
blank=True,
null=True
)
related_object_id = models.PositiveIntegerField(
blank=True,
null=True
)
related_object = GenericForeignKey(
ct_field='related_object_type',
fk_field='related_object_id'
)
object_repr = models.CharField(
max_length=200,
editable=False
)
object_data = JSONField(
editable=False
)
csv_headers = [
'time', 'user', 'user_name', 'request_id', 'action', 'changed_object_type', 'changed_object_id',
'related_object_type', 'related_object_id', 'object_repr', 'object_data',
]
class Meta:
ordering = ['-time']
def __str__(self):
return '{} {} {} by {}'.format(
self.changed_object_type,
self.object_repr,
self.get_action_display().lower(),
self.user_name
)
def save(self, *args, **kwargs):
# Record the user's name and the object's representation as static strings
if not self.user_name:
self.user_name = self.user.username
if not self.object_repr:
self.object_repr = str(self.changed_object)
return super().save(*args, **kwargs)
def get_absolute_url(self):
return reverse('extras:objectchange', args=[self.pk])
def to_csv(self):
return (
self.time,
self.user,
self.user_name,
self.request_id,
self.get_action_display(),
self.changed_object_type,
self.changed_object_id,
self.related_object_type,
self.related_object_id,
self.object_repr,
self.object_data,
)
#
# Tags
#
# TODO: figure out a way around this circular import for ObjectChange
from utilities.models import ChangeLoggedModel # noqa: E402
class Tag(TagBase, ChangeLoggedModel):
color = ColorField(
default='9e9e9e'
)
comments = models.TextField(
blank=True,
default=''
)
def get_absolute_url(self):
return reverse('extras:tag', args=[self.slug])
class TaggedItem(GenericTaggedItemBase):
tag = models.ForeignKey(
to=Tag,
related_name="%(app_label)s_%(class)s_items",
on_delete=models.CASCADE
)
class Meta:
index_together = (
("content_type", "object_id")
)
the-stack_106_18848
import copy as cp
import numpy as np
from skmultiflow.core import BaseSKMObject, ClassifierMixin, MetaEstimatorMixin
from skmultiflow.bayes import NaiveBayes
import warnings
def DynamicWeightedMajority(n_estimators=5, base_estimator=NaiveBayes(), period=50, beta=0.5,
theta=0.01): # pragma: no cover
warnings.warn("'DynamicWeightedMajority' has been renamed to 'DynamicWeightedMajorityClassifier' in v0.5.0.\n"
"The old name will be removed in v0.7.0", category=FutureWarning)
return DynamicWeightedMajorityClassifier(n_estimators=n_estimators,
base_estimator=base_estimator,
period=period,
beta=beta,
theta=theta)
class DynamicWeightedMajorityClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):
""" Dynamic Weighted Majority ensemble classifier.
Parameters
----------
n_estimators: int (default=5)
Maximum number of estimators to hold.
base_estimator: StreamModel or sklearn.BaseEstimator (default=NaiveBayes)
Each member of the ensemble is an instance of the base estimator.
period: int (default=50)
Period between expert removal, creation, and weight update.
beta: float (default=0.5)
Factor for which to decrease weights by.
theta: float (default=0.01)
Minimum fraction of weight per model.
Notes
-----
The dynamic weighted majority (DWM) [1]_, uses four mechanisms to
cope with concept drift: It trains online learners of the ensemble,
it weights those learners based on their performance, it removes them,
also based on their performance, and it adds new experts based on the
global performance of the ensemble.
References
----------
.. [1] Kolter and Maloof. Dynamic weighted majority: An ensemble method
for drifting concepts. The Journal of Machine Learning Research,
8:2755-2790, December 2007. ISSN 1532-4435.
Examples
--------
>>> # Imports
>>> from skmultiflow.data import SEAGenerator
>>> from skmultiflow.meta import DynamicWeightedMajorityClassifier
>>>
>>> # Setup a data stream
>>> stream = SEAGenerator(random_state=1)
>>>
>>> # Setup Dynamic Weighted Majority Ensemble Classifier
>>> dwm = DynamicWeightedMajorityClassifier()
>>>
>>> # Setup variables to control loop and track performance
>>> n_samples = 0
>>> correct_cnt = 0
>>> max_samples = 200
>>>
>>> # Train the classifier with the samples provided by the data stream
>>> while n_samples < max_samples and stream.has_more_samples():
>>> X, y = stream.next_sample()
>>> y_pred = dwm.predict(X)
>>> if y[0] == y_pred[0]:
>>> correct_cnt += 1
>>> dwm.partial_fit(X, y)
>>> n_samples += 1
>>>
>>> # Display results
>>> print('{} samples analyzed.'.format(n_samples))
>>> print('Dynamic Weighted Majority accuracy: {}'.format(correct_cnt / n_samples))
"""
class WeightedExpert:
"""
Wrapper that includes an estimator and its weight.
Parameters
----------
estimator: StreamModel or sklearn.BaseEstimator
The estimator to wrap.
weight: float
The estimator's weight.
"""
def __init__(self, estimator, weight):
self.estimator = estimator
self.weight = weight
def __init__(self, n_estimators=5, base_estimator=NaiveBayes(),
period=50, beta=0.5, theta=0.01):
"""
Creates a new instance of DynamicWeightedMajorityClassifier.
"""
super().__init__()
self.n_estimators = n_estimators
self.base_estimator = base_estimator
self.beta = beta
self.theta = theta
self.period = period
# Following attributes are set later
self.epochs = None
self.num_classes = None
self.experts = None
self.reset()
def partial_fit(self, X, y, classes=None, sample_weight=None):
""" Partially fits the model on the supplied X and y matrices.
Since it's an ensemble learner, if X and y matrix of more than one
sample are passed, the algorithm will partial fit the model one sample
at a time.
Parameters
----------
X : numpy.ndarray of shape (n_samples, n_features)
The features to train the model.
y: numpy.ndarray of shape (n_samples)
An array-like with the class labels of all samples in X.
classes: numpy.ndarray, optional (default=None)
Array with all possible/known class labels. This is an optional parameter, except
for the first partial_fit call where it is compulsory.
sample_weight: numpy.ndarray of shape (n_samples), optional (default=None)
Samples weight. If not provided, uniform weights are assumed. Usage varies depending on the base estimator.
Returns
-------
DynamicWeightedMajorityClassifier
self
"""
for i in range(len(X)):
self.fit_single_sample(
X[i:i+1, :], y[i:i+1], classes, sample_weight
)
return self
def predict(self, X):
""" predict
The predict function will take an average of the predictions of its
learners, weighted by their respective weights, and return the most
likely class.
Parameters
----------
X: numpy.ndarray of shape (n_samples, n_features)
A matrix of the samples we want to predict.
Returns
-------
numpy.ndarray
A numpy.ndarray with the label prediction for all the samples in X.
"""
preds = np.array([np.array(exp.estimator.predict(X)) * exp.weight
for exp in self.experts])
sum_weights = sum(exp.weight for exp in self.experts)
aggregate = np.sum(preds / sum_weights, axis=0)
return (aggregate + 0.5).astype(int) # Round to nearest int
def predict_proba(self, X):
raise NotImplementedError
def fit_single_sample(self, X, y, classes=None, sample_weight=None):
"""
Fits a single sample of shape `X.shape=(1, n_attributes)` and `y.shape=(1)`
Aggregates all experts' predictions, diminishes weight of experts whose
predictions were wrong, and may create or remove experts every _period_
samples.
Finally, trains each individual expert on the provided data.
Train loop as described by Kolter and Maloof in the original paper.
Parameters
----------
X: numpy.ndarray of shape (n_samples, n_features)
Features matrix used for partially updating the model.
y: Array-like
An array-like of all the class labels for the samples in X.
classes: list
List of all existing classes. This is an optional parameter.
sample_weight: numpy.ndarray of shape (n_samples), optional (default=None)
Samples weight. If not provided, uniform weights are assumed. Applicability
depends on the base estimator.
"""
self.epochs += 1
self.num_classes = max(
len(classes) if classes is not None else 0,
(int(np.max(y)) + 1), self.num_classes)
predictions = np.zeros((self.num_classes,))
max_weight = 0
weakest_expert_weight = 1
weakest_expert_index = None
for i, exp in enumerate(self.experts):
y_hat = exp.estimator.predict(X)
if np.any(y_hat != y) and (self.epochs % self.period == 0):
exp.weight *= self.beta
predictions[y_hat] += exp.weight
max_weight = max(max_weight, exp.weight)
if exp.weight < weakest_expert_weight:
weakest_expert_index = i
weakest_expert_weight = exp.weight
y_hat = np.array([np.argmax(predictions)])
if self.epochs % self.period == 0:
self._scale_weights(max_weight)
self._remove_experts()
if np.any(y_hat != y):
if len(self.experts) == self.n_estimators:
self.experts.pop(weakest_expert_index)
self.experts.append(self._construct_new_expert())
# Train individual experts
for exp in self.experts:
exp.estimator.partial_fit(X, y, classes, sample_weight)
def get_expert_predictions(self, X):
"""
Returns predictions of each class for each expert.
In shape: (n_experts, n_samples)
"""
return [exp.estimator.predict(X) for exp in self.experts]
def reset(self):
"""
Reset this ensemble learner.
"""
self.epochs = 0
self.num_classes = 2 # Minimum of 2 classes
self.experts = [
self._construct_new_expert()
]
def _scale_weights(self, max_weight):
"""
Scales the experts' weights such that the max is 1.
"""
scale_factor = 1 / max_weight
for exp in self.experts:
exp.weight *= scale_factor
def _remove_experts(self):
"""
Removes all experts whose weight is lower than self.theta.
"""
self.experts = [ex for ex in self.experts if ex.weight >= self.theta]
def _construct_new_expert(self):
"""
Constructs a new WeightedExpert from the provided base_estimator.
"""
return self.WeightedExpert(cp.deepcopy(self.base_estimator), 1)
the-stack_106_18851
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from data import v2 as cfg
from box_utils import *
GPU = False
if torch.cuda.is_available():
GPU = True
torch.set_default_tensor_type('torch.cuda.FloatTensor')
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self,num_classes,overlap_thresh,prior_for_matching,bkg_label,neg_mining,neg_pos,neg_overlap,encode_target):
super(MultiBoxLoss, self).__init__()
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
self.variance = cfg['variance']
def forward(self, predictions, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
ground_truth (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
loc_data, conf_data, priors = predictions
num = loc_data.size(0)
num_priors = (priors.size(0))
num_classes = self.num_classes
# match priors (default boxes) and ground truth boxes
loc_t = torch.Tensor(num, num_priors, 4)
conf_t = torch.LongTensor(num, num_priors)
for idx in range(num):
truths = targets[idx][:,:-1].data
labels = targets[idx][:,-1].data
defaults = priors.data
match(self.threshold,truths,defaults,self.variance,labels,loc_t,conf_t,idx)
if GPU:
loc_t = loc_t.cuda()
conf_t = conf_t.cuda()
# wrap targets
loc_t = Variable(loc_t, requires_grad=False)
conf_t = Variable(conf_t,requires_grad=False)
pos = conf_t > 0
num_pos = pos.sum()
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
loc_p = loc_data[pos_idx].view(-1,4)
loc_t = loc_t[pos_idx].view(-1,4)
loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)
# Compute max conf across batch for hard negative mining
batch_conf = conf_data.view(-1,self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1,1))
# Hard Negative Mining
        loss_c = loss_c.view(num, -1)  # reshape to (batch, num_priors) before masking
        loss_c[pos] = 0  # filter out pos boxes for now
_,loss_idx = loss_c.sort(1, descending=True)
_,idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1)
num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes)
targets_weighted = conf_t[(pos+neg).gt(0)]
loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)
# Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
N = num_pos.data.sum()
loss_l/=N
loss_c/=N
return loss_l,loss_c
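# --- Hedged usage sketch (editor's addition, not part of the original file). ---
# It shows how the loss might be constructed with the VOC-style hyperparameters
# quoted in the class docstring (21 classes, IoU threshold 0.5, 3:1 hard negative
# mining). The commented forward call assumes `loc_preds`, `conf_preds`, `priors`
# and `targets` come from an SSD network and its data loader.
if __name__ == '__main__':
    criterion = MultiBoxLoss(num_classes=21,
                             overlap_thresh=0.5,
                             prior_for_matching=True,
                             bkg_label=0,
                             neg_mining=True,
                             neg_pos=3,
                             neg_overlap=0.5,
                             encode_target=False)
    # loss_l, loss_c = criterion((loc_preds, conf_preds, priors), targets)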
|
the-stack_106_18853
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Shawn Seymour. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import requests
from kafka_connect_healthcheck import helpers
class Health:
def __init__(self, connect_url, worker_id, unhealthy_states, auth):
self.connect_url = connect_url
self.worker_id = worker_id
self.unhealthy_states = [x.upper().strip() for x in unhealthy_states]
self.log_initialization_values()
self.kwargs = {}
if auth:
self.kwargs['auth'] = tuple(auth.split(':'))
def get_health_result(self):
try:
health_result = {"failures": [], "failure_states": self.unhealthy_states}
connector_names = self.get_connector_names()
connector_statuses = self.get_connectors_health(connector_names)
self.handle_healthcheck(connector_statuses, health_result)
health_result["healthy"] = len(health_result["failures"]) == 0
except Exception as ex:
logging.error("Error while attempting to calculate health result. Assuming unhealthy. Error: {}".format(ex))
logging.error(ex)
health_result = {
"healthy": False,
"message": "Exception raised while attempting to calculate health result, assuming unhealthy.",
"error": "{}".format(ex),
"failure_states": self.unhealthy_states
}
helpers.log_line_break()
return health_result
def handle_healthcheck(self, connector_statuses, health_result):
connectors_on_this_worker = False
for connector in connector_statuses:
if self.is_on_this_worker(connector["worker_id"]):
connectors_on_this_worker = True
if self.is_in_unhealthy_state(connector["state"]):
logging.warning("Connector '{}' is unhealthy in failure state: {}".format(connector["name"], connector["state"]))
health_result["failures"].append({
"type": "connector",
"connector": connector["name"],
"state": connector["state"],
"worker_id": connector["worker_id"]
})
else:
logging.info("Connector '{}' is healthy in state: {}".format(connector["name"], connector["state"]))
self.handle_task_healthcheck(connector, health_result)
if not connectors_on_this_worker and connector_statuses:
self.handle_broker_healthcheck(health_result, connector_statuses[0]["name"])
def handle_broker_healthcheck(self, health_result, connector_name):
try:
self.get_connector_details(connector_name)
except Exception as ex:
logging.error("Error while attempting to get details for {}. Assuming unhealthy. Error: {}".format(connector_name, ex))
logging.error(ex)
health_result["failures"].append({
"type": "broker",
"connector": connector_name,
})
def handle_task_healthcheck(self, connector, health_result):
for task in connector["tasks"]:
if self.is_on_this_worker(task["worker_id"]):
if self.is_in_unhealthy_state(task["state"]):
logging.warning("Connector '{}' task '{}' is unhealthy in failure state: {}".format(
connector["name"], task["id"], task["state"]
))
health_result["failures"].append({
"type": "task",
"connector": connector["name"],
"id": task["id"],
"state": task["state"],
"worker_id": task["worker_id"],
"trace": task.get("trace", None)
})
else:
logging.info("Connector '{}' task '{}' is healthy in state: {}".format(
connector["name"], task["id"], task["state"]
))
def get_connectors_health(self, connector_names):
statuses = []
for connector_name in connector_names:
statuses.append(self.get_connector_health(connector_name))
return statuses
def get_connector_health(self, connector_name):
connector_status = self.get_connector_status(connector_name)
connector_state = connector_status["connector"]["state"].upper()
connector_worker = connector_status["connector"]["worker_id"]
return {
"name": connector_name,
"state": connector_state,
"worker_id": connector_worker,
"tasks": connector_status["tasks"]
}
def get_connector_names(self):
response = requests.get("{}/connectors".format(self.connect_url), **self.kwargs)
response_json = response.json()
return response_json
def get_connector_status(self, connector_name):
response = requests.get("{}/connectors/{}/status".format(self.connect_url, connector_name), **self.kwargs)
response_json = response.json()
return response_json
def get_connector_details(self, connector_name):
response = requests.get("{}/connectors/{}".format(self.connect_url, connector_name), **self.kwargs)
response.raise_for_status()
response_json = response.json()
return response_json
def is_in_unhealthy_state(self, state):
return state.upper() in self.unhealthy_states
def is_on_this_worker(self, response_worker_id):
return response_worker_id.lower() == self.worker_id.lower() if self.worker_id is not None else True
def log_initialization_values(self):
logging.info("Server will report unhealthy for states: '{}'".format(", ".join(self.unhealthy_states)))
logging.info("Server will healthcheck against Kafka Connect at: {}".format(self.connect_url))
if self.worker_id is not None:
logging.info("Server will healthcheck connectors and tasks for worker with id '{}'".format(self.worker_id))
else:
logging.warning("No worker id supplied, server will healthcheck all connectors and tasks")
|
the-stack_106_18854
|
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for interacting with Identity Servers"""
import logging
from canonicaljson import json
from twisted.internet import defer
from synapse.api.errors import (
CodeMessageException,
Codes,
HttpResponseException,
SynapseError,
)
from ._base import BaseHandler
logger = logging.getLogger(__name__)
class IdentityHandler(BaseHandler):
def __init__(self, hs):
super(IdentityHandler, self).__init__(hs)
self.http_client = hs.get_simple_http_client()
self.federation_http_client = hs.get_http_client()
self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
self.trust_any_id_server_just_for_testing_do_not_use = (
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)
def _should_trust_id_server(self, id_server):
if id_server not in self.trusted_id_servers:
if self.trust_any_id_server_just_for_testing_do_not_use:
logger.warn(
"Trusting untrustworthy ID server %r even though it isn't"
" in the trusted id list for testing because"
" 'use_insecure_ssl_client_just_for_testing_do_not_use'"
" is set in the config",
id_server,
)
else:
return False
return True
@defer.inlineCallbacks
def threepid_from_creds(self, creds):
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
id_server = creds['idServer']
else:
raise SynapseError(400, "No id_server in creds")
if 'client_secret' in creds:
client_secret = creds['client_secret']
elif 'clientSecret' in creds:
client_secret = creds['clientSecret']
else:
raise SynapseError(400, "No client_secret in creds")
if not self._should_trust_id_server(id_server):
logger.warn(
'%s is not a trusted ID server: rejecting 3pid ' +
'credentials', id_server
)
defer.returnValue(None)
try:
data = yield self.http_client.get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/3pid/getValidated3pid"
),
{'sid': creds['sid'], 'client_secret': client_secret}
)
except HttpResponseException as e:
logger.info("getValidated3pid failed with Matrix error: %r", e)
raise e.to_synapse_error()
if 'medium' in data:
defer.returnValue(data)
defer.returnValue(None)
@defer.inlineCallbacks
def bind_threepid(self, creds, mxid):
logger.debug("binding threepid %r to %s", creds, mxid)
data = None
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
id_server = creds['idServer']
else:
raise SynapseError(400, "No id_server in creds")
if 'client_secret' in creds:
client_secret = creds['client_secret']
elif 'clientSecret' in creds:
client_secret = creds['clientSecret']
else:
raise SynapseError(400, "No client_secret in creds")
try:
data = yield self.http_client.post_urlencoded_get_json(
"https://%s%s" % (
id_server, "/_matrix/identity/api/v1/3pid/bind"
),
{
'sid': creds['sid'],
'client_secret': client_secret,
'mxid': mxid,
}
)
logger.debug("bound threepid %r to %s", creds, mxid)
# Remember where we bound the threepid
yield self.store.add_user_bound_threepid(
user_id=mxid,
medium=data["medium"],
address=data["address"],
id_server=id_server,
)
except CodeMessageException as e:
data = json.loads(e.msg) # XXX WAT?
defer.returnValue(data)
@defer.inlineCallbacks
def try_unbind_threepid(self, mxid, threepid):
"""Removes a binding from an identity server
Args:
mxid (str): Matrix user ID of binding to be removed
threepid (dict): Dict with medium & address of binding to be
removed, and an optional id_server.
Raises:
SynapseError: If we failed to contact the identity server
Returns:
Deferred[bool]: True on success, otherwise False if the identity
server doesn't support unbinding (or no identity server found to
contact).
"""
if threepid.get("id_server"):
id_servers = [threepid["id_server"]]
else:
id_servers = yield self.store.get_id_servers_user_bound(
user_id=mxid,
medium=threepid["medium"],
address=threepid["address"],
)
# We don't know where to unbind, so we don't have a choice but to return
if not id_servers:
defer.returnValue(False)
changed = True
for id_server in id_servers:
changed &= yield self.try_unbind_threepid_with_id_server(
mxid, threepid, id_server,
)
defer.returnValue(changed)
@defer.inlineCallbacks
def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server):
"""Removes a binding from an identity server
Args:
mxid (str): Matrix user ID of binding to be removed
threepid (dict): Dict with medium & address of binding to be removed
id_server (str): Identity server to unbind from
Raises:
SynapseError: If we failed to contact the identity server
Returns:
Deferred[bool]: True on success, otherwise False if the identity
server doesn't support unbinding
"""
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
content = {
"mxid": mxid,
"threepid": {
"medium": threepid["medium"],
"address": threepid["address"],
},
}
# we abuse the federation http client to sign the request, but we have to send it
# using the normal http client since we don't want the SRV lookup and want normal
# 'browser-like' HTTPS.
auth_headers = self.federation_http_client.build_auth_headers(
destination=None,
method='POST',
url_bytes='/_matrix/identity/api/v1/3pid/unbind'.encode('ascii'),
content=content,
destination_is=id_server,
)
headers = {
b"Authorization": auth_headers,
}
try:
yield self.http_client.post_json_get_json(
url,
content,
headers,
)
changed = True
except HttpResponseException as e:
changed = False
if e.code in (400, 404, 501,):
# The remote server probably doesn't support unbinding (yet)
logger.warn("Received %d response while unbinding threepid", e.code)
else:
logger.error("Failed to unbind threepid on identity server: %s", e)
raise SynapseError(502, "Failed to contact identity server")
yield self.store.remove_user_bound_threepid(
user_id=mxid,
medium=threepid["medium"],
address=threepid["address"],
id_server=id_server,
)
defer.returnValue(changed)
@defer.inlineCallbacks
def requestEmailToken(
self,
id_server,
email,
client_secret,
send_attempt,
next_link=None,
):
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
Codes.SERVER_NOT_TRUSTED
)
params = {
'email': email,
'client_secret': client_secret,
'send_attempt': send_attempt,
}
if next_link:
params.update({'next_link': next_link})
try:
data = yield self.http_client.post_json_get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/validate/email/requestToken"
),
params
)
defer.returnValue(data)
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
@defer.inlineCallbacks
def requestMsisdnToken(
self, id_server, country, phone_number,
client_secret, send_attempt, **kwargs
):
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
Codes.SERVER_NOT_TRUSTED
)
params = {
'country': country,
'phone_number': phone_number,
'client_secret': client_secret,
'send_attempt': send_attempt,
}
params.update(kwargs)
try:
data = yield self.http_client.post_json_get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/validate/msisdn/requestToken"
),
params
)
defer.returnValue(data)
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
|
the-stack_106_18855
|
from presidio_analyzer import Pattern, PatternRecognizer
class UKNINORecognizer(PatternRecognizer):
"""
Recognizes National insurance number using regex
"""
# pylint: disable=line-too-long,abstract-method
    # Weak pattern: National insurance numbers are a weak match, e.g., JG 12 13 16 A, AB123456C
PATTERNS = [
Pattern("NINO (very weak)",
r"[A-Z]{2}?[ ]?[0-9]{2}[ ]?[0-9]{2}[ ]?[0-9]{2}[ ]?[ ]?[A-Z],?[ ]?[A-CEGHJ-PR-TW-Z][A-CEGHJ-NPR-TW-Z]{1}[0-9]{6}[A-DFM]?",
0.5),
]
CONTEXT = ["National insurance number", "national insurance number"]
def __init__(
self,
patterns=None,
context=None,
supported_language="en",
supported_entity="UK_NINO",
):
context = context if context else self.CONTEXT
patterns = patterns if patterns else self.PATTERNS
super().__init__(
supported_entity=supported_entity,
patterns=patterns,
context=context,
supported_language=supported_language,
)
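# --- Hedged usage sketch (editor's addition, not part of the original file). ---
# Exercises the recognizer directly via PatternRecognizer.analyze(); the sample
# text reuses the example strings from the comment above and is illustrative only.
if __name__ == "__main__":
    recognizer = UKNINORecognizer()
    results = recognizer.analyze(text="NINO: JG 12 13 16 A, AB123456C", entities=["UK_NINO"])
    print(results)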
|
the-stack_106_18858
|
#! /usr/local/bin/python
import numpy as np
import cv2
import os
path = 'captured/'
try:
os.mkdir(path)
except:
print("Already exists.")
cap = cv2.VideoCapture(1)
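# NOTE (editor's addition): index 1 assumes a second/external camera; on machines
# with a single webcam, VideoCapture(0) is usually the device to use.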
i = 0
while (True):
# Capture frame-by-frame
(ret, frame) = cap.read()
# Get frame size
width = cap.get(3)
height = cap.get(4)
# print(width, height)
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# # Test if it can do a laplacian filter in a loop
# lap = cv2.Laplacian(gray, cv2.CV_64F)
# lap = np.uint8(np.absolute(lap))
# cv2.imshow(lap, 'Laplacian')
# Display the resulting frame
# cv2.imshow('frame', frame)
# cv2.imshow(frame, 'frame')
cv2.imshow('frame', gray)
# cv2.imshow('frame',lap)
# Capture the frame
if cv2.waitKey(1) & 0XFF == ord('c'):
cv2.imwrite('captured/image{:02d}.png'.format(i), gray)
i += 1
# The & 0XFF is a so-called mask.
if cv2.waitKey(1) & 0XFF == ord('q'):
break
# cv2.imwrite('ball.png', frame)
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
|
the-stack_106_18859
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Callable, List, Union
import pytest
from sparseml.keras.optim import (
KerasModifierYAML,
Modifier,
ScheduledModifier,
ScheduledUpdateModifier,
)
from sparseml.keras.utils import keras
from sparseml.utils import KERAS_FRAMEWORK
from tests.sparseml.keras.optim.mock import mnist_model
from tests.sparseml.optim.test_modifier import (
BaseModifierTest,
BaseScheduledTest,
BaseUpdateTest,
)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_KERAS_TESTS", False),
reason="Skipping keras tests",
)
class ModifierTest(BaseModifierTest):
# noinspection PyMethodOverriding
def test_constructor(
self,
modifier_lambda: Callable[[], Modifier],
model_lambda: Callable[[], keras.models.Model],
steps_per_epoch: int,
):
super().test_constructor(modifier_lambda, framework=KERAS_FRAMEWORK)
# noinspection PyMethodOverriding
def test_yaml(
self,
modifier_lambda: Callable[[], Modifier],
model_lambda: Callable[[], keras.models.Model],
steps_per_epoch: int,
):
super().test_yaml(modifier_lambda, framework=KERAS_FRAMEWORK)
# noinspection PyMethodOverriding
def test_yaml_key(
self,
modifier_lambda: Callable[[], Modifier],
model_lambda: Callable[[], keras.models.Model],
steps_per_epoch: int,
):
super().test_yaml_key(modifier_lambda, framework=KERAS_FRAMEWORK)
# noinspection PyMethodOverriding
def test_repr(
self,
modifier_lambda: Callable[[], Modifier],
model_lambda: Callable[[], keras.models.Model],
steps_per_epoch: int,
):
super().test_repr(modifier_lambda, framework=KERAS_FRAMEWORK)
# noinspection PyMethodOverriding
def test_props(
self,
modifier_lambda: Callable[[], Modifier],
model_lambda: Callable[[], keras.models.Model],
steps_per_epoch: int,
):
super().test_props(modifier_lambda, framework=KERAS_FRAMEWORK)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_KERAS_TESTS", False),
reason="Skipping keras tests",
)
class ScheduledModifierTest(ModifierTest, BaseScheduledTest):
# noinspection PyMethodOverriding
def test_props_start(
self,
modifier_lambda: Callable[[], ScheduledModifier],
model_lambda: Callable[[], keras.models.Model],
steps_per_epoch: int,
):
super().test_props_start(modifier_lambda, framework=KERAS_FRAMEWORK)
# noinspection PyMethodOverriding
def test_props_end(
self,
modifier_lambda: Callable[[], ScheduledModifier],
model_lambda: Callable[[], keras.models.Model],
steps_per_epoch: int,
):
super().test_props_end(modifier_lambda, framework=KERAS_FRAMEWORK)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_KERAS_TESTS", False),
reason="Skipping keras tests",
)
class ScheduledUpdateModifierTest(ScheduledModifierTest, BaseUpdateTest):
# noinspection PyMethodOverriding
def test_props_frequency(
self,
modifier_lambda: Callable[[], ScheduledUpdateModifier],
model_lambda: Callable[[], keras.models.Model],
steps_per_epoch: int,
):
super().test_props_frequency(modifier_lambda, framework=KERAS_FRAMEWORK)
@KerasModifierYAML()
class ModifierImpl(Modifier):
def __init__(self, log_types: Union[str, List[str]] = ["python"]):
super().__init__(log_types)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_KERAS_TESTS", False),
reason="Skipping keras tests",
)
@pytest.mark.parametrize("modifier_lambda", [ModifierImpl], scope="function")
@pytest.mark.parametrize("model_lambda", [mnist_model], scope="function")
@pytest.mark.parametrize("steps_per_epoch", [100], scope="function")
class TestModifierImpl(ModifierTest):
pass
@KerasModifierYAML()
class ScheduledModifierImpl(ScheduledModifier):
def __init__(
self,
log_types: Union[str, List[str]] = ["python"],
end_epoch: float = -1.0,
start_epoch: float = -1.0,
):
super().__init__(log_types)
@pytest.mark.parametrize("modifier_lambda", [ScheduledModifierImpl], scope="function")
@pytest.mark.parametrize("model_lambda", [mnist_model], scope="function")
@pytest.mark.parametrize("steps_per_epoch", [100], scope="function")
class TestScheduledModifierImpl(ScheduledModifierTest):
pass
@KerasModifierYAML()
class ScheduledUpdateModifierImpl(ScheduledUpdateModifier):
def __init__(
self,
log_types: Union[str, List[str]] = ["python"],
end_epoch: float = -1.0,
start_epoch: float = -1.0,
update_frequency: float = -1,
):
super().__init__(log_types)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_KERAS_TESTS", False),
reason="Skipping keras tests",
)
@pytest.mark.parametrize(
"modifier_lambda", [ScheduledUpdateModifierImpl], scope="function"
)
@pytest.mark.parametrize("model_lambda", [mnist_model], scope="function")
@pytest.mark.parametrize("steps_per_epoch", [100], scope="function")
class TestScheduledUpdateModifierImpl(ScheduledUpdateModifierTest):
pass
|
the-stack_106_18861
|
import copy
import torch
from ignite.engine import (Engine,
Events,
_prepare_batch,
create_supervised_evaluator)
from ignite.metrics import RunningAverage, Loss
from ignite.contrib.handlers import ProgressBar
from object_detection.utils.prepare_data import transform_inputs
from object_detection.utils.evaluation import CocoEvaluator
from object_detection.models.ssd.predictor import Predictor
from object_detection.datasets.bdd100k_yolo import xyxy2xywh
from object_detection.utils.yolo.yolo_utils import *
__all__ = [
"create_detection_trainer",
"create_detection_evaluator"
]
def train_data(model_name, model, batch, loss_fn, device):
if model_name == "faster":
images, targets = batch
images, targets = transform_inputs(images, targets, device)
losses = model(images, targets)
loss = sum([loss for loss in losses.values()])
elif model_name == "ssd512":
images, boxes, labels = batch
images = images.to(device)
boxes = boxes.to(device)
labels = labels.to(device)
with torch.cuda.amp.autocast():
confidence, locations = model(images)
regression_loss, classification_loss = loss_fn(confidence, locations, labels, boxes)
loss = regression_loss + classification_loss
elif model_name == "yolov3" or model_name == "yolov3_spp" or model_name == "yolov4":
images, targets, paths, _ = batch
images = images.to(device).float() / 255.0
targets = targets.to(device)
with torch.cuda.amp.autocast():
predictions = model(images)
loss, loss_items = loss_fn(predictions, targets, model)
loss *= len(batch) / 64
return loss
def create_detection_trainer(model_name,
model,
optimizer,
device,
val_loader,
evaluator,
grad_scaler=None,
loss_fn = None,
logging = True):
def update_fn(_trainer, batch):
"""Training function
Keyword arguments:
        - each batch
"""
model.train()
optimizer.zero_grad()
loss = train_data(model_name, model, batch, loss_fn, device)
grad_scaler.scale(loss).backward()
grad_scaler.step(optimizer)
grad_scaler.update()
return loss.item()
trainer = Engine(update_fn)
RunningAverage(output_transform=lambda x: x) \
.attach(trainer, 'loss')
@trainer.on(Events.ITERATION_COMPLETED)
def log_optimizer_params(engine):
param_groups = optimizer.param_groups[0]
for h in ['lr', 'momentum', 'weight_decay']:
if h in param_groups.keys():
engine.state.metrics[h] = param_groups[h]
@trainer.on(Events.EPOCH_COMPLETED)
def on_epoch_completed(engine):
evaluator.run(val_loader)
if logging:
ProgressBar(persist=False) \
.attach(trainer, ['loss', 'lr'])
return trainer
def test_data(model_name, model, batch, device):
if model_name == "faster":
images, targets = batch
images, targets = transform_inputs(images, targets, device)
images_model = copy.deepcopy(images)
torch.cuda.synchronize()
with torch.no_grad():
outputs = model(images_model)
outputs = [{k: v.to(device) for k, v in t.items()} for t in outputs]
res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
elif model_name == "ssd512":
from object_detection.utils.ssd import ssd512_config as config
images, targets = batch
images_model = copy.deepcopy(images)
candidate_size = 50
sigma = 0.5
predictor = Predictor(model,
config.image_size,
config.image_mean,
config.image_std,
iou_threshold = config.iou_threshold,
candidate_size = candidate_size,
sigma = sigma,
device = device)
boxes, labels, probs = predictor.predict(images_model, 10 , 0.2)
if boxes.size()[0] == 0:
outputs = {"boxes": torch.tensor([[0,0,0,0]]),
"labels": torch.tensor([0]),
"scores" : torch.tensor([0])}
else:
outputs = {"boxes": boxes,
"labels" : labels,
"scores": probs}
res = {targets['image_id'].item(): outputs}
elif model_name == "yolov3" or model_name == "yolov3_spp" or model_name == "yolov4":
images, targets = batch
images, targets = transform_inputs(images, targets, device)
images_model = copy.deepcopy(images)
images_model = [image.float()/255 for image in images_model]
batch_imgs = torch.stack(images_model)
labels_ = []
scores = []
res = {}
nb, _, width, height = batch_imgs.shape
torch.cuda.synchronize()
with torch.no_grad():
inf_out, train_out = model(batch_imgs)
output = non_max_suppression(inf_out, conf_thres=0.25, iou_thres = 0.6) #conf = 0.25 to decrease the training time
for si, pred in enumerate(output):
if pred is None:
res.update({targets[si]["image_id"].item():
{"boxes": torch.tensor([[0,0,0,0]]),
"labels": torch.tensor([1]),
"scores" : torch.tensor([0])}})
else:
clip_coords(pred, (height, width))
box = pred[:, :4].clone()
res.update({targets[si]["image_id"].item():
{"boxes":box,
"labels":pred[:, 5],
"scores":pred[:,4]
}})
images_model = outputs = None
return images, targets, res
def create_detection_evaluator(model_name, model, device, coco_api_val_dataset, logging = True):
def update_model(engine, batch):
images, targets, res = test_data(model_name, model, batch, device)
engine.state.coco_evaluator.update(res)
return images, targets, res
evaluator = Engine(update_model)
if logging:
ProgressBar(persist=False) \
.attach(evaluator)
@evaluator.on(Events.STARTED)
def on_evaluation_started(engine):
model.eval()
engine.state.coco_evaluator = CocoEvaluator(coco_api_val_dataset)
@evaluator.on(Events.COMPLETED)
def on_evaluation_completed(engine):
engine.state.coco_evaluator.synchronize_between_processes()
print("\nResults val set:")
engine.state.coco_evaluator.accumulate()
if logging:
engine.state.coco_evaluator.summarize()
return evaluator
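# --- Hedged wiring outline (editor's addition, not part of the original file). ---
# Kept as comments because the model, optimizer, loaders, loss function and COCO
# api object are built elsewhere in the training scripts; the names below are
# placeholders, not definitions from this module.
#
#   evaluator = create_detection_evaluator("ssd512", model, device, coco_api_val_dataset)
#   trainer = create_detection_trainer("ssd512", model, optimizer, device,
#                                      val_loader, evaluator,
#                                      grad_scaler=torch.cuda.amp.GradScaler(),
#                                      loss_fn=multibox_loss)
#   trainer.run(train_loader, max_epochs=30)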
|
the-stack_106_18863
|
"""Plot streamlines of the 2D field:
u(x,y) = -1 - x^2 + y
v(x,y) = 1 + x - y^2
"""
from vtkplotter import *
import numpy as np
# a grid with a vector field (U,V):
X, Y = np.mgrid[-5:5 :15j, -4:4 :15j]
U = -1 - X**2 + Y
V = 1 + X - Y**2
# optionally, pick some random points as seeds:
prob_pts = np.random.rand(200, 2)*8 - [4,4]
sp = streamplot(X,Y, U,V,
lw=0.001, # line width in abs. units
direction='forward', # 'both' or 'backward'
probes=prob_pts, # try comment out this
)
pts = Points(prob_pts, r=5, c='white')
show(sp, pts,
Text2D(__doc__, c='w'),
axes=1, bg='bb')
|
the-stack_106_18864
|
# import the necessary packages
from imutils.object_detection import non_max_suppression
import numpy as np
import argparse
import time
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str,
help="path to input image")
ap.add_argument("-east", "--east", type=str,
help="path to input EAST text detector")
ap.add_argument("-c", "--min-confidence", type=float, default=0.5,
help="minimum probability required to inspect a region")
ap.add_argument("-w", "--width", type=int, default=320,
help="resized image width (should be multiple of 32)")
ap.add_argument("-e", "--height", type=int, default=320,
help="resized image height (should be multiple of 32)")
args = vars(ap.parse_args())
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 5))
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 21))
# load the input image and grab the image dimensions
image = cv2.imread(args["image"])
orig = image.copy()
(H, W) = image.shape[:2]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# smooth the image using a 3x3 Gaussian, then apply the blackhat
# morphological operator to find dark regions on a light background
gray = cv2.GaussianBlur(gray, (3, 3), 0)
image = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, rectKernel)
# set the new width and height and then determine the ratio in change
# for both the width and height
(newW, newH) = (args["width"], args["height"])
rW = W / float(newW)
rH = H / float(newH)
# resize the image and grab the new image dimensions
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
# define the two output layer names for the EAST detector model that
# we are interested -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
net = cv2.dnn.readNet(args["east"])
# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
start = time.time()
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
end = time.time()
# show timing information on text prediction
print("[INFO] text detection took {:.6f} seconds".format(end - start))
# grab the number of rows and columns from the scores volume, then
# initialize our set of bounding box rectangles and corresponding
# confidence scores
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
# loop over the number of rows
for y in range(0, numRows):
# extract the scores (probabilities), followed by the geometrical
# data used to derive potential bounding box coordinates that
# surround text
scoresData = scores[0, 0, y]
xData0 = geometry[0, 0, y]
xData1 = geometry[0, 1, y]
xData2 = geometry[0, 2, y]
xData3 = geometry[0, 3, y]
anglesData = geometry[0, 4, y]
# loop over the number of columns
for x in range(0, numCols):
# if our score does not have sufficient probability, ignore it
if scoresData[x] < args["min_confidence"]:
continue
# compute the offset factor as our resulting feature maps will
# be 4x smaller than the input image
(offsetX, offsetY) = (x * 4.0, y * 4.0)
# extract the rotation angle for the prediction and then
# compute the sin and cosine
angle = anglesData[x]
cos = np.cos(angle)
sin = np.sin(angle)
# use the geometry volume to derive the width and height of
# the bounding box
h = xData0[x] + xData2[x]
w = xData1[x] + xData3[x]
# compute both the starting and ending (x, y)-coordinates for
# the text prediction bounding box
endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
startX = int(endX - w)
startY = int(endY - h)
# add the bounding box coordinates and probability score to
# our respective lists
rects.append((startX, startY, endX, endY))
confidences.append(scoresData[x])
# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
boxes = non_max_suppression(np.array(rects), probs=confidences)
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# scale the bounding box coordinates based on the respective
# ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
# draw the bounding box on the image
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 255, 0), 2)
# show the output image
#cv2.imshow("Text Detection", orig)
name = "helllo.jpg"
cv2.imwrite(name,image)
cv2.waitKey(0)
|
the-stack_106_18866
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import datetime
from bs4 import BeautifulSoup
import scrape_common as sc
d = sc.download('https://www.sz.ch/behoerden/information-medien/medienmitteilungen/coronavirus.html/72-416-412-1379-6948', silent=True)
soup = BeautifulSoup(d, 'html.parser')
xls_url = soup.find('a', string=re.compile(r'Coronaf.lle\s*im\s*Kanton\s*Schwyz'))['href']
xls = sc.xlsdownload(xls_url, silent=True)
rows = sc.parse_xls(xls)
is_first = True
for row in rows:
if not isinstance(row['Datum'], datetime.datetime):
continue
if not is_first:
print('-' * 10)
is_first = False
# handle wrong value on 2020-03-25, see issue #631
if row['Datum'].date().isoformat() == '2020-03-25':
row['Bestätigte Fälle (kumuliert)'] = ''
print('SZ')
sc.timestamp()
print('Downloading:', xls_url)
if row['Zeit']:
print('Date and time:', row['Datum'].date().isoformat(), row['Zeit'].time().isoformat())
else:
print('Date and time:', row['Datum'].date().isoformat())
print('Confirmed cases:', row['Bestätigte Fälle (kumuliert)'])
print('Deaths:', row['Todesfälle (kumuliert)'])
print('Recovered:', row['Genesene (kumuliert)'])
|
the-stack_106_18869
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/adjust_results4_isadog.py
#
# PROGRAMMER:
# DATE CREATED:
# REVISED DATE:
# PURPOSE: Create a function adjust_results4_isadog that adjusts the results
# dictionary to indicate whether or not the pet image label is of-a-dog,
# and to indicate whether or not the classifier image label is of-a-dog.
# All dog labels from both the pet images and the classifier function
# will be found in the dognames.txt file. We recommend reading all the
# dog names in dognames.txt into a dictionary where the 'key' is the
# dog name (from dognames.txt) and the 'value' is one. If a label is
# found to exist within this dictionary of dog names then the label
# is of-a-dog, otherwise the label isn't of a dog. Alternatively one
# could also read all the dog names into a list and then if the label
# is found to exist within this list - the label is of-a-dog, otherwise
# the label isn't of a dog.
# This function inputs:
# -The results dictionary as results_dic within adjust_results4_isadog
# function and results for the function call within main.
# -The text file with dog names as dogfile within adjust_results4_isadog
# function and in_arg.dogfile for the function call within main.
# This function uses the extend function to add items to the list
#                 that's the 'value' of the results dictionary. You will be adding
# whether or not the pet image label is of-a-dog as the item at index
# 3 of the list and whether or not the classifier label is of-a-dog as
# the item at index 4 of the list. Note we recommend setting the values
# at indices 3 & 4 to 1 when the label is of-a-dog and to 0 when the
# label isn't a dog.
#
##
# TODO 4: Define adjust_results4_isadog function below, specifically replace the None
# below by the function definition of the adjust_results4_isadog function.
# Notice that this function doesn't return anything because the
# results_dic dictionary that is passed into the function is a mutable
# data type so no return is needed.
#
def adjust_results4_isadog(results_dic, dogfile):
"""
Adjusts the results dictionary to determine if classifier correctly
classified images 'as a dog' or 'not a dog' especially when not a match.
Demonstrates if model architecture correctly classifies dog images even if
it gets dog breed wrong (not a match).
Parameters:
results_dic - Dictionary with 'key' as image filename and 'value' as a
List. Where the list will contain the following items:
index 0 = pet image label (string)
index 1 = classifier label (string)
index 2 = 1/0 (int) where 1 = match between pet image
and classifer labels and 0 = no match between labels
------ where index 3 & index 4 are added by this function -----
NEW - index 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
NEW - index 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
dogfile - A text file that contains names of all dogs from the classifier
function and dog names from the pet image files. This file has
one dog name per line dog names are all in lowercase with
spaces separating the distinct words of the dog name. Dog names
from the classifier function can be a string of dog names separated
by commas when a particular breed of dog has multiple dog names
associated with that breed (ex. maltese dog, maltese terrier,
maltese) (string - indicates text file's filename)
Returns:
None - results_dic is mutable data type so no return needed.
"""
dognames_dict = []
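    # NOTE (editor's addition): despite its name this is a plain list of breed
    # strings; the membership checks below only use `in`, so a list behaves the
    # same as a dict whose keys are the dog names.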
with open(dogfile, "r") as infile:
# Reads in dognames from first line in file
line = infile.readline()
# Processes each line in file until reaching EOF (end-of-file) by
# processing line and adding dognames to dognames_dic with while loop
while line != "":
if line not in dognames_dict:
words = line.split(',')
for element in words:
breed =element.strip()
dognames_dict.append(breed)
line = infile.readline()
for key in results_dic:
# Pet Image Label IS of Dog (e.g. found in dognames_dic)
if results_dic[key][0] in dognames_dict:
# Classifier Label IS image of Dog (e.g. found in dognames_dic)
# appends (1, 1) because both labels are dogs
if results_dic[key][1] in dognames_dict:
results_dic[key].extend((1, 1))
else:
results_dic[key].extend((1,0))
else:
if results_dic[key][1] in dognames_dict:
results_dic[key].extend((0,1))
else:
results_dic[key].extend((0,0))
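# --- Hedged usage sketch (editor's addition, not part of the original file). ---
# Writes a tiny temporary dog-names file and shows the in-place extension of the
# results dictionary; the file contents and the single demo entry are made up.
if __name__ == "__main__":
    import os
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("beagle\nmaltese dog, maltese terrier, maltese\n")
        demo_dogfile = f.name
    demo_results = {"Beagle_01.jpg": ["beagle", "beagle", 1]}
    adjust_results4_isadog(demo_results, demo_dogfile)
    print(demo_results)  # expected: {'Beagle_01.jpg': ['beagle', 'beagle', 1, 1, 1]}
    os.remove(demo_dogfile)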
|
the-stack_106_18870
|
"""
@author: Jun Wang
@date: 20201019
@contact: [email protected]
"""
import os
import sys
import shutil
import argparse
import logging as logger
from itertools import chain
import torch
from torch import optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
sys.path.append('../../')
from backbone.backbone_def import BackboneFactory
from loss.loss_def import KDLossFactory
from utils.net_util import define_paraphraser, define_translator
sys.path.append('../../../../../')
from utils.AverageMeter import AverageMeter
from data_processor.train_dataset import ImageDataset
from head.head_def import HeadFactory
logger.basicConfig(level=logger.INFO,
format='%(levelname)s %(asctime)s %(filename)s: %(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
class FaceModel(torch.nn.Module):
"""Define a traditional face model which contains a backbone and a head.
Attributes:
backbone(object): the backbone of face model.
head(object): the head of face model.
"""
def __init__(self, backbone_factory, head_factory):
"""Init face model by backbone factorcy and head factory.
Args:
backbone_factory(object): produce a backbone according to config files.
head_factory(object): produce a head according to config files.
"""
super(FaceModel, self).__init__()
self.backbone = backbone_factory.get_backbone()
self.head = head_factory.get_head()
def forward(self, data, label):
out_stage1, out_stage2, out_stage3, out_stage4, feat = self.backbone.forward(data)
pred = self.head.forward(feat, label)
return out_stage1, out_stage2, out_stage3, out_stage4, feat, pred
def get_lr(optimizer):
"""Get the current learning rate from optimizer.
"""
for param_group in optimizer.param_groups:
return param_group['lr']
def train_para(train_loader, teacher_model, paraphraser, optimizer_para, criterionPara, total_epoch, conf):
para_losses = AverageMeter()
paraphraser.train()
for epoch in range(1, total_epoch+1):
for batch_idx, (images, labels) in enumerate(train_loader):
images = images.to(conf.device)
with torch.no_grad():
_, _, _, outputs, _, _ = teacher_model(images, labels)
_, outputs_rec = paraphraser(outputs.detach())
para_loss = criterionPara(outputs_rec, outputs.detach())
para_losses.update(para_loss.item(), images.size(0))
optimizer_para.zero_grad()
para_loss.backward()
optimizer_para.step()
if batch_idx % conf.print_freq == 0:
para_loss_avg = para_losses.avg
lr = get_lr(optimizer_para)
logger.info('Epoch %d, iter %d/%d, lr %f, loss_para %f.' %
(epoch, batch_idx, len(train_loader), lr, para_loss_avg))
para_losses.reset()
def train_one_epoch(data_loader, teacher_model, paraphraser, student_model, translator, optimizer,
criterion, criterion_kd, cur_epoch, loss_cls_meter, loss_kd_meter, conf):
"""Tain one epoch by traditional training.
"""
for batch_idx, (images, labels) in enumerate(data_loader):
images = images.to(conf.device)
labels = labels.to(conf.device)
labels = labels.squeeze()
_, _, _, outputs_s, feats_s, preds_s = student_model.forward(images, labels)
factor_s = translator(outputs_s)
loss_cls = criterion(preds_s, labels)
with torch.no_grad():
_, _, _, outputs_t, feats_t, preds_t = teacher_model.forward(images, labels)
factor_t, _ = paraphraser(outputs_t)
loss_kd = criterion_kd(factor_s, factor_t.detach()) * args.lambda_kd
loss = loss_cls + loss_kd
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_cls_meter.update(loss_cls.item(), images.shape[0])
loss_kd_meter.update(loss_kd.item(), images.shape[0])
if batch_idx % conf.print_freq == 0:
loss_cls_avg = loss_cls_meter.avg
loss_kd_avg = loss_kd_meter.avg
lr = get_lr(optimizer)
logger.info('Epoch %d, iter %d/%d, lr %f, loss_cls %f, loss_kd %f.' %
(cur_epoch, batch_idx, len(data_loader), lr, loss_cls_avg, loss_kd_avg))
global_batch_idx = cur_epoch * len(data_loader) + batch_idx
conf.writer.add_scalar('Cls_loss', loss_cls_avg, global_batch_idx)
conf.writer.add_scalar('KD_loss', loss_kd_avg, global_batch_idx)
conf.writer.add_scalar('Train_lr', lr, global_batch_idx)
loss_cls_meter.reset()
loss_kd_meter.reset()
if (batch_idx + 1) % conf.save_freq == 0:
saved_name = 'Epoch_%d_batch_%d.pt' % (cur_epoch, batch_idx)
state = {
'state_dict': student_model.module.state_dict(),
'epoch': cur_epoch,
'batch_id': batch_idx
}
torch.save(state, os.path.join(conf.out_dir, saved_name))
logger.info('Save checkpoint %s to disk.' % saved_name)
saved_name = 'Epoch_%d.pt' % cur_epoch
state = {'state_dict': student_model.module.state_dict(),
'epoch': cur_epoch, 'batch_id': batch_idx}
torch.save(state, os.path.join(conf.out_dir, saved_name))
logger.info('Save checkpoint %s to disk...' % saved_name)
def train(conf):
"""Total training procedure.
"""
conf.device = torch.device('cuda:0')
data_loader = DataLoader(ImageDataset(conf.data_root, conf.train_file),
conf.batch_size, True, num_workers = 4)
# define head factory.
head_factory = HeadFactory(conf.head_type, conf.head_conf_file)
# define teacher model.
teacher_backbone_factory = BackboneFactory(conf.teacher_backbone_type, conf.teacher_backbone_conf_file)
teacher_model = FaceModel(teacher_backbone_factory, head_factory)
state_dict = torch.load(args.pretrained_teacher)['state_dict']
teacher_model.load_state_dict(state_dict)
teacher_model = torch.nn.DataParallel(teacher_model).cuda()
# define and train the paraphraser
paraphraser = define_paraphraser(512, k=0.5)
optimizer_para = torch.optim.SGD(paraphraser.parameters(),
lr = args.lr * 0.1,
momentum = args.momentum,
weight_decay = 1e-4)
criterionPara = torch.nn.MSELoss().cuda()
logger.info('The first stage, training the paraphraser......')
train_para(data_loader, teacher_model, paraphraser, optimizer_para, criterionPara, 2, conf)
paraphraser.eval()
for param in paraphraser.parameters():
param.requires_grad = False
logger.info('The second stage, training the student network......')
translator = define_translator(512, 512, k=0.5)
criterion = torch.nn.CrossEntropyLoss().cuda(conf.device)
kd_loss_factory = KDLossFactory(conf.loss_type, conf.loss_conf_file)
criterion_kd = kd_loss_factory.get_kd_loss().cuda(conf.device)
# define student model
student_backbone_factory = BackboneFactory(conf.student_backbone_type, conf.student_backbone_conf_file)
student_model = FaceModel(student_backbone_factory, head_factory)
ori_epoch = 0
if conf.resume:
        ori_epoch = torch.load(args.pretrained_model)['epoch'] + 1
        state_dict = torch.load(args.pretrained_model)['state_dict']
student_model.load_state_dict(state_dict)
student_model = torch.nn.DataParallel(student_model).cuda()
parameters = [p for p in student_model.parameters() if p.requires_grad]
optimizer = optim.SGD(chain(parameters, translator.parameters()), lr = conf.lr,
momentum = conf.momentum, weight_decay = 1e-4)
lr_schedule = optim.lr_scheduler.MultiStepLR(
optimizer, milestones = conf.milestones, gamma = 0.1)
loss_cls_meter = AverageMeter()
loss_kd_meter = AverageMeter()
student_model.train()
for epoch in range(ori_epoch, conf.epoches):
train_one_epoch(data_loader, teacher_model, paraphraser, student_model, translator, optimizer,
criterion, criterion_kd, epoch, loss_cls_meter, loss_kd_meter, conf)
lr_schedule.step()
if __name__ == '__main__':
conf = argparse.ArgumentParser(description='traditional_training for face recognition.')
conf.add_argument("--data_root", type = str,
help = "The root folder of training set.")
conf.add_argument("--train_file", type = str,
help = "The training file path.")
conf.add_argument("--teacher_backbone_type", type = str,
help = "Mobilefacenets, Resnet.")
conf.add_argument("--teacher_backbone_conf_file", type = str,
help = "the path of backbone_conf.yaml.")
conf.add_argument("--student_backbone_type", type = str,
help = "Mobilefacenets, Resnet.")
conf.add_argument("--student_backbone_conf_file", type = str,
help = "the path of backbone_conf.yaml.")
conf.add_argument("--head_type", type = str,
help = "mv-softmax, arcface, npc-face.")
conf.add_argument("--head_conf_file", type = str,
help = "the path of head_conf.yaml.")
conf.add_argument("--loss_type", type = str,
help = "Logits, PKT...")
conf.add_argument("--loss_conf_file", type = str,
help = "the path of loss_conf.yaml.")
conf.add_argument('--lr', type = float, default = 0.1,
help='The initial learning rate.')
conf.add_argument('--lambda_kd', type = float, default = 1.0,
help='The weight of kd loss.')
conf.add_argument("--out_dir", type = str,
help = "The folder to save models.")
conf.add_argument('--epoches', type = int, default = 9,
help = 'The training epoches.')
conf.add_argument('--step', type = str, default = '2,5,7',
help = 'Step for lr.')
conf.add_argument('--print_freq', type = int, default = 10,
help = 'The print frequency for training state.')
conf.add_argument('--save_freq', type = int, default = 10,
help = 'The save frequency for training state.')
conf.add_argument('--batch_size', type = int, default = 128,
help='The training batch size over all gpus.')
conf.add_argument('--momentum', type = float, default = 0.9,
help = 'The momentum for sgd.')
conf.add_argument('--log_dir', type = str, default = 'log',
help = 'The directory to save log.log')
conf.add_argument('--tensorboardx_logdir', type = str,
help = 'The directory to save tensorboardx logs')
conf.add_argument('--pretrained_teacher', type = str, default = 'mv_epoch_8.pt',
                      help = 'The path of pretrained teacher model')
conf.add_argument('--pretrained_model', type = str, default = 'mv_epoch_8.pt',
help = 'The path of pretrained model')
conf.add_argument('--resume', '-r', action = 'store_true', default = False,
help = 'Whether to resume from a checkpoint.')
args = conf.parse_args()
args.milestones = [int(num) for num in args.step.split(',')]
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
tensorboardx_logdir = os.path.join(args.log_dir, args.tensorboardx_logdir)
if os.path.exists(tensorboardx_logdir):
shutil.rmtree(tensorboardx_logdir)
writer = SummaryWriter(log_dir=tensorboardx_logdir)
args.writer = writer
logger.info('Start optimization.')
logger.info(args)
train(args)
logger.info('Optimization done!')
|
the-stack_106_18872
|
from nlcontrol.systems import SystemBase
from nlcontrol.signals import step, empty_signal
from simupy.block_diagram import BlockDiagram
from simupy.systems.symbolic import MemorylessSystem, DynamicalSystem
from simupy.systems import SystemFromCallable
from sympy.tensor.array import Array
from sympy import Symbol
import numpy as np
states1 = 'x1'
inputs1 = 'u1'
sys1 = SystemBase(states1, inputs1)
x1, x1dot, u1 = sys1.create_variables()
sys1.system = DynamicalSystem(state_equation=Array([-x1 + u1]), state=x1, output_equation=x1, input_=u1)
sys1_lin, _ = sys1.linearize(1)
print('state_eq: ',sys1_lin.system.state_equation)
states2 = None
inputs2 = 'w'
sys2 = SystemBase(states2, inputs2)
w = sys2.create_variables()
output_eq = Array([5 * w])
sys2.sys = MemorylessSystem(input_=Array([w]), output_equation=output_eq)
print('output_eq: ', sys2.output_equation)
states3 = 'x2'
inputs3 = 'u2'
sys3 = SystemBase(states3, inputs3)
x2, x2dot, u2, u2dot = sys3.create_variables(True)
sys3.system = DynamicalSystem(state_equation=Array([-x2**2 - u2**2]), state=Array([x2]), output_equation=Array([x2]), input_=u2)
sys3_lin, _ = sys3.linearize(1,2)
print('state_eq: ',sys3_lin.system.state_equation)
states4 = 'x3, x4'
inputs4 = 'u3'
sys4 = SystemBase(states4, inputs4)
print('Vars: ', sys4.create_variables())
x3, x4, x3dot, x4dot, u3 = sys4.create_variables()
sys4.system = DynamicalSystem(state_equation=Array([-x3 + x4 + u3, -x4 + 0.5 * x3]), state=Array([x3, x4]), output_equation=Array([x3 * x4, x4]), input_=u3)
sys4_lin, _ = sys4.linearize([2, 1], 1)
print('state_eq: ', sys4_lin.system.state_equation)
states5 = 'x5'
inputs5 = 'u4, u5'
sys5 = SystemBase(states5, inputs5)
x5, x5dot, u4, u5 = sys5.create_variables()
sys5.system = DynamicalSystem(state_equation=Array([-x5 + u4 - u5]), state=Array([x5]), output_equation=Array([x5]), input_=Array([u4, u5]))
mode = 'series'
if mode == 'series':
series_sys1 = sys1.series(sys2)
print(series_sys1.sys.state_equation)
print(series_sys1.sys.output_equation)
print(series_sys1, ' - ', series_sys1.sys)
series_sys2 = sys1.series(sys3)
print(series_sys2.sys.state_equation)
print(series_sys2.sys.output_equation)
print(series_sys2, ' - ', series_sys2.sys)
sys2.block_configuration
sys1.block_configuration
series_sys3 = sys2.series(sys1)
print(series_sys3.sys.state_equation)
print(series_sys3.sys.output_equation)
print(series_sys3, ' - ', series_sys3.sys)
series_sys4 = sys2.series(sys2)
# print(series_sys4.sys.state_equation)
print(series_sys4.sys.output_equation)
print(series_sys4, ' - ', series_sys4.sys)
series_md = sys4.series(sys5)
print(series_md.sys.state)
print(series_md.sys.state_equation)
print(series_md.sys.output_equation)
print(series_md, ' - ', series_md.sys)
elif mode == 'parallel':
parallel_sys1 = sys1.parallel(sys2)
print(parallel_sys1.sys.state_equation)
print(parallel_sys1.sys.output_equation)
print(parallel_sys1, ' - ', parallel_sys1.sys)
parallel_sys2 = sys1.parallel(sys3)
print(parallel_sys2.sys.state_equation)
print(parallel_sys2.sys.output_equation)
print(parallel_sys2, ' - ', parallel_sys2.sys)
parallel_sys3 = sys2.parallel(sys1)
print(parallel_sys3.sys.state_equation)
print(parallel_sys3.sys.output_equation)
print(parallel_sys3, ' - ', parallel_sys3.sys)
parallel_sys4 = sys2.parallel(sys2)
# print(parallel_sys4.sys.state_equation)
print(parallel_sys4.sys.output_equation)
print(parallel_sys4, ' - ', parallel_sys4.sys)
input_step1 = step()
input_step2 = step(step_times=[5, 15], end_values=[0.9, 1.1], begin_values=[0.2, 0.15])
input_step3 = step(step_times=[5], end_values=[1.4], begin_values=[0.4])
input_step4 = step(step_times=[5, 5], end_values=[1.4, 1.5], begin_values=[0.4, 0.5])
input_empty = empty_signal(sys2.system.dim_input)
time_axis = np.linspace(0, 20, 100)
test_simulation = False
if test_simulation:
# 1
sys1.simulation(time_axis, initial_conditions=1, input_signals=input_step3, plot=True)
# 2
series_md.simulation(20, initial_conditions=[0.1, 0.5, 0.2], input_signals=input_step3, plot=True)
# 3
series_md.simulation(20, initial_conditions=[0.1, 0.5, 0.2], plot=True)
# 4A
sys5.simulation([2, 20], initial_conditions=[0.5], input_signals=input_step2, plot=True)
# 4B
sys5.simulation([2, 20], initial_conditions=[0.5], input_signals=input_step4, plot=True)
# 5
sys2.simulation(time_axis, plot=True, input_signals=input_step3)
#6
sys2.simulation([0, 15], initial_conditions=1, plot=True)
# 7
input_step2.simulation(time_axis, plot=True)
sys5.simulation([2, 20], initial_conditions=[0.5], input_signals=input_step2, plot=False)
integrator_options = {'nsteps': 1000}
sys5.simulation([2, 20], initial_conditions=[0.5], input_signals=input_step2, plot=True, custom_integrator_options=integrator_options)
|
the-stack_106_18873
|
from __future__ import division
import argparse
from PIL import Image
import numpy as np
import gym
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten, Convolution2D, Permute
from tensorflow.keras.optimizers import Adam
import tensorflow.keras.backend as K
from rl.agents.dqn import DQNAgent
from rl.policy import LinearAnnealedPolicy, BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
from rl.core import Processor
from rl.callbacks import FileLogger, ModelIntervalCheckpoint
INPUT_SHAPE = (84, 84)
WINDOW_LENGTH = 4
class AtariProcessor(Processor):
def process_observation(self, observation):
assert observation.ndim == 3 # (height, width, channel)
img = Image.fromarray(observation)
img = img.resize(INPUT_SHAPE).convert('L') # resize and convert to grayscale
processed_observation = np.array(img)
assert processed_observation.shape == INPUT_SHAPE
return processed_observation.astype('uint8') # saves storage in experience memory
def process_state_batch(self, batch):
# We could perform this processing step in `process_observation`. In this case, however,
# we would need to store a `float32` array instead, which is 4x more memory intensive than
# an `uint8` array. This matters if we store 1M observations.
processed_batch = batch.astype('float32') / 255.
return processed_batch
def process_reward(self, reward):
return np.clip(reward, -1., 1.)
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--env-name', type=str, default='BreakoutDeterministic-v4')
parser.add_argument('--weights', type=str, default=None)
args = parser.parse_args()
# Get the environment and extract the number of actions.
env = gym.make(args.env_name)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
# Next, we build our model. We use the same model that was described by Mnih et al. (2015).
input_shape = (WINDOW_LENGTH,) + INPUT_SHAPE
model = Sequential()
# (width, height, channels)
model.add(Permute((2, 3, 1), input_shape=input_shape))
model.add(Convolution2D(32, (8, 8), strides=(4, 4)))
model.add(Activation('relu'))
model.add(Convolution2D(64, (4, 4), strides=(2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(64, (3, 3), strides=(1, 1)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in tensorflow.keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)
processor = AtariProcessor()
# Select a policy. We use eps-greedy action selection, which means that a random action is selected
# with probability eps. We anneal eps from 1.0 to 0.1 over the course of 1M steps. This is done so that
# the agent initially explores the environment (high eps) and then gradually sticks to what it knows
# (low eps). We also set a dedicated eps value that is used during testing. Note that we set it to 0.05
# so that the agent still performs some random actions. This ensures that the agent cannot get stuck.
policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05,
nb_steps=1000000)
# The trade-off between exploration and exploitation is difficult and an on-going research topic.
# If you want, you can experiment with the parameters or use a different policy. Another popular one
# is Boltzmann-style exploration:
# policy = BoltzmannQPolicy(tau=1.)
# Feel free to give it a try!
dqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory,
processor=processor, nb_steps_warmup=50000, gamma=.99, target_model_update=10000,
train_interval=4, delta_clip=1.)
dqn.compile(Adam(lr=.00025), metrics=['mae'])
if args.mode == 'train':
# Okay, now it's time to learn something! We capture the interrupt exception so that training
# can be prematurely aborted. Notice that now you can use the built-in tensorflow.keras callbacks!
weights_filename = f'dqn_{args.env_name}_weights.h5f'
checkpoint_weights_filename = 'dqn_' + args.env_name + '_weights_{step}.h5f'
log_filename = f'dqn_{args.env_name}_log.json'
callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=250000)]
callbacks += [FileLogger(log_filename, interval=100)]
dqn.fit(env, callbacks=callbacks, nb_steps=1750000, log_interval=10000)
# After training is done, we save the final weights one more time.
dqn.save_weights(weights_filename, overwrite=True)
# Finally, evaluate our algorithm for 10 episodes.
dqn.test(env, nb_episodes=10, visualize=False)
elif args.mode == 'test':
weights_filename = f'dqn_{args.env_name}_weights.h5f'
if args.weights:
weights_filename = args.weights
dqn.load_weights(weights_filename)
dqn.test(env, nb_episodes=10, visualize=True)
|
the-stack_106_18874
|
import six
from sklearn.pipeline import _name_estimators, Pipeline
from sklearn.utils import tosequence
class TransformerPipeline(Pipeline):
"""
Pipeline that expects all steps to be transformers taking a single argument
and having fit and transform methods.
Code is copied from sklearn's Pipeline, leaving out the `y=None` argument.
"""
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
estimator = estimators[-1]
for e in estimators:
if (not (hasattr(e, "fit") or hasattr(e, "fit_transform")) or not
hasattr(e, "transform")):
raise TypeError("All steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (e, type(e)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
def _pre_transform(self, X, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, **fit_params):
Xt, fit_params = self._pre_transform(X, **fit_params)
self.steps[-1][-1].fit(Xt, **fit_params)
return self
def fit_transform(self, X, **fit_params):
Xt, fit_params = self._pre_transform(X, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, **fit_params).transform(Xt)
def make_transformer_pipeline(*steps):
"""Construct a TransformerPipeline from the given estimators.
"""
return TransformerPipeline(_name_estimators(steps))
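# Minimal usage sketch (illustrative only; any objects exposing a single-argument fit plus
# transform will do, the sklearn scalers below are just stand-ins):
#
#     from sklearn.preprocessing import StandardScaler, MinMaxScaler
#     pipe = make_transformer_pipeline(StandardScaler(), MinMaxScaler())
#     Xt = pipe.fit_transform(X)   # X: 2D array-like of numeric features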
|
the-stack_106_18875
|
import click
from google.cloud import pubsub_v1 as pubsub
from google.cloud.pubsub_v1.types import BatchSettings
def standard_input():
"""Generator that yields lines from standard input."""
with click.get_text_stream("stdin") as stdin:
while stdin.readable():
line = stdin.readline()
if line:
yield line.strip().encode("utf-8")
@click.command()
@click.argument("topic", type=str)
@click.option("--google-cloud-project", "-p", envvar="GOOGLE_CLOUD_PROJECT")
@click.option("--batch-size", "-b", default=500)
def run(topic, google_cloud_project, batch_size):
publisher = pubsub.PublisherClient(
batch_settings=BatchSettings(max_messages=batch_size)
)
topic_path = publisher.topic_path(google_cloud_project, topic)
for line in standard_input():
publisher.publish(topic_path, data=line)
if __name__ == "__main__":
run()
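# Example invocation sketch (script, topic and project names are placeholders):
#   cat events.jsonl | python publish.py my-topic --google-cloud-project my-project --batch-size 250
# Each non-empty stdin line is published as one Pub/Sub message, batched client-side.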
|
the-stack_106_18877
|
from typing import Optional
from aqt import mw, gui_hooks
from aqt.utils import tooltip
from aqt.addcards import AddCards
add_dialog: Optional[AddCards] = None
def switch_model(name):
try:
notetype = mw.col.models.by_name(name)
if notetype:
id = notetype["id"]
add_dialog.notetype_chooser.selected_notetype_id = id
tooltip(name)
else:
tooltip("No note type with name: " + name)
except: # triggered when not in Add Cards window
pass
def run_shortcut(value):
switch_model(value)
def add_in_shortcuts(cuts, editor):
myscuts = mw.addonManager.getConfig(__name__)["shortcuts"]
for key in myscuts:
val = myscuts[key]
if val and val != "none":
cuts.append((key, lambda i=val: run_shortcut(i)))
def new_add_cards(addcards: AddCards):
global add_dialog
add_dialog = addcards
gui_hooks.add_cards_did_init.append(new_add_cards)
gui_hooks.editor_did_init_shortcuts.append(add_in_shortcuts)
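# Example of the add-on config this expects (a sketch; shortcut keys and note type names are
# placeholders):
#   { "shortcuts": { "Ctrl+1": "Basic", "Ctrl+2": "Cloze", "Ctrl+3": "none" } }
# Each key becomes an editor shortcut that switches the Add Cards dialog to the note type with
# that name; a value of "none" (or an empty value) disables the binding.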
|
the-stack_106_18878
|
# Batch Add Context
# Tool of csv2po.py
# By Tom CHEN <[email protected]> (tomchen.org)
import re
from pathlib import Path
from getfilepaths import getFilePaths
def addContext(inputPath, outputPath, encoding, context):
f = inputPath.open(mode = 'r', encoding = encoding, newline = '', errors = "strict")
content = f.read()
f.close()
content = content.replace("_(TRANS)_", "_(TRANS_CONTEXT:'" + context + "')_")
outputPath.parent.mkdir(parents = True, exist_ok = True)
fout = outputPath.open(mode = 'w', encoding = encoding, newline = '', errors = "strict")
    fout.write(content)
fout.close()
def batchAddContext(inputPath, extension, encoding, outputPath, filePathToContextFunc):
for p in getFilePaths(inputPath, extension = extension):
addContext(inputPath = p, outputPath = outputPath.joinpath(p.relative_to(inputPath)), encoding = encoding, context = filePathToContextFunc(p))
def fn2c(filePath):
fileName = filePath.name.lower()
ext = filePath.suffix.lower()
stem = filePath.stem.lower()
if ext == '.txt':
return stem
else:
return fileName
batchAddContext(inputPath = Path('template_without_context'), extension = ['txt', 'str', 'ini'], encoding = 'UTF-8', outputPath = Path('.'), filePathToContextFunc = fn2c)
|
the-stack_106_18879
|
import _plotly_utils.basevalidators
class ThicknessmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name='thicknessmode',
parent_name='scatterpolargl.marker.colorbar',
**kwargs
):
super(ThicknessmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
role='style',
values=['fraction', 'pixels'],
**kwargs
)
|
the-stack_106_18880
|
# -*- coding: utf-8 -*-
import sys
from lxml import etree
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.mixins.ModuleCacheMixin import ModuleCacheMixin
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class XPath(BaseThreadedModule, ModuleCacheMixin):
"""
Parse an xml string via xpath.
This module supports the storage of the results in a cache. If cache is set,
it will first try to retrieve the result from cache via the key setting.
If that fails, it will execute the xpath query and store the result in cache.
Configuration template:
- parser.XPath:
source_field: # <type: string; is: required>
target_field: # <default: "xpath_result"; type: string; is: optional>
query: # <type: string; is: required>
cache: # <default: None; type: None||string; is: optional>
cache_key: # <default: None; type: None||string; is: optional if cache is None else required>
cache_lock: # <default: 'Lumbermill:XPathParser:XPathParserLock'; type: string; is: optional>
cache_ttl: # <default: 60; type: integer; is: optional>
receivers:
- NextModule
"""
module_type = "parser"
"""Set module type"""
def configure(self, configuration):
BaseThreadedModule.configure(self, configuration)
ModuleCacheMixin.configure(self)
def _castToList(self, value):
list = []
for x in value:
try:
list.append(etree.tostring(x))
except TypeError:
list.append(str(x))
return list
def handleEvent(self, event):
"""
Process the event.
@param event: dictionary
@return data: dictionary
"""
source_field = self.getConfigurationValue('source_field', event)
result = None
if self.cache:
cache_key = self.getConfigurationValue('cache_key', event)
result = self._getFromCache(cache_key, event)
if result is None:
try:
xml_string = bytes(event[source_field], "UTF-8")
except KeyError:
yield event
return
try:
xml_root = etree.fromstring(xml_string)
xml_tree = etree.ElementTree(xml_root)
result = xml_tree.xpath(self.getConfigurationValue('query', event))
except:
etype, evalue, etb = sys.exc_info()
self.logger.warning("Could not parse xml doc %s Exception: %s, Error: %s." % (xml_string, etype, evalue))
if result:
if type(result) == list:
result = self._castToList(result)
if self.cache and not event['lumbermill']['cache_hit']:
self.cache.set(cache_key, result, self.cache_ttl)
target_field_name = self.getConfigurationValue('target_field', event)
event[target_field_name] = result
yield event
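# Concrete configuration sketch for the template in the class docstring (field names and the
# xpath query are illustrative):
#
#   - parser.XPath:
#       source_field: xml_payload
#       target_field: book_titles
#       query: //book/title/text()
#       receivers:
#         - NextModule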
|
the-stack_106_18881
|
"""Remove location is_default
Revision ID: 8521bce91242
Revises: fe73a07da0b4
Create Date: 2019-03-31 16:19:23.467808
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8521bce91242'
down_revision = 'fe73a07da0b4'
branch_labels = None
depends_on = None
def upgrade():
op.drop_column('locations', 'is_default', schema='roombooking')
def downgrade():
op.add_column('locations', sa.Column('is_default', sa.Boolean(), nullable=False, server_default='false'),
schema='roombooking')
op.alter_column('locations', 'is_default', server_default=None, schema='roombooking')
op.execute('''
WITH cte AS (
SELECT id FROM roombooking.locations ORDER BY id ASC LIMIT 1
)
UPDATE roombooking.locations loc
SET is_default = true
FROM cte
WHERE loc.id = cte.id
''')
|
the-stack_106_18882
|
'''
from baselines/ppo1/mlp_policy.py and add simple modification
(1) add reuse argument
(2) cache the `stochastic` placeholder
'''
import tensorflow as tf
import gym
import market.baselines.baselines.common.tf_util as U
from market.baselines.baselines.common.mpi_running_mean_std import RunningMeanStd
from market.baselines.baselines.common.distributions import make_pdtype
from market.baselines.baselines.acktr.utils import dense
class MlpPolicy(object):
recurrent = False
def __init__(self, name, reuse=False, *args, **kwargs):
with tf.variable_scope(name):
if reuse:
tf.get_variable_scope().reuse_variables()
self._init(*args, **kwargs)
self.scope = tf.get_variable_scope().name
def _init(self, ob_space, ac_space, hid_size, num_hid_layers):
self.pdtype = pdtype = make_pdtype(ac_space)
sequence_length = None
ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))
with tf.variable_scope("obfilter"):
self.ob_rms = RunningMeanStd(shape=ob_space.shape)
obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(dense(last_out, hid_size, "vffc%i" % (i+1), weight_init=U.normc_initializer(1.0)))
self.vpred = dense(last_out, 1, "vffinal", weight_init=U.normc_initializer(1.0))[:, 0]
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(dense(last_out, hid_size, "polfc%i" % (i+1), weight_init=U.normc_initializer(1.0)))
mean = dense(last_out, pdtype.param_shape()[0]//2, "polfinal", U.normc_initializer(0.01))
logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0]//2], initializer=tf.zeros_initializer())
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
self.pd = pdtype.pdfromflat(pdparam)
self.state_in = []
self.state_out = []
# change for BC
stochastic = U.get_placeholder(name="stochastic", dtype=tf.bool, shape=())
ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
self.ac = ac
self._act = U.function([stochastic, ob], [ac, self.vpred])
def act(self, stochastic, ob):
ac1, vpred1 = self._act(stochastic, ob)
return ac1[0], vpred1[0]
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
|
the-stack_106_18884
|
# Author: Tomas Hodan ([email protected])
# Center for Machine Perception, Czech Technical University in Prague
"""Configuration of the BOP Toolkit."""
import os
######## Basic ########
# Folder with the BOP datasets.
if 'BOP_PATH' in os.environ:
datasets_path = os.environ['BOP_PATH']
else:
datasets_path = r'/path/to/bop/datasets'
# Folder with pose results to be evaluated.
results_path = r'/home/seung/Workspace/papers/2021/AUMask/BlenderProc/aumask/output/bop_data'
# Folder for the calculated pose errors and performance scores.
eval_path = r'/path/to/eval/folder'
######## Extended ########
# Folder for outputs (e.g. visualizations).
output_path = r'/home/seung/BOP'
# For offscreen C++ rendering: Path to the build folder of bop_renderer (github.com/thodan/bop_renderer).
bop_renderer_path = r'/path/to/bop_renderer/build'
# Executable of the MeshLab server.
meshlab_server_path = r'/usr/bin/meshlabserver'
|
the-stack_106_18885
|
#!/usr/bin/python
# -*-coding:utf-8 -*-
u"""
:Created: 2020/5/18 23:57
:Author: 苍之幻灵
:Homepage: https://cpcgskill.com
:QQ: 2921251087
:Afdian: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
CPMel.cmds scripting module
"""
from collections import Iterable
import maya.cmds as mc
from ...api import OpenMaya
from ... import core as cmcore
from . import basedata
from . import nodedata
from . import nodetypes
from ...tool import decode
# DELETE #
from ... import ISDEBUG
if ISDEBUG:
reload(basedata)
reload(nodedata)
reload(nodetypes)
# \DELETE #
from .basedata import *
from .nodedata import *
from .nodetypes import *
from .nodedata import newObject
__all__ = ["CPMelToCmds", "cmdsToCPMel", "commandWrap"]
# __ToTuple__ = {OpenMaya.MVector, OpenMaya.MFloatVector, OpenMaya.MPoint, OpenMaya.MFloatPoint, OpenMaya.MFloatArray,
# OpenMaya.MDoubleArray, OpenMaya.MIntArray, OpenMaya.MInt64Array}
__ToTuple__ = (list, tuple)
# __RecursiveToList__ = {list, tuple, OpenMaya.MPointArray, OpenMaya.MFloatPointArray, OpenMaya.MVectorArray,
# OpenMaya.MFloatVector}
# CreateObjectType = nodedata.CreateObjectType()
def CPMelToCmds(val):
u"""
    Converts a CPMel object into a value that cmds can understand.
    :param val: an element of the cmds module's input argument list
    :return: the converted object
"""
if isinstance(val, BaseData):
return val.compile()
if isinstance(val, CPObject):
return val.compile()
    # check whether the argument is a value that needs to be recursively converted to a list
for i in __ToTuple__:
if isinstance(val, i):
return tuple((CPMelToCmds(t) for t in val))
return val
def cmdsToCPMel(val):
u"""
    Converts a cmds return value into an object used by CPMel.
    :param val: an element of the cmds module's return value list
    :return: the converted object
"""
if isinstance(val, tuple):
if len(val) == 3:
try:
return basedata.Double3(val)
except Exception:
return val
return val
if isinstance(val, basestring):
try:
return newObject(val)
except Exception:
return val
# return val
if isinstance(val, list):
return [cmdsToCPMel(i) for i in val]
return val
def inCommandWrap(fn):
u"""
    Command wrapper function (converts input arguments).
:param fn:
:return:
"""
def test(*args, **kwargs):
args = tuple((CPMelToCmds(i) for i in args))
kwargs = {i: CPMelToCmds(kwargs[i]) for i in kwargs}
try:
return fn(*args, **kwargs)
except Exception as ex:
raise cmcore.CPMelError(u"Command error >> " + u"\n".join([decode(i) for i in ex.args]))
test.__name__ = fn.__name__
test.__doc__ = fn.__doc__
return test
def runCommandWrap(fn):
u"""
    Command return-value wrapper function.
:param fn:
:return:
"""
def test(*args, **kwargs):
try:
out_args = fn(*args, **kwargs)
except Exception as ex:
raise cmcore.CPMelError(u"Command error >> " + u"\n".join([decode(i) for i in ex.args]))
if isinstance(out_args, Iterable) and (not isinstance(out_args, basestring)):
return type(out_args)((cmdsToCPMel(i) for i in out_args))
return cmdsToCPMel(out_args)
test.__name__ = fn.__name__
test.__doc__ = fn.__doc__
return test
def runUiCommandWrap(fn):
u"""
    GUI command return-value wrapper function.
:param fn:
:return: fn
"""
def test(*args, **kwargs):
try:
out_args = fn(*args, **kwargs)
except Exception as ex:
raise cmcore.CPMelError(u"Command error >> " + u"\n".join([decode(i) for i in ex.args]))
if (not 'q' in kwargs) and (not 'query' in kwargs) and isinstance(out_args, basestring):
return nodedata.UIObject(out_args)
else:
return out_args
test.__name__ = fn.__name__
test.__doc__ = fn.__doc__
return test
def commandWrap(fn):
u"""
    Command wrapper function.
:param fn:
:return:
"""
def test(*args, **kwargs):
args = tuple((CPMelToCmds(i) for i in args))
kwargs = {i: CPMelToCmds(kwargs[i]) for i in kwargs}
try:
out_args = fn(*args, **kwargs)
except Exception as ex:
raise cmcore.CPMelError(u"Command error >> " + u"\n".join([decode(i) for i in ex.args]))
if isinstance(out_args, Iterable) and (not isinstance(out_args, basestring)):
return type(out_args)((cmdsToCPMel(i) for i in out_args))
return cmdsToCPMel(out_args)
test.__name__ = fn.__name__
test.__doc__ = fn.__doc__
return test
def uiCommandWrap(fn):
u"""
    GUI command wrapper function.
:param fn:
:return: fn
"""
def test(*args, **kwargs):
args = tuple((CPMelToCmds(i) for i in args))
kwargs = {i: CPMelToCmds(kwargs[i]) for i in kwargs}
try:
out_args = fn(*args, **kwargs)
except Exception as ex:
raise cmcore.CPMelError(u"Command error >> " + u"\n".join([decode(i) for i in ex.args]))
if (not 'q' in kwargs) and (not 'query' in kwargs) and isinstance(out_args, basestring):
return nodedata.UIObject(out_args)
else:
return out_args
test.__name__ = fn.__name__
test.__doc__ = fn.__doc__
return test
# def getCmdInfoBasic(command):
# typemap = {
# 'string': unicode,
# 'length': float,
# 'float': float,
# 'angle': float,
# 'int': int,
# 'unsignedint': int,
# 'on|off': bool,
# 'script': callable,
# 'name': 'PyNode'
# }
# flags = {}
# shortFlags = {}
# removedFlags = {}
# try:
# lines = cmds.help(command).split('\n')
# except RuntimeError:
# pass
# else:
# synopsis = lines.pop(0)
# # certain commands on certain platforms have an empty first line
# if not synopsis:
# synopsis = lines.pop(0)
# #_logger.debug(synopsis)
# if lines:
# lines.pop(0) # 'Flags'
# #_logger.debug(lines)
#
# for line in lines:
# line = line.replace('(Query Arg Mandatory)', '')
# line = line.replace('(Query Arg Optional)', '')
# tokens = line.split()
#
# try:
# tokens.remove('(multi-use)')
# multiuse = True
# except ValueError:
# multiuse = False
# #_logger.debug(tokens)
# if len(tokens) > 1 and tokens[0].startswith('-'):
#
# args = [typemap.get(x.lower(), util.uncapitalize(x)) for x in tokens[2:]]
# numArgs = len(args)
#
# # lags with no args in mel require a boolean val in python
# if numArgs == 0:
# args = bool
# # numArgs will stay at 0, which is the number of mel arguments.
# # this flag should be renamed to numMelArgs
# #numArgs = 1
# elif numArgs == 1:
# args = args[0]
#
# longname = str(tokens[1][1:])
# shortname = str(tokens[0][1:])
#
# if longname in keyword.kwlist:
# removedFlags[longname] = shortname
# longname = shortname
# elif shortname in keyword.kwlist:
# removedFlags[shortname] = longname
# shortname = longname
# # sometimes the longname is empty, so we'll use the shortname for both
# elif longname == '':
# longname = shortname
#
# flags[longname] = {'longname': longname, 'shortname': shortname, 'args': args, 'numArgs': numArgs, 'docstring': ''}
# if multiuse:
# flags[longname].setdefault('modes', []).append('multiuse')
# shortFlags[shortname] = longname
#
# # except:
# # pass
# #_logger.debug("could not retrieve command info for", command)
# res = {'flags': flags, 'shortFlags': shortFlags, 'description': '', 'example': '', 'type': 'other'}
# if removedFlags:
# res['removedFlags'] = removedFlags
# return res
|
the-stack_106_18886
|
import digitalio
import constants
class Keypad:
def __init__(self, r0, r1, r2, r3, c0, c1, c2, c3):
self.pin_R0 = digitalio.DigitalInOut(r0) # Top Row
self.pin_R1 = digitalio.DigitalInOut(r1)
self.pin_R2 = digitalio.DigitalInOut(r2)
self.pin_R3 = digitalio.DigitalInOut(r3) # Bottom Row
self.pin_C0 = digitalio.DigitalInOut(c0) # Leftmost Column
self.pin_C1 = digitalio.DigitalInOut(c1)
self.pin_C2 = digitalio.DigitalInOut(c2)
self.pin_C3 = digitalio.DigitalInOut(c3) # Rightmost Column
self.rows = [self.pin_R0, self.pin_R1, self.pin_R2, self.pin_R3]
self.cols = [self.pin_C0, self.pin_C1, self.pin_C2, self.pin_C3]
self.rows[0].direction = digitalio.Direction.OUTPUT
self.rows[1].direction = digitalio.Direction.OUTPUT
self.rows[2].direction = digitalio.Direction.OUTPUT
self.rows[3].direction = digitalio.Direction.OUTPUT
self.cols[0].direction = digitalio.Direction.INPUT
self.cols[1].direction = digitalio.Direction.INPUT
self.cols[2].direction = digitalio.Direction.INPUT
self.cols[3].direction = digitalio.Direction.INPUT
self.cols[0].pull = digitalio.Pull.DOWN
self.cols[1].pull = digitalio.Pull.DOWN
self.cols[2].pull = digitalio.Pull.DOWN
self.cols[3].pull = digitalio.Pull.DOWN
def keypad_poll(self):
"""
polls the keypad and returns the button label (1,2,A,B,*,#, etc)
of the button pressed.
"""
# Set each row high and check if a column went high as well
for row in range(len(self.rows)):
self.rows[row].value = True
for col in range(len(self.cols)):
if self.cols[col].value:
self.rows[row].value = False
# print("Button: ", row, " ", col)
return constants.KEY_VALUES[row][col]
self.rows[row].value = False
# No buttons were pressed
return None
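# Usage sketch (the board pin names are assumptions; wire rows and columns to match your keypad):
#   import board
#   pad = Keypad(board.D2, board.D3, board.D4, board.D5,
#                board.D6, board.D7, board.D8, board.D9)
#   key = pad.keypad_poll()   # a label from constants.KEY_VALUES, or None if nothing is pressed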
|
the-stack_106_18888
|
from typing import Callable, List, Tuple
from palett import Preset
from palett.presets import FRESH, PLANET
from texting import COLF, RTSP
from xbrief.deco.deco_entries.deco_entries import deco_entries
from texting.enum.brackets import BRC
def deco_dict(
entries: dict,
key_read: Callable = None,
read: Callable = None,
head: int = None,
tail: int = None,
presets: Tuple[Preset] = (FRESH, PLANET),
effects: List[str] = None,
delim: str = COLF,
bracket: int = BRC,
inner_bracket: int = None,
ansi: bool = False,
dash: str = RTSP
):
return deco_entries(list(entries.items()),
key_read,
read,
head,
tail,
presets,
effects,
delim,
bracket,
inner_bracket,
ansi,
dash)
|
the-stack_106_18890
|
from copy import deepcopy
import time
from typing import List, Union
from detectron2.config import configurable
from detectron2.data import transforms as T
from detectron2.data.common import MapDataset
from detectron2.modeling import build_model
from fvcore.common.checkpoint import Checkpointer
from hydra import initialize, compose
import matplotlib.pyplot as plt
import numpy as np
from pymongo import MongoClient
import torch
from torch.utils.data import Dataset, DataLoader
from kluster import Kluster
from panoramator import Projection, Panoramator, mongo_to_shards
from tridet.data.augmentations import build_augmentation
from tridet.evaluators.kitti_3d_evaluator import convert_3d_box_to_kitti
from tridet.structures.pose import Pose
from tridet.utils.geometry import project_points3d
from tridet.utils.setup import setup
from tridet.utils.tasks import TaskManager
# Panoramator structures
class PanoramaDataset(Dataset):
def __init__(self, mongo_args, segments, keyword, projections):
kluster = Kluster(session=MongoClient(*mongo_args))
segments = kluster.fetch_data(
"segments",
{"_id": {"$in": segments}, "street_view": {"$elemMatch": {"available": True, keyword: {"$exists": False}}}}
)
self.kluster = mongo_args
lines = [
(segment["_id"], i, line["panoramas"])
for segment in segments for i, line in enumerate(segment["street_view"])
if "available" in line and keyword not in line
]
self.panoramas = [(sid, lidx, pidx, panorama)
for sid, lidx, panoramas in lines for pidx, panorama in enumerate(panoramas)]
self.keyword = keyword
self.projections = projections
def __len__(self):
return len(self.panoramas)
def __getitem__(self, idx):
if type(self.kluster) == tuple:
self.kluster = Kluster(session=MongoClient(*self.kluster))
segment_id, line_idx, panorama_idx, panorama_id = self.panoramas[idx]
panorama = self.kluster.kluster["street_view"].find_one({"_id": panorama_id})
if self.keyword in panorama: # Escape if this panorama is already predicted (but line was not marked)
return segment_id, line_idx, panorama_id, None
shards = mongo_to_shards(panorama["image_shards"])
panoramator = Panoramator(shards=shards, atomic_resolution=panorama["resolution"][0] // 16)
panoramator.build_state()
projections = [(projection_meta, panoramator.get_projection(projection_meta))
for projection_meta in self.projections]
return segment_id, line_idx, panorama_id, projections
def inference(kluster, predictor, data_loader, keyword, upload=True):
current_line = None
line_count = 0
itime = time.time()
for i, (segment_id, line_idx, panorama_id, projections) in enumerate(data_loader):
if current_line is not None and current_line != (segment_id, line_idx):
sid, lidx = current_line
if upload:
kluster.kluster["segments"].update_one({"_id": sid}, {"$set": {f"street_view.{lidx}.{keyword}": True}})
line_count += 1
print(f"Finished line {line_count}! (Segment:{sid};Index:{lidx})")
current_line = (segment_id, line_idx)
if projections is not None: # If the panorama is already predicted, we skip this block
result = []
for projection_meta, projection in projections:
predictions = predictor(projection)
result.append({"projection": projection_meta.get_dict(), **predictions})
if upload:
kluster.kluster["street_view"].update_one({"_id": panorama_id}, {"$set": {keyword: result}})
print(f"Predicted panorama {i + 1}/{len(data_loader)} "
f"(Time elapsed: {time.time() - itime:.2f}s) ({panorama_id})")
itime = time.time()
if current_line is not None:
sid, lidx = current_line
if upload:
kluster.kluster["segments"].update_one({"_id": sid}, {"$set": {f"street_view.{lidx}.{keyword}": True}})
line_count += 1
print(f"Finished line {line_count}! (Segment:{sid};Index:{lidx})")
# DD3D structures
class ParkinkDatasetMapper:
@configurable
def __init__(self, is_train: bool, task_manager, augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str, intrinsics: list, extrinsics: dict):
self.is_train = is_train
self.task_manager = task_manager
self.augmentations = T.AugmentationList(augmentations)
print("Augmentations used: " + str(augmentations))
self.image_format = image_format
self.intrinsics = intrinsics
self.extrinsics = extrinsics
@classmethod
def from_config(cls, cfg, is_train, intrinsics, extrinsics):
augs = build_augmentation(cfg, is_train)
tm = TaskManager(cfg)
return {"is_train": is_train, "task_manager": tm, "augmentations": augs, "image_format": cfg.INPUT.FORMAT,
"intrinsics": intrinsics, "extrinsics": extrinsics}
def __call__(self, parkink_data):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
segment_id, line_idx, panorama_id, projections = parkink_data
if projections is None:
return segment_id, line_idx, panorama_id, None
kitti_projections = []
for projection_meta, image in projections:
kitti = {"width": image.shape[1], "height": image.shape[0],
"intrinsics": self.intrinsics, "extrinsics": self.extrinsics}
if type(image) == torch.Tensor: # When using a DataLoader, Tensors instead of arrays will be given
image = image.numpy()
image = image[:, :, ::-1] # VERY IMPORTANT! CONVERT IMAGE FROM RGB (PIL format) TO BGR (model format)
aug_input = T.AugInput(image)
transforms = self.augmentations(aug_input)
image = aug_input.image
kitti["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
intrinsics = np.reshape(kitti["intrinsics"], (3, 3)).astype(np.float32)
intrinsics = transforms.apply_intrinsics(intrinsics)
kitti["intrinsics"] = torch.as_tensor(intrinsics)
kitti["inv_intrinsics"] = torch.as_tensor(np.linalg.inv(intrinsics))
extrinsics = Pose(wxyz=np.float32(kitti["extrinsics"]["wxyz"]),
tvec=np.float32(kitti["extrinsics"]["tvec"]))
kitti["extrinsics"] = extrinsics
kitti_projections.append((projection_meta, kitti))
return segment_id, line_idx, panorama_id, kitti_projections
def meter_to_angle(x, y, z):
# Convert meters coordinates to horizontal and vertical angles.
# We negate the vertical and so that up is positive and down is negative.
return np.array([np.arctan2(x, z), -np.arctan2(y, z)]) / np.pi * 180
def process_scene(model, input_dict, plot=False, log=False):
CLASS_MAPPER = ("Car", "Pedestrian", "Cyclist", "Van", "Truck")
THRESHOLD = 0.5
with torch.no_grad():
raw_output = model([input_dict])[0]
instances = raw_output["instances"].get_fields()
# We discard
# - instances["scores"]: scores_3d gives a more informed score, taking into account the 3d box
# - instances["locations"]: this is ~= object center, but the center given by 3d box is more accurate
# - instances["pred_boxes"]: 2d boxes, a priori useless if we have 3d ones (is this an intermediate step?)
# - instances["fpn_levels"]: This is related to at which level of the net the object is detected... useless
zipped = zip(instances["scores_3d"], instances["pred_classes"], instances["pred_boxes3d"])
subd = {"pixels": [], "meters": [], "degrees": []}
prediction = {"score": [], "kitti_class": [], "size": [], "orientation": [], "center": deepcopy(subd),
"front_upper_left": deepcopy(subd), "front_upper_right": deepcopy(subd),
"front_lower_right": deepcopy(subd), "front_lower_left": deepcopy(subd),
"back_upper_left": deepcopy(subd), "back_upper_right": deepcopy(subd),
"back_lower_right": deepcopy(subd), "back_lower_left": deepcopy(subd), "floor": deepcopy(subd)}
for score_3d, kitti_class, box_3d in zipped:
if score_3d < THRESHOLD: # If the model is not confident enough, we skip the prediction
continue
        if kitti_class not in (0, 3, 4):  # If the detected object is not a car, van or truck, we skip it
            continue
        prediction["score"].append(round(score_3d.item(), 3))
        kitti_class = CLASS_MAPPER[kitti_class]
prediction["kitti_class"].append(kitti_class)
center_pix = box_3d.proj_ctr[0].cpu().numpy() # width (x), height (y)
center_met = box_3d.tvec[0].cpu().numpy() # horizontal (left->right), vertical (up->down), depth (back->front)
center_ang = meter_to_angle(*center_met) # horizontal (left->right, degrees), vertical (down->up, degrees)
prediction["center"]["pixels"].append([round(e, 1) for e in center_pix.tolist()])
prediction["center"]["meters"].append([round(e, 2) for e in center_met.tolist()])
prediction["center"]["degrees"].append([round(e, 2) for e in center_ang.tolist()])
size = box_3d.size[0].cpu().numpy() # width, length, height (meters)
prediction["size"].append([round(e, 2) for e in size.tolist()])
floor_met = center_met + np.array([0, size[2]/2, 0])
floor_ang = meter_to_angle(*floor_met)
floor_pix = project_points3d(np.array([floor_met]), input_dict["intrinsics"].numpy())[0]
prediction["floor"]["pixels"].append([round(e, 1) for e in floor_pix.tolist()])
prediction["floor"]["meters"].append([round(e, 2) for e in floor_met.tolist()])
prediction["floor"]["degrees"].append([round(e, 2) for e in floor_ang.tolist()])
corners_met = box_3d.corners[0].cpu().numpy()
corners_ang = np.array([meter_to_angle(*corner) for corner in corners_met])
corners_pix = project_points3d(corners_met, input_dict["intrinsics"].numpy())
corners_pix = [pix * (-1 if met[2] < 0 else 1) for met, pix in zip(corners_met, corners_pix)]
keys = ["front_upper_left", "front_upper_right", "front_lower_right", "front_lower_left",
"back_upper_left", "back_upper_right", "back_lower_right", "back_lower_left"]
for key, pix, met, ang in zip(keys, corners_pix, corners_met, corners_ang):
prediction[key]["pixels"].append([round(e, 1) for e in pix.tolist()])
prediction[key]["meters"].append([round(e, 2) for e in met.tolist()])
prediction[key]["degrees"].append([round(e, 2) for e in ang.tolist()])
w, l, h, x, y, z, roty, alpha = convert_3d_box_to_kitti(box_3d)
orientation = - alpha / np.pi * 180 # The alpha in angles.png, clockwise is positive (180 to -180 range) (90 means we see car back) (-90 means we see car front)
prediction["orientation"].append(round(orientation, 2))
if log:
print(f"Confidence: {score_3d}")
print(f"Class: {kitti_class}")
print(f"Center (pixels): {center_pix}")
print(f"Center (meters): {center_met}")
print(f"Center (degrees): {center_ang}")
print(f"Size (meters): {size}")
print(f"Floor (pixels): {floor_pix}")
print(f"Floor (meters): {floor_met}")
print(f"Floor (degrees): {floor_ang}")
print(f"Corners (pixels): {corners_pix}")
print(f"Corners (meters): {corners_met}")
print(f"Corners (degrees): {corners_ang}")
print(f"Car Orientation (degrees): {orientation}")
if plot:
for a, b, c, d in [(0, 1, 2, 3), (4, 5, 6, 7), (0, 4, 7, 3), (1, 5, 6, 2), (0, 2, 1, 3)]:
coord = [corners_pix[a], corners_pix[b], corners_pix[c], corners_pix[d], corners_pix[a]]
xs, ys = zip(*coord)
plt.plot(xs, ys, color='r')
if plot:
img = input_dict["image"].cpu().numpy().transpose(1, 2, 0)[:, :, ::-1]
plt.rcParams['figure.figsize'] = [10, 5]
plt.imshow(img)
plt.show()
return prediction
# Constants
CFG_PATH = "configs/"
CFG_NAME = "kitti99_defaults"
CHECKPOINT = "models/kitti_v99.pth"
MONGO_SESSION_ARGS = ("localhost", 27017)
PREDICTION_KEYWORD = "kitti_cars"
TIMEOUT = 180
EXTRINSICS = {"wxyz": [1.0, 0.0, 0.0, 0.0], "tvec": [0.0, 0.0, 0.0]}
# !!!!! stackoverflow.com/questions/39992968/how-to-calculate-field-of-view-of-the-camera-from-camera-intrinsic-matrix
INTRINSICS = [728.5, 0.0, 640.0, 0.0, 728.5, 112.0, 0.0, 0.0, 1.0]
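# Sanity check of the intrinsics above, as a sketch of the usual pinhole FOV relation
# (fx = fy = 728.5, full frame 1280 x 544 before the vertical crop used by the projections below):
#   horizontal FOV = 2 * atan((1280 / 2) / 728.5) ~= 82.6 deg
#   vertical   FOV = 2 * atan(( 544 / 2) / 728.5) ~= 40.95 deg
# which matches fov_horizontal / fov_vertical in PROJECTIONS.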
PROJECTIONS = [Projection(center_horizontal=0, center_vertical=-1, fov_horizontal=82.6, fov_vertical=40.95,
full_resolution_x=1280, full_resolution_y=544,
offset_x=0, offset_y=544-384, resolution_x=1280, resolution_y=384),
Projection(center_horizontal=180, center_vertical=-1, fov_horizontal=82.6, fov_vertical=40.95,
full_resolution_x=1280, full_resolution_y=544,
offset_x=0, offset_y=544-384, resolution_x=1280, resolution_y=384)] # 0.792 1685
MIN_LAT, MAX_LAT = 41.35, 41.5
MIN_LON, MAX_LON = 2.1, 2.3
PLOT = False
LOG = False
UPLOAD = True
# Main Execution
if __name__ == "__main__":
# StreetView initializations
main_kluster = Kluster(session=MongoClient(*MONGO_SESSION_ARGS))
bounding_polygon = [(MIN_LAT, MIN_LON), (MIN_LAT, MAX_LON), (MAX_LAT, MAX_LON),
(MAX_LAT, MIN_LON), (MIN_LAT, MIN_LON)]
bounding_polygon = {"type": "Polygon", "coordinates": [[[lon, lat] for lat, lon in bounding_polygon]]}
# DD3D initializations
with initialize(config_path=CFG_PATH):
cfg = compose(config_name=CFG_NAME)
setup(cfg)
dd3d_model = build_model(cfg).eval()
Checkpointer(dd3d_model).load(CHECKPOINT)
dd3d_predictor = lambda image: process_scene(dd3d_model, image, plot=PLOT, log=LOG)
# Load segment_ids of interest
ways = main_kluster.fetch_data("ways", {"path": {"$geoIntersects": {"$geometry": bounding_polygon}}})
segment_ids = [seg_id for way in ways for seg_id in way["segments"].values()]
# Do the inference, and when it finishes keep looking for new panoramas
while True:
dataset = PanoramaDataset(MONGO_SESSION_ARGS, segment_ids, PREDICTION_KEYWORD, PROJECTIONS)
mapper = ParkinkDatasetMapper(cfg, is_train=False, intrinsics=INTRINSICS, extrinsics=EXTRINSICS)
dataset = MapDataset(dataset, mapper)
if len(dataset):
print(f"LAUNCHING INFERENCE ON {len(dataset)} PANORAMAS")
loader = DataLoader(dataset, batch_size=None, num_workers=4)
inference(main_kluster, dd3d_predictor, loader, PREDICTION_KEYWORD, upload=UPLOAD)
else:
print(f"NO PANORAMAS FOUND! WAITING {TIMEOUT} seconds...")
            time.sleep(TIMEOUT)
|
the-stack_106_18892
|
from random import randint
class NListItem:
@staticmethod
def pick_from_list(series, realm):
series2 = []
length = len(series)
for each in range(realm):
position = randint(0, length-1)
            series2.append(series[position])
return series2
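# Usage sketch: draws `realm` items from `series` uniformly at random, with replacement.
#   NListItem.pick_from_list(['a', 'b', 'c'], 5)   # -> e.g. ['b', 'a', 'a', 'c', 'b']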
|
the-stack_106_18893
|
#
"""
"""
# end_pymotw_header
import xmlrpc.client
import pickle
import pprint
class MyObj:
def __init__(self, a, b):
self.a = a
self.b = b
def __repr__(self):
return "MyObj({!r}, {!r})".format(self.a, self.b)
server = xmlrpc.client.ServerProxy("http://localhost:9000")
o = MyObj(1, "b goes here")
print("Local:", id(o))
print(o)
print("\nAs object:")
pprint.pprint(server.show_type(o))
p = pickle.dumps(o)
b = xmlrpc.client.Binary(p)
r = server.send_back_binary(b)
o2 = pickle.loads(r.data)
print("\nFrom pickle:", id(o2))
pprint.pprint(o2)
|
the-stack_106_18894
|
# coding: utf-8
import pprint
import re
import six
class BatchCreateVolumeTagsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'volume_id': 'str',
'body': 'BatchCreateVolumeTagsRequestBody'
}
attribute_map = {
'volume_id': 'volume_id',
'body': 'body'
}
def __init__(self, volume_id=None, body=None):
"""BatchCreateVolumeTagsRequest - a model defined in huaweicloud sdk"""
self._volume_id = None
self._body = None
self.discriminator = None
self.volume_id = volume_id
if body is not None:
self.body = body
@property
def volume_id(self):
"""Gets the volume_id of this BatchCreateVolumeTagsRequest.
:return: The volume_id of this BatchCreateVolumeTagsRequest.
:rtype: str
"""
return self._volume_id
@volume_id.setter
def volume_id(self, volume_id):
"""Sets the volume_id of this BatchCreateVolumeTagsRequest.
:param volume_id: The volume_id of this BatchCreateVolumeTagsRequest.
:type: str
"""
self._volume_id = volume_id
@property
def body(self):
"""Gets the body of this BatchCreateVolumeTagsRequest.
:return: The body of this BatchCreateVolumeTagsRequest.
:rtype: BatchCreateVolumeTagsRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this BatchCreateVolumeTagsRequest.
:param body: The body of this BatchCreateVolumeTagsRequest.
:type: BatchCreateVolumeTagsRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchCreateVolumeTagsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_18895
|
import logging
from typing import List, Dict, Text, Optional, Any, Set, TYPE_CHECKING
import json
from rasa.core.events import FormValidation
from rasa.core.featurizers.tracker_featurizers import TrackerFeaturizer
from rasa.core.domain import Domain, InvalidDomain, State
from rasa.core.interpreter import NaturalLanguageInterpreter
from rasa.core.policies.memoization import MemoizationPolicy
from rasa.core.policies.policy import SupportedData
from rasa.core.trackers import (
DialogueStateTracker,
get_active_loop_name,
is_prev_action_listen_in_state,
)
from rasa.core.training.generator import TrackerWithCachedStates
from rasa.core.constants import (
FORM_POLICY_PRIORITY,
USER_INTENT_RESTART,
USER_INTENT_BACK,
USER_INTENT_SESSION_START,
SHOULD_NOT_BE_SET,
PREVIOUS_ACTION,
LOOP_REJECTED,
)
from rasa.core.actions.action import (
ACTION_LISTEN_NAME,
ACTION_RESTART_NAME,
ACTION_BACK_NAME,
ACTION_SESSION_START_NAME,
RULE_SNIPPET_ACTION_NAME,
ACTION_DEFAULT_FALLBACK_NAME,
)
from rasa.nlu.constants import INTENT_NAME_KEY
from rasa.shared.nlu.constants import ACTION_NAME
if TYPE_CHECKING:
from rasa.core.policies.ensemble import PolicyEnsemble # pytype: disable=pyi-error
logger = logging.getLogger(__name__)
# These are Rasa Open Source default actions and overrule everything at any time.
DEFAULT_ACTION_MAPPINGS = {
USER_INTENT_RESTART: ACTION_RESTART_NAME,
USER_INTENT_BACK: ACTION_BACK_NAME,
USER_INTENT_SESSION_START: ACTION_SESSION_START_NAME,
}
RULES = "rules"
RULES_FOR_LOOP_UNHAPPY_PATH = "rules_for_loop_unhappy_path"
DO_NOT_VALIDATE_LOOP = "do_not_validate_loop"
DO_NOT_PREDICT_LOOP_ACTION = "do_not_predict_loop_action"
class RulePolicy(MemoizationPolicy):
"""Policy which handles all the rules"""
ENABLE_FEATURE_STRING_COMPRESSION = False
@staticmethod
def supported_data() -> SupportedData:
"""The type of data supported by this policy.
Returns:
The data type supported by this policy (rule data).
"""
return SupportedData.ML_AND_RULE_DATA
def __init__(
self,
featurizer: Optional[TrackerFeaturizer] = None,
priority: int = FORM_POLICY_PRIORITY,
lookup: Optional[Dict] = None,
core_fallback_threshold: float = 0.3,
core_fallback_action_name: Text = ACTION_DEFAULT_FALLBACK_NAME,
enable_fallback_prediction: bool = True,
) -> None:
"""Create a `RulePolicy` object.
Args:
featurizer: `Featurizer` which is used to convert conversation states to
features.
priority: Priority of the policy which is used if multiple policies predict
actions with the same confidence.
lookup: Lookup table which is used to pick matching rules for a conversation
state.
core_fallback_threshold: Confidence of the prediction if no rule matched
and de-facto threshold for a core fallback.
core_fallback_action_name: Name of the action which should be predicted
if no rule matched.
enable_fallback_prediction: If `True` `core_fallback_action_name` is
predicted in case no rule matched.
"""
self._core_fallback_threshold = core_fallback_threshold
self._fallback_action_name = core_fallback_action_name
self._enable_fallback_prediction = enable_fallback_prediction
# max history is set to `None` in order to capture any lengths of rule stories
super().__init__(
featurizer=featurizer, priority=priority, max_history=None, lookup=lookup
)
@classmethod
def validate_against_domain(
cls, ensemble: Optional["PolicyEnsemble"], domain: Optional[Domain]
) -> None:
if ensemble is None:
return
rule_policy = next(
(p for p in ensemble.policies if isinstance(p, RulePolicy)), None
)
if not rule_policy or not rule_policy._enable_fallback_prediction:
return
if (
domain is None
or rule_policy._fallback_action_name not in domain.action_names
):
raise InvalidDomain(
f"The fallback action '{rule_policy._fallback_action_name}' which was "
f"configured for the {RulePolicy.__name__} must be present in the "
f"domain."
)
@staticmethod
def _is_rule_snippet_state(state: State) -> bool:
prev_action_name = state.get(PREVIOUS_ACTION, {}).get(ACTION_NAME)
return prev_action_name == RULE_SNIPPET_ACTION_NAME
def _create_feature_key(self, states: List[State]) -> Optional[Text]:
new_states = []
for state in reversed(states):
if self._is_rule_snippet_state(state):
# remove all states before RULE_SNIPPET_ACTION_NAME
break
new_states.insert(0, state)
if not new_states:
return
# we sort keys to make sure that the same states
# represented as dictionaries have the same json strings
return json.dumps(new_states, sort_keys=True)
@staticmethod
def _states_for_unhappy_loop_predictions(states: List[State]) -> List[State]:
"""Modifies the states to create feature keys for loop unhappy path conditions.
Args:
states: a representation of a tracker
as a list of dictionaries containing features
Returns:
modified states
"""
# leave only last 2 dialogue turns to
# - capture previous meaningful action before action_listen
# - ignore previous intent
if len(states) == 1 or not states[-2].get(PREVIOUS_ACTION):
return [states[-1]]
else:
return [{PREVIOUS_ACTION: states[-2][PREVIOUS_ACTION]}, states[-1]]
@staticmethod
def _remove_rule_snippet_predictions(lookup: Dict[Text, Text]) -> Dict[Text, Text]:
# Delete rules if it would predict the RULE_SNIPPET_ACTION_NAME action
return {
feature_key: action
for feature_key, action in lookup.items()
if action != RULE_SNIPPET_ACTION_NAME
}
def _create_loop_unhappy_lookup_from_states(
self,
trackers_as_states: List[List[State]],
trackers_as_actions: List[List[Text]],
) -> Dict[Text, Text]:
"""Creates lookup dictionary from the tracker represented as states.
Args:
trackers_as_states: representation of the trackers as a list of states
trackers_as_actions: representation of the trackers as a list of actions
Returns:
lookup dictionary
"""
lookup = {}
for states, actions in zip(trackers_as_states, trackers_as_actions):
action = actions[0]
active_loop = get_active_loop_name(states[-1])
# even if there are two identical feature keys
# their loop will be the same
if not active_loop:
continue
states = self._states_for_unhappy_loop_predictions(states)
feature_key = self._create_feature_key(states)
if not feature_key:
continue
# Since rule snippets and stories inside the loop contain
# only unhappy paths, notify the loop that
# it was predicted after an answer to a different question and
# therefore it should not validate user input
if (
# loop is predicted after action_listen in unhappy path,
# therefore no validation is needed
is_prev_action_listen_in_state(states[-1])
and action == active_loop
):
lookup[feature_key] = DO_NOT_VALIDATE_LOOP
elif (
# some action other than action_listen and active_loop
# is predicted in unhappy path,
# therefore active_loop shouldn't be predicted by the rule
not is_prev_action_listen_in_state(states[-1])
and action not in {ACTION_LISTEN_NAME, active_loop}
):
lookup[feature_key] = DO_NOT_PREDICT_LOOP_ACTION
return lookup
def train(
self,
training_trackers: List[TrackerWithCachedStates],
domain: Domain,
interpreter: NaturalLanguageInterpreter,
**kwargs: Any,
) -> None:
# only consider original trackers (no augmented ones)
training_trackers = [
t
for t in training_trackers
if not hasattr(t, "is_augmented") or not t.is_augmented
]
# only use trackers from rule-based training data
rule_trackers = [t for t in training_trackers if t.is_rule_tracker]
(
rule_trackers_as_states,
rule_trackers_as_actions,
) = self.featurizer.training_states_and_actions(rule_trackers, domain)
rules_lookup = self._create_lookup_from_states(
rule_trackers_as_states, rule_trackers_as_actions
)
self.lookup[RULES] = self._remove_rule_snippet_predictions(rules_lookup)
story_trackers = [t for t in training_trackers if not t.is_rule_tracker]
(
story_trackers_as_states,
story_trackers_as_actions,
) = self.featurizer.training_states_and_actions(story_trackers, domain)
# use all trackers to find negative rules in unhappy paths
trackers_as_states = rule_trackers_as_states + story_trackers_as_states
trackers_as_actions = rule_trackers_as_actions + story_trackers_as_actions
# negative rules are not anti-rules, they are auxiliary to actual rules
self.lookup[
RULES_FOR_LOOP_UNHAPPY_PATH
] = self._create_loop_unhappy_lookup_from_states(
trackers_as_states, trackers_as_actions
)
# TODO use story_trackers and rule_trackers
# to check that stories don't contradict rules
logger.debug(f"Memorized '{len(self.lookup[RULES])}' unique rules.")
@staticmethod
def _does_rule_match_state(rule_state: State, conversation_state: State) -> bool:
for state_type, rule_sub_state in rule_state.items():
conversation_sub_state = conversation_state.get(state_type, {})
for key, value in rule_sub_state.items():
if isinstance(value, list):
# json dumps and loads tuples as lists,
# so we need to convert them back
value = tuple(value)
if (
# value should be set, therefore
# check whether it is the same as in the state
value
and value != SHOULD_NOT_BE_SET
and conversation_sub_state.get(key) != value
) or (
# value shouldn't be set, therefore
# it should be None or non existent in the state
value == SHOULD_NOT_BE_SET
and conversation_sub_state.get(key)
):
return False
return True
@staticmethod
def _rule_key_to_state(rule_key: Text) -> List[State]:
return json.loads(rule_key)
def _is_rule_applicable(
self, rule_key: Text, turn_index: int, conversation_state: State
) -> bool:
"""Check if rule is satisfied with current state at turn."""
# turn_index goes back in time
reversed_rule_states = list(reversed(self._rule_key_to_state(rule_key)))
return bool(
# rule is shorter than current turn index
turn_index >= len(reversed_rule_states)
# current rule and state turns are empty
or (not reversed_rule_states[turn_index] and not conversation_state)
# check that current rule turn features are present in current state turn
or (
reversed_rule_states[turn_index]
and conversation_state
and self._does_rule_match_state(
reversed_rule_states[turn_index], conversation_state
)
)
)
def _get_possible_keys(
self, lookup: Dict[Text, Text], states: List[State]
) -> Set[Text]:
possible_keys = set(lookup.keys())
for i, state in enumerate(reversed(states)):
# find rule keys that correspond to current state
possible_keys = set(
filter(
lambda _key: self._is_rule_applicable(_key, i, state), possible_keys
)
)
return possible_keys
@staticmethod
def _find_action_from_default_actions(
tracker: DialogueStateTracker,
) -> Optional[Text]:
if (
not tracker.latest_action_name == ACTION_LISTEN_NAME
or not tracker.latest_message
):
return None
default_action_name = DEFAULT_ACTION_MAPPINGS.get(
tracker.latest_message.intent.get(INTENT_NAME_KEY)
)
if default_action_name:
logger.debug(f"Predicted default action '{default_action_name}'.")
return default_action_name
@staticmethod
def _find_action_from_loop_happy_path(
tracker: DialogueStateTracker,
) -> Optional[Text]:
active_loop_name = tracker.active_loop_name
active_loop_rejected = tracker.active_loop.get(LOOP_REJECTED)
should_predict_loop = (
active_loop_name
and not active_loop_rejected
and tracker.latest_action.get(ACTION_NAME) != active_loop_name
)
should_predict_listen = (
active_loop_name
and not active_loop_rejected
and tracker.latest_action_name == active_loop_name
)
if should_predict_loop:
logger.debug(f"Predicted loop '{active_loop_name}'.")
return active_loop_name
# predict `action_listen` if loop action was run successfully
if should_predict_listen:
logger.debug(
f"Predicted '{ACTION_LISTEN_NAME}' after loop '{active_loop_name}'."
)
return ACTION_LISTEN_NAME
def _find_action_from_rules(
self, tracker: DialogueStateTracker, domain: Domain
) -> Optional[Text]:
tracker_as_states = self.featurizer.prediction_states([tracker], domain)
states = tracker_as_states[0]
logger.debug(f"Current tracker state: {states}")
rule_keys = self._get_possible_keys(self.lookup[RULES], states)
predicted_action_name = None
best_rule_key = ""
if rule_keys:
# if there are several rules,
# it should mean that some rule is a subset of another rule
# therefore we pick a rule of maximum length
best_rule_key = max(rule_keys, key=len)
predicted_action_name = self.lookup[RULES].get(best_rule_key)
active_loop_name = tracker.active_loop_name
if active_loop_name:
# find rules for unhappy path of the loop
loop_unhappy_keys = self._get_possible_keys(
self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH], states
)
# there could be several unhappy path conditions
unhappy_path_conditions = [
self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH].get(key)
for key in loop_unhappy_keys
]
# Check if a rule that predicted action_listen
# was applied inside the loop.
# Rules might not explicitly switch back to the loop.
# Hence, we have to take care of that.
predicted_listen_from_general_rule = (
predicted_action_name == ACTION_LISTEN_NAME
and not get_active_loop_name(self._rule_key_to_state(best_rule_key)[-1])
)
if predicted_listen_from_general_rule:
if DO_NOT_PREDICT_LOOP_ACTION not in unhappy_path_conditions:
# negative rules don't contain a key that corresponds to
# the fact that active_loop shouldn't be predicted
logger.debug(
f"Predicted loop '{active_loop_name}' by overwriting "
f"'{ACTION_LISTEN_NAME}' predicted by general rule."
)
return active_loop_name
# do not predict anything
predicted_action_name = None
if DO_NOT_VALIDATE_LOOP in unhappy_path_conditions:
logger.debug("Added `FormValidation(False)` event.")
tracker.update(FormValidation(False))
if predicted_action_name is not None:
logger.debug(
f"There is a rule for the next action '{predicted_action_name}'."
)
else:
logger.debug("There is no applicable rule.")
return predicted_action_name
def predict_action_probabilities(
self,
tracker: DialogueStateTracker,
domain: Domain,
interpreter: NaturalLanguageInterpreter,
**kwargs: Any,
) -> List[float]:
result = self._default_predictions(domain)
# Rasa Open Source default actions overrule anything. If users want to achieve
# the same, they need to write a rule or make sure that their loop rejects
# accordingly.
default_action_name = self._find_action_from_default_actions(tracker)
if default_action_name:
return self._prediction_result(default_action_name, tracker, domain)
# A loop has priority over any other rule.
# The rules or any other prediction will be applied only if a loop was rejected.
# If we are in a loop, and the loop didn't run previously or rejected, we can
# simply force predict the loop.
loop_happy_path_action_name = self._find_action_from_loop_happy_path(tracker)
if loop_happy_path_action_name:
return self._prediction_result(loop_happy_path_action_name, tracker, domain)
rules_action_name = self._find_action_from_rules(tracker, domain)
if rules_action_name:
return self._prediction_result(rules_action_name, tracker, domain)
return result
def _default_predictions(self, domain: Domain) -> List[float]:
result = super()._default_predictions(domain)
if self._enable_fallback_prediction:
result[
domain.index_for_action(self._fallback_action_name)
] = self._core_fallback_threshold
return result
|
the-stack_106_18896
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Graphviz wrapper to visualize SEM models."""
from .model import Model
import logging
try:
import graphviz
__GRAPHVIZ = True
except ModuleNotFoundError:
logging.info("No graphviz package found, visualization method is "
"unavailable")
__GRAPHVIZ = False
def semplot(mod, filename: str, inspection=None, plot_covs=False,
plot_exos=True, images=None, engine='dot', latshape='circle',
plot_ests=True, std_ests=False, show=False):
"""
Draw a SEM diagram.
Parameters
----------
mod : Model | str
Model instance.
filename : str
        Name of the file where the plot is saved.
inspection : pd.DataFrame, optional
Parameter estimates as returned by Model.inspect(). The default is
None.
plot_covs : bool, optional
If True, covariances are also drawn. The default is False.
plot_exos: bool, optional
If False, exogenous variables are not plotted. It might be useful,
for example, in GWAS setting, where a number of exogenous variables,
        i.e. genetic markers, is very large. Has an effect only with ModelMeans or
ModelEffects. The default is True.
images : dict, optional
Node labels can be replaced with images. It will be the case if a map
variable_name->path_to_image is provided. The default is None.
engine : str, optional
Graphviz engine name to use. The default is 'dot'.
latshape : str, optional
Graphviz-compaitable shape for latent variables. The default is
'circle'.
plot_ests : bool, optional
If True, then estimates are also plotted on the graph. The default is
True.
std_ests : bool, optional
If True and plot_ests is True, then standardized values are plotted
instead. The default is False.
show : bool, optional
        If True, the rendered diagram is opened after saving. The default is False.
Returns
-------
Graphviz graph.
"""
if not __GRAPHVIZ:
raise ModuleNotFoundError("No graphviz module is installed.")
if type(mod) is str:
mod = Model(mod)
if not hasattr(mod, 'last_result'):
plot_ests = False
if inspection is None:
inspection = mod.inspect(std_est=std_ests)
if images is None:
images = dict()
if std_ests:
inspection['Estimate'] = inspection['Est. Std']
t = filename.split('.')
filename, ext = '.'.join(t[:-1]), t[-1]
g = graphviz.Digraph('G', format=ext, engine=engine)
g.attr(overlap='scale', splines='true')
g.attr('edge', fontsize='12')
g.attr('node', shape=latshape, fillcolor='#cae6df', style='filled')
for lat in mod.vars['latent']:
if lat in images:
g.node(lat, label='', image=images[lat])
else:
g.node(lat, label=lat)
g.attr('node', shape='box', style='')
for obs in mod.vars['observed']:
if obs in images:
g.node(obs, label='', image=images[obs])
else:
g.node(obs, label=obs)
regr = inspection[inspection['op'] == '~']
all_vars = mod.vars['all']
try:
exo_vars = mod.vars['observed_exogenous']
except KeyError:
exo_vars = set()
for _, row in regr.iterrows():
lval, rval, est = row['lval'], row['rval'], row['Estimate']
        if (rval not in all_vars) or (not plot_exos and rval in exo_vars) or \
(rval == '1'):
continue
if plot_ests:
pval = row['p-value']
label = '{:.3f}'.format(float(est))
if pval != '-':
label += r'\np-val: {:.2f}'.format(float(pval))
else:
label = str()
g.edge(rval, lval, label=label)
if plot_covs:
covs = inspection[inspection['op'] == '~~']
for _, row in covs.iterrows():
lval, rval, est = row['lval'], row['rval'], row['Estimate']
if lval == rval:
continue
if plot_ests:
pval = row['p-value']
label = '{:.3f}'.format(float(est))
if pval != '-':
label += r'\np-val: {:.2f}'.format(float(pval))
else:
label = str()
g.edge(rval, lval, label=label, dir='both', style='dashed')
g.render(filename, view=show)
return g
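# Minimal usage sketch (assuming this module is used through the semopy package; the model
# description, data and file name are illustrative):
#
#     from semopy import Model
#     desc = "eta1 =~ y1 + y2 + y3\neta1 ~ x1"
#     m = Model(desc)
#     m.fit(data)                     # data: pandas DataFrame with the observed variables
#     semplot(m, "model.png", plot_covs=True, std_ests=True)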
|
the-stack_106_18898
|
# -*- coding: utf-8 -*-
"""Implements a StorageFile output module."""
from plaso.lib import event
from plaso.lib import storage
from plaso.lib import timelib
from plaso.output import interface
from plaso.output import manager
class PlasoStorageOutputModule(interface.OutputModule):
"""Dumps event objects to a plaso storage file."""
NAME = u'pstorage'
DESCRIPTION = u'Dumps event objects to a plaso storage file.'
def __init__(self, output_mediator):
"""Initializes the output module object.
Args:
output_mediator: The output mediator object (instance of OutputMediator).
"""
super(PlasoStorageOutputModule, self).__init__(output_mediator)
self._file_object = None
self._storage = None
def Close(self):
"""Closes the plaso storage file."""
self._storage.Close()
def Open(self):
"""Opens the plaso storage file."""
pre_obj = event.PreprocessObject()
pre_obj.collection_information = {
u'time_of_run': timelib.Timestamp.GetNow()}
filter_expression = self._output_mediator.filter_expression
if filter_expression:
pre_obj.collection_information[u'filter'] = filter_expression
storage_file_path = self._output_mediator.storage_file_path
if storage_file_path:
pre_obj.collection_information[u'file_processed'] = storage_file_path
self._storage = storage.StorageFile(self._file_object, pre_obj=pre_obj)
def SetFilehandle(self, file_path=None, file_object=None):
"""Sets the filehandle.
Args:
file_path: the full path to the output file.
file_object: a file like object to use for a filehandle.
"""
if file_object:
self._file_object = file_object
return
if file_path:
self._file_object = open(file_path, 'wb')
def WriteEventBody(self, event_object):
"""Writes the body of an event object to the output.
Args:
event_object: the event object (instance of EventObject).
"""
    # Needed due to duplicate removals: if two events are merged, we just pick
    # the first inode value.
inode = getattr(event_object, u'inode', None)
if isinstance(inode, basestring):
inode_list = inode.split(u';')
try:
new_inode = int(inode_list[0])
except (ValueError, IndexError):
new_inode = 0
event_object.inode = new_inode
self._storage.AddEventObject(event_object)
manager.OutputManager.RegisterOutput(PlasoStorageOutputModule)
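# A hypothetical lifecycle sketch (in practice the plaso output manager drives
# these calls; `output_mediator` and `event_object` are assumed to exist):
#
#     module = PlasoStorageOutputModule(output_mediator)
#     module.SetFilehandle(file_path=u'events.plaso')
#     module.Open()
#     module.WriteEventBody(event_object)
#     module.Close()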
|
the-stack_106_18901
|
import numpy as np
import pickle as pkl
import matplotlib.pyplot as plt
import os
import pandas as pd
working_dir = '/home/az396/project/mayfly'
signal_path = 'data/signals'
template_path = 'data/templates'
simulation_name = '210610_epa_grid'
#signal_metadata =
analysis_date = '210611'
plot_path = '/home/az396/project/mayfly/analysis/plotting/plots'
result_path = '/home/az396/project/mayfly/analysis/results'
result_date = '210610'
result_name = 'epa_grid_self_scores'
result_file = f'{result_date}_{result_name}.pkl'
new_result_name = f'{analysis_date}_{result_name}_animation_matrices.pkl'
with open(os.path.join(result_path, result_file), 'rb') as infile:
result = pd.DataFrame(pkl.load(infile))
def Compute2DMFScoreGrid(template_x_key, template_y_key, fix_sig_x_key, fix_sig_y_key, signal_index, result):
h_x_list = result[template_x_key].unique()
h_y_list = result[template_y_key].unique()
x_x_list = result[fix_sig_x_key].unique()
x_y_list = result[fix_sig_y_key].unique()
grid_x_size = h_x_list.size
grid_y_size = h_y_list.size
grid = np.zeros((grid_y_size, grid_x_size))
signal_x = x_x_list[signal_index]
signal_y = x_y_list[signal_index]
print(signal_x, signal_y)
selected_result = result[
(result[fix_sig_x_key] == signal_x) &
(result[fix_sig_y_key] == signal_y)
]
for i, h_x in enumerate(h_x_list):
for j, h_y in enumerate(h_y_list):
grid[i, j] = selected_result[
(result[template_x_key] == h_x) &
(result[template_y_key] == h_y)
]['T']
return grid, signal_x, signal_y
#print(result['x_pa'].unique().shape)
grid_list = []
signal_x_list = []
signal_y_list = []
for i in range(result['x_pa'].unique().shape[0]):
print(i)
grid, signal_x, signal_y = Compute2DMFScoreGrid('h_E', 'h_pa', 'x_E', 'x_pa', i, result)
grid_list.append(grid)
signal_x_list.append(signal_x)
signal_y_list.append(signal_y)
with open(os.path.join(result_path, new_result_name), 'wb') as outfile:
pkl.dump({'grids': grid_list, 'sig_x': signal_x_list, 'sig_y': signal_y_list}, outfile)
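# Reading the animation matrices back later might look like this (a sketch):
#
#     with open(os.path.join(result_path, new_result_name), 'rb') as infile:
#         data = pkl.load(infile)
#     grids, sig_x, sig_y = data['grids'], data['sig_x'], data['sig_y']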
|
the-stack_106_18903
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.b (the "License");
# you may not use this file except in compliance with the License.
#
# You can find misc modules, which don't fit in anything xD
""" Userbot module for other small commands. """
from random import randint
from time import sleep
from userbot import LOGGER, LOGGER_GROUP
from userbot.events import register
@register(outgoing=True, pattern="^.random")
async def randomise(items):
"""For .random command, get a random item from the list of items."""
if not items.text[0].isalpha() and items.text[0] not in ("/", "#", "@", "!"):
itemo = (items.text[8:]).split()
index = randint(1, len(itemo) - 1)
await items.edit(
"**Query: **\n`"
+ items.text[8:]
+ "`\n**Output: **\n`"
+ itemo[index]
+ "`"
)
@register(outgoing=True, pattern="^.sleep( [0-9]+)?$")
async def sleepybot(time):
"""For .sleep command, let the userbot snooze for a few second."""
message = time.text
if not message[0].isalpha() and message[0] not in ("/", "#", "@", "!"):
        if time.pattern_match.group(1) is None:
await time.reply("Syntax: `.sleep [seconds]`")
else:
counter = int(time.pattern_match.group(1))
await time.edit("`I am sulking and snoozing....`")
sleep(2)
if LOGGER:
await time.client.send_message(
LOGGER_GROUP,
"You put the bot to sleep for " + str(counter) + " seconds",
)
sleep(counter)
|
the-stack_106_18904
|
from openpyxl import Workbook
from openpyxl import load_workbook
import gspread
from oauth2client.service_account import ServiceAccountCredentials
readWorkBook = load_workbook('attendanceCheck.xlsx')
readWorkSheet = readWorkBook.worksheets[0]
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('barcodeID-spreadsheet.json', scope)
client = gspread.authorize(creds)
sheet = client.open('barcodeID spreadsheet').sheet1
def coder(a):
if a <= 9:
b = 7-a
if b < 0:
c = 10 + b
return '119000{}42198{}'.format(a, c)
return '119000{}42198{}'.format(a, b)
else:
b = 14 - a
if b < 0:
c = 10 + b
return '11900{}42198{}'.format(a, c)
return '11900{}42198{}'.format(a, b)
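# For example (illustrating the check digit above): coder(3) -> '1190003421984'
# (7 - 3 = 4) and coder(9) -> '1190009421988' (7 - 9 = -2 wraps to 8); two-digit
# IDs use 14 - a instead, e.g. coder(12) -> '1190012421982'.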
def flush():
identity = int(input('Enter student ID.\n'))
barCode = coder(identity)
for cell in readWorkSheet[1]:
if cell.value == barCode:
column = cell.column
columnLetter = chr(64+column)
for i in range(2,10):
readWorkSheet['{}{}'.format(columnLetter, i)] = ' '
sheet.update_cell(i, column, ' ')
readWorkSheet['{}10'.format(columnLetter)] = '0'
sheet.update_cell(10, column, '0')
print('Flushed.')
readWorkBook.save("attendanceCheck.xlsx")
flush()
|
the-stack_106_18906
|
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file", "http_jar")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
RULES_NODEJS_VERSION = "0.36.1"
RULES_NODEJS_SHA = "3356c6b767403392bab018ce91625f6d15ff8f11c6d772dc84bc9cada01c669a"
BAZEL_TOOLCHAINS_VERSION = "be10bee3010494721f08a0fccd7f57411a1e773e"
BAZEL_TOOLCHAINS_SHA = "5962fe677a43226c409316fcb321d668fc4b7fa97cb1f9ef45e7dc2676097b26"
SKYLIB_VERSION = "0.8.0"
SKYLIB_SHA = "2ea8a5ed2b448baf4a6855d3ce049c4c452a6470b1efd1504fdb7c1c134d220a"
PROTOBUF_VERSION = "3.11.3"
PROTOBUF_SHA = "cf754718b0aa945b00550ed7962ddc167167bd922b842199eeb6505e6f344852"
BAZEL_DEPS_VERSION = "0.1.0"
BAZEL_DEPS_SHA = "05498224710808be9687f5b9a906d11dd29ad592020246d4cd1a26eeaed0735e"
RULES_JVM_EXTERNAL_TAG = "2.7"
RULES_JVM_EXTERNAL_SHA = "f04b1466a00a2845106801e0c5cec96841f49ea4e7d1df88dc8e4bf31523df74"
RULES_PROTO_GIT_COMMIT = "f6b8d89b90a7956f6782a4a3609b2f0eee3ce965"
RULES_PROTO_SHA = "4d421d51f9ecfe9bf96ab23b55c6f2b809cbaf0eea24952683e397decfbd0dd0"
def kt_download_local_dev_dependencies():
"""
Downloads all necessary http_* artifacts for rules_kotlin dev configuration.
Must be called before setup_dependencies in the WORKSPACE.
"""
maybe(
http_archive,
name = "com_google_protobuf",
sha256 = PROTOBUF_SHA,
strip_prefix = "protobuf-%s" % PROTOBUF_VERSION,
urls = [
"https://mirror.bazel.build/github.com/protocolbuffers/protobuf/archive/v%s.tar.gz" % PROTOBUF_VERSION,
"https://github.com/protocolbuffers/protobuf/archive/v%s.tar.gz" % PROTOBUF_VERSION,
],
)
maybe(
http_archive,
name = "rules_proto",
sha256 = RULES_PROTO_SHA,
strip_prefix = "rules_proto-%s" % RULES_PROTO_GIT_COMMIT,
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_proto/archive/%s.tar.gz" % RULES_PROTO_GIT_COMMIT,
"https://github.com/bazelbuild/rules_proto/archive/%s.tar.gz" % RULES_PROTO_GIT_COMMIT,
],
)
maybe(
http_archive,
name = "bazel_skylib",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/%s.tar.gz" % SKYLIB_VERSION],
strip_prefix = "bazel-skylib-%s" % SKYLIB_VERSION,
sha256 = SKYLIB_SHA,
)
maybe(
http_jar,
name = "bazel_deps",
sha256 = BAZEL_DEPS_SHA,
url = "https://github.com/hsyed/bazel-deps/releases/download/v%s/parseproject_deploy.jar" % BAZEL_DEPS_VERSION,
)
maybe(
http_archive,
name = "bazel_toolchains",
sha256 = BAZEL_TOOLCHAINS_SHA,
strip_prefix = "bazel-toolchains-%s" % BAZEL_TOOLCHAINS_VERSION,
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/%s.tar.gz" % BAZEL_TOOLCHAINS_VERSION,
"https://github.com/bazelbuild/bazel-toolchains/archive/%s.tar.gz" % BAZEL_TOOLCHAINS_VERSION,
],
)
maybe(
http_archive,
name = "build_bazel_rules_nodejs",
sha256 = RULES_NODEJS_SHA,
url = "https://github.com/bazelbuild/rules_nodejs/releases/download/{0}/rules_nodejs-{0}.tar.gz".format(RULES_NODEJS_VERSION),
)
maybe(
http_archive,
name = "rules_jvm_external",
sha256 = RULES_JVM_EXTERNAL_SHA,
strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG,
url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG,
)
maybe(
http_archive,
name = "rules_pkg",
url = "https://github.com/bazelbuild/rules_pkg/releases/download/0.2.4/rules_pkg-0.2.4.tar.gz",
sha256 = "4ba8f4ab0ff85f2484287ab06c0d871dcb31cc54d439457d28fd4ae14b18450a",
)
maybe(
git_repository,
name = "io_bazel_stardoc",
remote = "https://github.com/bazelbuild/stardoc.git",
tag = "0.4.0",
)
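# A hypothetical WORKSPACE sketch (the load label below is illustrative, not
# necessarily the canonical rules_kotlin path):
#
#     load("//:deps.bzl", "kt_download_local_dev_dependencies")
#     kt_download_local_dev_dependencies()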
|
the-stack_106_18908
|
import logging
import boto3
import botocore.exceptions
from cartography.util import run_cleanup_job
logger = logging.getLogger(__name__)
def get_account_from_arn(arn):
# TODO use policyuniverse to parse ARN?
return arn.split(":")[4]
def get_caller_identity(boto3_session):
client = boto3_session.client('sts')
return client.get_caller_identity()
def get_current_aws_account_id(boto3_session):
return get_caller_identity(boto3_session)['Account']
def get_aws_account_default(boto3_session):
try:
return {"default": get_current_aws_account_id(boto3_session)}
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
logger.debug("Error occurred getting default AWS account number.", exc_info=True)
logger.error(
(
"Unable to get AWS account number, an error occurred: '%s'. Make sure your AWS credentials are "
"configured correctly, your AWS config file is valid, and your credentials have the SecurityAudit "
"policy attached."
),
e,
)
return {}
def get_aws_accounts_from_botocore_config(boto3_session):
d = {}
for profile_name in boto3_session.available_profiles:
if profile_name == 'default':
logger.debug("Skipping AWS profile 'default'.")
continue
try:
profile_boto3_session = boto3.Session(profile_name=profile_name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
logger.debug("Error occurred calling boto3.Session() with profile_name '%s'.", profile_name, exc_info=True)
logger.error(
(
"Unable to initialize an AWS session using profile '%s', an error occurred: '%s'. Make sure your "
"AWS credentials are configured correctly, your AWS config file is valid, and your credentials "
"have the SecurityAudit policy attached."
),
profile_name,
e,
)
continue
try:
d[profile_name] = get_current_aws_account_id(profile_boto3_session)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
logger.debug(
"Error occurred getting AWS account number with profile_name '%s'.",
profile_name,
exc_info=True,
)
logger.error(
(
"Unable to get AWS account number using profile '%s', an error occurred: '%s'. Make sure your AWS "
"credentials are configured correctly, your AWS config file is valid, and your credentials have "
"the SecurityAudit policy attached."
),
profile_name,
e,
)
continue
logger.debug(
"Discovered AWS account '%s' associated with configured profile '%s'.",
d[profile_name],
profile_name,
)
return d
def load_aws_accounts(neo4j_session, aws_accounts, aws_update_tag, common_job_parameters):
query = """
MERGE (aa:AWSAccount{id: {ACCOUNT_ID}})
ON CREATE SET aa.firstseen = timestamp()
SET aa.lastupdated = {aws_update_tag}, aa.name = {ACCOUNT_NAME}
WITH aa
MERGE (root:AWSPrincipal{arn: {RootArn}})
ON CREATE SET root.firstseen = timestamp(), root.type = 'AWS'
SET root.lastupdated = {aws_update_tag}
WITH aa, root
MERGE (aa)-[r:RESOURCE]->(root)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {aws_update_tag};
"""
for account_name, account_id in aws_accounts.items():
root_arn = f'arn:aws:iam::{account_id}:root'
neo4j_session.run(
query,
ACCOUNT_ID=account_id,
ACCOUNT_NAME=account_name,
RootArn=root_arn,
aws_update_tag=aws_update_tag,
)
def cleanup(neo4j_session, common_job_parameters):
run_cleanup_job('aws_account_cleanup.json', neo4j_session, common_job_parameters)
def sync(neo4j_session, accounts, aws_update_tag, common_job_parameters):
load_aws_accounts(neo4j_session, accounts, aws_update_tag, common_job_parameters)
cleanup(neo4j_session, common_job_parameters)
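# Hypothetical wiring (a sketch; the cartography CLI normally builds these
# objects and parameters itself):
#
#     import boto3
#     boto3_session = boto3.Session()
#     accounts = get_aws_accounts_from_botocore_config(boto3_session) \
#         or get_aws_account_default(boto3_session)
#     sync(neo4j_session, accounts, aws_update_tag, common_job_parameters)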
|
the-stack_106_18910
|
import urllib
import dash_html_components as html
import pandas as pd
from matscholar_web.constants import rester
from matscholar_web.search.common import (
big_label_and_disclaimer_html,
common_results_container_style,
no_results_html,
)
"""
Functions for defining the results container when materials summary is desired.
Please do not define callback logic in this file.
"""
MAX_N_MATERIALS_IN_TABLE = 100 # the maximum number of rows shown in the table
MAX_N_DOIS_FOR_VIEWING = 5 # The maximum number of viewable DOIs on this page.
big_results_label_and_disclaimer = big_label_and_disclaimer_html("materials")
materials_no_results_html = no_results_html(
pre_label=big_results_label_and_disclaimer
)
def materials_results_html(entity_query, raw_text):
"""
    Get the html block for the materials summary from the Rester-compatible
entity query and text.
Args:
entity_query (dict): The entity query, in Rester-compatible format.
raw_text (str, None): Any raw text to search for.
Returns:
(dash_html_components.Div): The materials summary html block.
"""
results = rester.materials_search(
entity_query, text=raw_text, top_k=MAX_N_MATERIALS_IN_TABLE
)
if not results:
return materials_no_results_html
else:
materials = []
counts = []
dois = []
for i, r in enumerate(results):
material = r["material"]
elemental = len(material) <= 2
oxide = "oxide" in material
uppercase = material.isupper()
if not uppercase and not oxide and not elemental:
materials.append(material)
counts.append(r["count"])
dois.append(r["dois"])
df = pd.DataFrame(
{"material": materials, "count": counts, "dois": dois}
)
# Prevent results with only oxides or elements from being shown
# as an empty table
if df.shape[0] == 0:
return no_results_html(pre_label=big_results_label_and_disclaimer)
# Update the download link
link = make_download_link_from_all_dois_html(df)
n_filtered_results = df.shape[0]
if n_filtered_results >= MAX_N_MATERIALS_IN_TABLE:
label_txt = (
f"Showing top {MAX_N_MATERIALS_IN_TABLE} of "
f"{n_filtered_results} materials - download csv for "
f"full results"
)
else:
label_txt = f"Showing all {n_filtered_results} results."
label = html.Label(label_txt, className="has-margin-10")
materials_table = materials_table_html(df, MAX_N_MATERIALS_IN_TABLE)
materials_html = html.Div(
children=[
big_results_label_and_disclaimer,
label,
link,
materials_table,
],
className=common_results_container_style(),
)
return materials_html
def materials_table_html(df, limit):
"""
Get the html block for materials summary table alone from the rester
results.
Args:
df (pd.DataFrame): The pandas dataframe containing all results.
limit (int): The maximum number of results to show in the table.
Returns:
(dash_html_components.Div): The materials summary table only.
"""
header_material = html.Th("Material")
header_counts = html.Th("Fraction of Papers")
header_clickable = html.Th(
f"Clickable doi links ({MAX_N_DOIS_FOR_VIEWING} examples)"
)
header_downloadable = html.Th("Download all dois as file")
header = html.Tr(
[header_material, header_counts, header_clickable, header_downloadable]
)
n_results = min(df.shape[0], limit)
rows = [None] * n_results
for i in range(n_results):
material_cell = html.Td(df["material"].iloc[i])
count_cell = html.Td(df["count"].iloc[i])
dois = df["dois"].iloc[i]
doi_details, doi_html_link = single_materials_details_html(dois)
dois_cell = html.Td(doi_details)
dois_link_cell = html.Td(doi_html_link)
rows[i] = html.Tr(
[material_cell, count_cell, dois_cell, dois_link_cell]
)
table = html.Table(
[header] + rows,
className="table is-fullwidth is-bordered is-hoverable is-narrow is-striped",
)
return html.Div(table)
def single_materials_details_html(dois):
"""
Get the html block for a single material, along with a download link for
    its full doi list.
Args:
dois ([str]): The list of dois for this material.
Returns:
details (dash_html_components.Div): The collapsible html block
containing the clickable doi list for this material.
download_link(dash_html_components.Div): The download link for all the
fetched dois.
"""
dois = dois[:MAX_N_MATERIALS_IN_TABLE]
viewable_doi_links = []
for doi in dois:
link = html.A(
"{}; ".format(doi),
href="http://www.doi.org/{}".format(doi),
target="_blank",
)
link_div = html.Div(link)
viewable_doi_links.append(link_div)
viewable_doi_links = viewable_doi_links[:MAX_N_DOIS_FOR_VIEWING]
df = pd.DataFrame({"doi": dois})
download_link = make_download_link_from_all_dois_html(
df, f"Download dois as csv"
)
summary_txt = f"Show dois?"
summary = html.Summary([summary_txt])
details = html.Details([summary] + viewable_doi_links)
doi_html_link = html.Div(download_link)
return details, doi_html_link
def make_download_link_from_all_dois_html(df, link_text=None):
"""
Make a download link html block from the dataframe of all results.
    This link is used to download the doi results shown for all materials.
Args:
df (pd.DataFrame): The dataframe containing columns "material", "count",
and "dois".
link_text (str): The text to use as the label for hyperlinking (ie.,
what you click on to download).
Returns:
link (dash_html_components.Div): The link as an html block.
"""
if not link_text:
link_text = "Fetch and download all DOIs as CSV"
csv_string = df.to_csv(index=False, encoding="utf-8")
csv_string = "data:text/csv;charset=utf-8," + urllib.parse.quote(
csv_string
)
link = html.A(
link_text,
id="download-link",
download="matscholar_data.csv",
href=csv_string,
target="_blank",
)
return link
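# A small illustration with made-up data of how the csv download link is built:
#
#     df = pd.DataFrame({"material": ["BaTiO3"], "count": [0.12],
#                        "dois": [["10.1000/example"]]})
#     link = make_download_link_from_all_dois_html(df, "Download example csv")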
|
the-stack_106_18911
|
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="tickformatstops", parent_name="heatmap.colorbar", **kwargs
):
super(TickformatstopsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
""",
),
**kwargs
)
|
the-stack_106_18912
|
# Submit job to the remote cluster
import yaml
import sys
import time
import random
import os, subprocess
import pickle, datetime
import socket
def load_yaml_conf(yaml_file):
with open(yaml_file) as fin:
data = yaml.load(fin, Loader=yaml.FullLoader)
return data
def process_cmd(yaml_file):
current_path = os.path.dirname(os.path.abspath(__file__))
config_path=os.path.join(current_path,yaml_file)
print(config_path)
yaml_conf = load_yaml_conf(config_path)
# yaml_conf = load_yaml_conf(yaml_file)
# ps_ip = yaml_conf['ps_ip']
ps_ip=socket.gethostname()
worker_ips, total_gpus = [], []
for ip_gpu in yaml_conf['worker_ips']:
ip, gpu_list = ip_gpu.strip().split(':')
ip=socket.gethostname()
worker_ips.append(ip)
total_gpus.append(eval(gpu_list))
running_vms = set()
subprocess_list=set()
submit_user = f"{yaml_conf['auth']['ssh_user']}@" if len(yaml_conf['auth']['ssh_user']) else ""
total_gpu_processes = sum([sum(x) for x in total_gpus])
learner_conf = '-'.join([str(_) for _ in list(range(1, total_gpu_processes+1))])
conf_script = ''
setup_cmd = ''
if yaml_conf['setup_commands'] is not None:
for item in yaml_conf['setup_commands']:
setup_cmd += (item + ' && ')
time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%m%d_%H%M%S')+'_'+str(random.randint(1,60000))
job_conf = {'time_stamp':time_stamp,
'total_worker': total_gpu_processes,
'ps_ip':ps_ip,
'ps_port':random.randint(1000, 60000),
'manager_port':random.randint(1000, 60000),
}
for conf in yaml_conf['job_conf']:
job_conf.update(conf)
job_name = job_conf['job_name']
if len(sys.argv)>3:
job_conf['sample_mode'] = sys.argv[3]
if len(sys.argv)>4:
job_conf['load_model'] = True
job_conf['load_time_stamp'] = sys.argv[4]
job_conf['load_epoch'] = sys.argv[5]
job_conf["model_path"]=os.path.join(job_conf["log_path"], 'logs', job_name, job_conf['load_time_stamp'])
for conf_name in job_conf:
conf_script = conf_script + f' --{conf_name}={job_conf[conf_name]}'
log_file_name=os.path.join(current_path,f"{job_name}_logging_{time_stamp}")
# =========== Submit job to parameter server ============
running_vms.add(ps_ip)
ps_cmd = f" {yaml_conf['python_path']}/python {yaml_conf['exp_path']}/param_server.py {conf_script} --this_rank=0 --learner={learner_conf} --gpu_device=0"
print(f"Starting time_stamp on {time_stamp}...")
with open(log_file_name, 'wb') as fout:
pass
print(f"Starting aggregator on {ps_ip}...")
with open(log_file_name, 'a') as fout:
# p=subprocess.Popen(f'ssh -tt {submit_user}{ps_ip} "{setup_cmd} {ps_cmd}"', shell=True, stdout=fout, stderr=fout)
# p=subprocess.Popen(f'{ps_cmd}', shell=True, stdout=fout, stderr=fout)
cmd_sequence=f'{ps_cmd}'
cmd_sequence=cmd_sequence.split()
p = subprocess.Popen(cmd_sequence,stdout=fout, stderr=fout)
subprocess_list.add(p)
time.sleep(30)
# =========== Submit job to each worker ============
rank_id = 1
for worker, gpu in zip(worker_ips, total_gpus):
running_vms.add(worker)
print(f"Starting workers on {worker} ...")
for gpu_device in range(len(gpu)):
for _ in range(gpu[gpu_device]):
worker_cmd = f" {yaml_conf['python_path']}/python {yaml_conf['exp_path']}/learner.py {conf_script} --this_rank={rank_id} --learner={learner_conf} --gpu_device={gpu_device}"
rank_id += 1
with open(log_file_name, 'a') as fout:
# p=subprocess.Popen(f'ssh -tt {submit_user}{worker} "{setup_cmd} {worker_cmd}"', shell=True, stdout=fout, stderr=fout)
# p=subprocess.Popen(f'{worker_cmd}', shell=True, stdout=fout, stderr=fout)
cmd_sequence=f'{worker_cmd}'
cmd_sequence=cmd_sequence.split()
p = subprocess.Popen(cmd_sequence,stdout=fout, stderr=fout)
subprocess_list.add(p)
exit_codes = [p.wait() for p in subprocess_list]
# dump the address of running workers
job_name = os.path.join(current_path, f"{job_name}_{time_stamp}")
with open(job_name, 'wb') as fout:
job_meta = {'user':submit_user, 'vms': running_vms}
pickle.dump(job_meta, fout)
print(f"Submitted job, please check your logs ({log_file_name}) for status")
def terminate(job_name):
current_path = os.path.dirname(os.path.abspath(__file__))
job_meta_path = os.path.join(current_path, job_name)
if not os.path.isfile(job_meta_path):
        print(f"Fail to terminate {job_name}, as it does not exist")
        return
with open(job_meta_path, 'rb') as fin:
job_meta = pickle.load(fin)
for vm_ip in job_meta['vms']:
# os.system(f'scp shutdown.py {job_meta["user"]}{vm_ip}:~/')
print(f"Shutting down job on {vm_ip}")
os.system(f"ssh {job_meta['user']}{vm_ip} '/mnt/home/lichenni/anaconda3/envs/oort/bin/python {current_path}/shutdown.py {job_name}'")
try:
if len(sys.argv)==1:
# process_cmd('configs/har/conf_test.yml')
# process_cmd('configs/openimage/conf_test.yml')
process_cmd('configs/speech/conf_test.yml')
# process_cmd('configs/stackoverflow/conf_test.yml')
elif sys.argv[1] == 'submit':
process_cmd(sys.argv[2])
elif sys.argv[1] == 'stop':
terminate(sys.argv[2])
else:
print("Unknown cmds ...")
except Exception as e:
print(f"====Error {e}")
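# Typical invocations, mirroring the argument handling above (the script name
# is illustrative):
#
#     python submit.py submit configs/speech/conf_test.yml
#     python submit.py stop <job_name>_<time_stamp>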
|
the-stack_106_18913
|
import functools, itertools, types, builtins, operator, weakref
import logging, re, fnmatch
import ptypes, image.bitmap
from ptypes import *
ptypes.setbyteorder(ptypes.config.byteorder.littleendian)
## combinators
fcompose = lambda *f: functools.reduce(lambda f1, f2: lambda *a: f1(f2(*a)), builtins.reversed(f))
## primitive types
class s8(pint.sint_t): length = 1
class u8(pint.uint_t): length = 1
class s16(pint.sint_t): length = 2
class u16(pint.uint_t): length = 2
class s24(pint.sint_t): length = 3
class u24(pint.uint_t): length = 3
class s32(pint.sint_t): length = 4
class u32(pint.uint_t): length = 4
class f32(pfloat.single): pass
class f64(pfloat.double): pass
## core types
class String(pstr.string):
def str(self):
res = super(String, self).str()
return res.rstrip('\0')
def summary(self):
return "({:d}) {:s}".format(self.blocksize(), self.str())
class Coord(pstruct.type):
_fields_ = [(u16, 'x'), (u16, 'y')]
def summary(self):
return "x={:d} y={:d}".format(self['x'].int(), self['y'].int())
class resref(String):
length = 8
class strref(u32):
pass
class rect(pstruct.type):
_fields_ = [(u16, 'x1'), (u16, 'y1'), (u16, 'x2'), (u16, 'y2')]
def summary(self):
        return "x1={:d} y1={:d} x2={:d} y2={:d}".format(self['x1'].int(), self['y1'].int(), self['x2'].int(), self['y2'].int())
## encoded types
import zlib
class zdata(ptype.encoded_t):
def decode(self, object, **attrs):
encdata = object.serialize()
decdata = zlib.decompress(encdata)
return self.new(ptype.block).set(decdata)
def encode(self, object, **attrs):
decdata = object.serialize()
encdata = zlib.compress(decdata)
return self.new(ptype.block).set(encdata)
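# Note: dereferencing a zdata instance decompresses the wrapped block and
# re-encoding recompresses it; the BitmapAnimationCompressed format and the
# Save archive's Contents entries below route their payloads through this type.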
## record types
class IndexRecord(ptype.definition):
'''
Key is an integer as used by the BIFF format.
'''
cache = {}
class FileRecord(ptype.definition):
'''
Key is (signature, version=(major, minor))
'''
cache = {}
## special types
class ocStructure(pstruct.type):
def __pointer_to_object__(self):
def closure(ptr, parent=self):
count = self['count'].li
return dyn.array(parent._object_, count.int())
return dyn.rpointer(closure, self.getparent(File), u32)
_fields_ = [
(lambda self: self.__pointer_to_object__(), 'offset'),
(u32, 'count'),
]
def summary(self):
return "offset={:#x} count={:d}".format(self['offset'].int(), self['count'].int())
class coStructure(ocStructure):
_fields_ = [
(u32, 'count'),
(lambda self: self.__pointer_to_object__(), 'offset'),
]
class ocStructure16(ocStructure):
_fields_ = [
(lambda self: self.__pointer_to_object__(), 'offset'),
(u16, 'count'),
]
class coStructure16(ocStructure):
_fields_ = [
(u16, 'count'),
(lambda self: self.__pointer_to_object__(), 'offset'),
]
class oc16Structure16(ocStructure):
def __pointer_to_object__(self):
def closure(ptr, parent=self):
count = self['count'].li
return dyn.array(parent._object_, count.int())
return dyn.rpointer(closure, self.getparent(File), u16)
_fields_ = [
(u16, 'count'),
(lambda self: self.__pointer_to_object__(), 'offset'),
]
class padStructure(pstruct.type):
_fields_ = [
(lambda self: self._object_, 'st'),
]
class osFile(pstruct.type):
def __pointer_to_object__(self):
def closure(self, parent=self):
res = parent['size'].li
return dyn.clone(File, blocksize=lambda _, cb=res.int(): cb)
return dyn.pointer(closure, u32)
_fields_ = [
(lambda self: self.__pointer_to_object__(), 'offset'),
(u32, 'size'),
]
def summary(self):
return "offset={:#x} size={!s}".format(self['offset'].int(), self['size'].summary())
class soFile(osFile):
_fields_ = [
(u32, 'size'),
(lambda self: self.__pointer_to_object__(), 'offset'),
]
## unknown types
@IndexRecord.define(type=1)
class Unknown_0001(image.bitmap.File):
'''This might be a bitmap file.'''
@IndexRecord.define(type=2050)
class Unknown_0802(pstr.string):
pass
@IndexRecord.define(type=2051)
class Unknown_0803(parray.block):
_object_ = u32
@IndexRecord.define(type=1003)
class Unknown_03eb(parray.block):
_object_ = u32
@IndexRecord.define(type=1007)
class Unknown_03ef(pstr.string):
pass
@IndexRecord.define(type=1008)
class Unknown_03f0(pstr.string):
pass
@IndexRecord.define(type=1023)
class Unknown_03ff(ptype.block):
pass
@IndexRecord.define(type=1028)
class Unknown_0404(ptype.block):
pass
@IndexRecord.define(type=1029)
class Unknown_0405(pstr.string):
'''shader'''
@IndexRecord.define(type=1032)
class Unknown_0408(pstr.string):
'''looks like lua script for the ui'''
@IndexRecord.define(type=1033)
class Unknown_0409(pstr.string):
'''looks like lua script'''
@IndexRecord.define(type=1034)
class Unknown_040a(ptype.block):
pass
## complex types
class Statistics(pstruct.type):
class Kills(pstruct.type):
_fields_ = [
(u32, 'XP'),
(u32, 'Number'),
]
def summary(self):
return "xp={:d} number={:d}".format(self['xp'].int(), self['number'].int())
_fields_ = [
(strref, 'powerful(Name)'),
(u32, 'powerful(XP)'),
(u32, 'duration'),
(u32, 'time'),
(u8, 'memberQ'),
(u16, 'unused'),
(pstr.char_t, 'cname'),
(Kills, 'kills(Chapter)'),
(Kills, 'kills(Game)'),
(dyn.array(resref, 4), 'favorite(spells)'),
(dyn.array(u16, 4), 'favorite(count)'),
(dyn.array(resref, 4), 'favorite(weapons)'),
(dyn.array(u16, 4), 'favorite(time)'),
]
class NPC(pstruct.type):
_fields_ = [
(s16, 'selected'),
(s16, 'order'),
(osFile, 'CRE'),
(dyn.clone(String, length=8), 'cname'),
(u32, 'orientation'),
(resref, 'area'),
(Coord, 'coordinate'),
(Coord, 'view'),
(u16, 'action'),
(u16, 'happiness'),
(dyn.array(u32, 24), 'NumTimesInteracted'),
(dyn.array(u16, 4), 'WeaponSlots'),
(dyn.array(u16, 4), 'QuickAbility'),
(dyn.array(resref, 3), 'QuickSpell'),
(dyn.array(u16, 3), 'QuickItem'),
(dyn.array(u16, 3), 'QuickItemSlot'),
(dyn.clone(pstr.string, length=32), 'name'),
(u32, 'Talkcount'),
(Statistics, 'statistics'),
(dyn.block(8), 'voice'),
]
class Inventory(ptype.undefined):
pass
class Item(pstruct.type):
_fields_ = [
(resref, 'resname'),
(u8, 'time(creation)'),
(u8, 'time(expiration)'),
(u16, 'quantity'),
# (dyn.array(u16, 3), 'quantity'),
# (u32, 'flags'),
]
class SpellKnown(pstruct.type):
_fields_ = [
(resref, 'resname'),
(u16, 'level'),
(u16, 'type'),
]
class SpellInfo(pstruct.type):
_fields_ = [
(u16, 'level'),
(u16, 'number'),
(u16, 'number(effects)'),
(u16, 'type'),
(u32, 'index'),
(u32, 'count'),
]
class SpellMemo(pstruct.type):
_fields_ = [
(resref, 'resname'),
(u32, 'memorizedQ'),
]
class Door(pstruct.type):
_fields_ = [
(dyn.clone(String, length=32), 'Name'),
(dyn.clone(String, length=8), 'ID'),
(u32, 'flags'),
(u32, 'ivdoor(open)'),
(u16, 'cvdoor(open)'),
(u16, 'cvdoor(closed)'),
(u32, 'ivdoor(closed)'),
(rect, 'bounds(open)'),
(rect, 'bounds(closed)'),
(u32, 'ivcell(open)'),
(u16, 'cvcell(open)'),
(u16, 'cvcell(closed)'),
(u32, 'ivcell(closed)'),
(u16, 'HP'),
(u16, 'AC'),
(resref, 'sound(open)'),
(resref, 'sound(closed)'),
(u32, 'cursor'),
(u16, 'trap(detection)'),
(u16, 'trap(removal)'),
(u16, 'trappedQ'),
(u16, 'detectedQ'),
(Coord, 'trap'),
(resref, 'key'),
(resref, 'script'),
(u32, 'difficulty(detection)'),
(u32, 'difficulty(lock)'),
(rect, 'points'),
(strref, 'string(pick)'),
(dyn.clone(String, length=24), 'trigger'),
(strref, 'speaker'),
(resref, 'dialog'),
(resref, 'unknown'),
]
class VAR(pstruct.type):
_fields_ = [
(dyn.clone(String, length=32), 'name'),
(u16, 'type'),
(u16, 'ref'),
(u32, 'dword'),
(s32, 'int'),
(f64, 'double'),
(dyn.clone(String, length=32), 'value'),
]
class Journal(pstruct.type):
_fields_ = [
(strref, 'text'),
(u32, 'time'),
(u8, 'chapter'),
(u8, 'owner'),
(u8, 'section'),
(u8, 'location'),
]
class Actor(pstruct.type):
_fields_ = [
(dyn.clone(String, length=32), 'name'),
(Coord, 'current'),
(Coord, 'destination'),
(u32, 'flags'),
(u16, 'spawnedQ'),
(pstr.char_t, 'cresref'),
(u8, 'unused1'),
(u32, 'animation'),
(u16, 'orientation'),
(u16, 'unused2'),
(u32, 'timer(remove)'),
(u16, 'move(distance)'),
(u16, 'move(target)'),
(u32, 'schedule'),
(u32, 'NumTimesTalkedTo'),
(resref, 'dialog'),
(resref, 'script(override)'),
(resref, 'script(general)'),
(resref, 'script(class)'),
(resref, 'script(race)'),
(resref, 'script(default)'),
(resref, 'script(specific)'),
(resref, 'file(CRE)'),
(osFile, 'CRE'),
(dyn.block(128), 'unused3'),
]
## bitmap animation format
class RGBQUAD(pstruct.type):
_fields_ = [
(u8, 'b'),
(u8, 'g'),
(u8, 'r'),
(u8, 'a'),
]
class BitmapFrame(pstruct.type):
class _data(pbinary.flags):
_fields_ = [
(1, 'compressedQ'),
(31, 'offset'),
]
_fields_ = [
(u16, 'width'),
(u16, 'height'),
(s16, 'x'),
(s16, 'y'),
(_data, 'data'),
]
class BitmapCycle(pstruct.type):
_fields_ = [
(u16, 'count(indices)'),
(u16, 'index'),
]
@FileRecord.define
class BitmapAnimation(pstruct.type):
type = 'BAM ', (1, None)
def __offset_frame(self):
count = self['count(frame)'].li
t = dyn.array(BitmapFrame, count.int())
# FIXME: this should be followed by count(cycle) BitmapCycle entries...
return dyn.rpointer(t, self.getparent(File), u32)
def __offset_palette(self):
t = dyn.array(RGBQUAD, 0)
# FIXME: figure out the correct count for this
return dyn.rpointer(t, self.getparent(File), u32)
def __offset_table(self):
# FIXME: figure out the correct count for this
t = dyn.array(u32, 0)
return dyn.rpointer(t, self.getparent(File), u32)
_fields_ = [
(u16, 'count(frame)'),
(u8, 'count(cycle)'),
(u8, 'compressed'),
(__offset_frame, 'offset(frame)'),
(__offset_palette, 'offset(palette)'),
(__offset_table, 'offset(table)'),
]
@FileRecord.define
class BitmapAnimationCompressed(pstruct.type):
type = 'BAMC', (1, None)
def __data(self):
t = dyn.block(self['length'].li.int())
return dyn.clone(zdata, _value_=t, _object_=ptype.block)
_fields_ = [
(u32, 'length'),
(__data, 'data'),
]
## some table format (we define a bunch of keys for it because the devs can't seem to type it right)
@FileRecord.define(type=('2DA ', ' '))
@FileRecord.define(type=(' 2DA', ' V1.'))
@FileRecord.define
class TwoDimensionalArray(pstr.string):
type = '2DA ', (1, 0)
def iterate(self):
string = self.str()
items = string.replace('\r\n', '\n').split('\n')
return (item.replace('\t', 7*' ') for item in items if item)
def split(self):
iterable = self.iterate()
return [item for item in iterable]
def Default(self):
iterable = self.iterate()
# figure out whatever the first single item is
item = next(iterable).strip()
return int(item) if item.isdigit() else item
def Columns(self):
iterable = self.iterate()
# some heuristics to extract the column headers
next(iterable)
row = next(iterable)
iterable = (item for item in row.split(' ') if item.strip())
return [item.strip() for item in iterable]
def Rows(self):
iterable = self.iterate()
next(iterable)
next(iterable)
# some heuristics to extract the first column
iterable = (item.strip().split(' ')[0] for item in iterable if item.strip())
return [item.strip() for item in iterable]
def summary(self):
default, rows, columns = self.Default(), self.Rows(), self.Columns()
if rows and columns:
return "default:{!s} rows:{:s} columns:{:s}".format(default, ','.join(rows), ','.join(columns))
return "default:{!s} rows:{:s}".format(default, ','.join(rows)) if rows else "default:{!s} columns:{:s}".format(default, ','.join(columns))
def details(self):
return self.str().strip('\r\n')
def repr(self):
return self.details()
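# A hypothetical 2DA body (what follows the 8-byte "2DA " header) and what the
# helpers above would return for it:
#
#     "0\n        ITEM1  ITEM2\nROW_A   1      2\nROW_B   3      4\n"
#
# Default() -> 0, Columns() -> ['ITEM1', 'ITEM2'], Rows() -> ['ROW_A', 'ROW_B']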
## creature format
@FileRecord.define
class Creature(pstruct.type):
type = 'CRE ', (1, 0)
class Portrait(pstruct.type):
_fields_ = [
(resref, 'small'),
(resref, 'large'),
]
class Color(pstruct.type):
_fields_ = [
(u8, 'metal'),
(u8, 'minor'),
(u8, 'major'),
(u8, 'skin'),
(u8, 'leather'),
(u8, 'armor'),
(u8, 'hair'),
]
class AC(pstruct.type):
_fields_ = [
(s16, 'natural'),
(s16, 'effective'),
(s16, 'crushing'),
(s16, 'missile'),
(s16, 'piercing'),
(s16, 'slashing'),
]
class Saves(pstruct.type):
_fields_ = [
(u8, 'death'),
(u8, 'wands'),
(u8, 'poly'),
(u8, 'breath'),
(u8, 'spells'),
]
class Resist(pstruct.type):
_fields_ = [
(u8, 'fire'),
(u8, 'cold'),
(u8, 'electricity'),
(u8, 'acid'),
(u8, 'magic'),
(u8, 'magic-fire'),
(u8, 'magic-cold'),
(u8, 'slashing'),
(u8, 'crushing'),
(u8, 'piercing'),
(u8, 'missile'),
]
class Skills(pstruct.type):
_fields_ = [
(u8, 'Detect'),
(u8, 'Traps(set)'),
(u8, 'Lore'),
(u8, 'Lockpicking'),
(u8, 'Stealth'),
(u8, 'Traps(disarm)'),
(u8, 'Pickpocket'),
(u8, 'Fatigue'),
(u8, 'Intoxication'),
(u8, 'Luck'),
]
class Proficiencies(parray.type):
length, _object_ = 20, u8
class PSTEE(pstruct.type):
_fields_ = [
(u32, 'XP(thief)'),
(u32, 'XP(mage)'),
(u8, 'variable(GOOD)'),
(u8, 'variable(LAW)'),
(u8, 'variable(LADY)'),
(u8, 'Faction'),
(u8, 'Team'),
(u8, 'Species'),
(u8, 'range(Dialog)'),
(u8, 'size(Circle)'),
(u8, 'flags(shield)'),
(u8, 'vision'),
(u32, 'flags'),
(dyn.block(10), 'unused'),
]
def __Game(self):
return self.PSTEE
class Stats(pstruct.type):
_fields_ = [
(u8, 'STR'),
(u8, 'STR%'),
(u8, 'INT'),
(u8, 'WIS'),
(u8, 'DEX'),
(u8, 'CON'),
(u8, 'CHA'),
]
class IDs(pstruct.type):
'''these are all enumerations'''
_fields_ = [
(u8, 'enemy-ally'),
(u8, 'general'),
(u8, 'race'),
(u8, 'class'),
(u8, 'specific'),
(u8, 'gender'),
(dyn.array(u8, 5), 'object'),
(u8, 'alignment'),
]
class Enum(pstruct.type):
_fields_ = [
(u16, 'global'),
(u16, 'local'),
]
def __items(self):
# XXX: i counted 25 items here, but i'm not sure where this comes
# from (other than loaded items, and inventory items)
t = dyn.array(u16, 25)
return dyn.rpointer(t, self.getparent(File), u32)
_fields_ = [
(strref, 'name'),
(strref, 'tooltip'),
(u32, 'flags'),
(u32, 'xp'),
(u32, 'power'),
(u32, 'gold'),
(u32, 'status'),
(u16, 'hp'),
(u16, 'maxhp'),
(u32, 'animationid'),
(Color, 'color'),
(u8, 'EffVersion'),
(Portrait, 'portrait'),
(s8, 'reputation'),
(u8, 'shadowsQ'),
(AC, 'AC'),
(u8, 'THAC0'),
(u8, 'AttackCount'),
(Saves, 'Saves'),
(Resist, 'Resist'),
(Skills, 'Skills'),
(Proficiencies, 'Proficiencies'),
(u8, 'TurnUndead'),
(u8, 'Tracking'),
(__Game, 'Game'), # PSTEE-only
(dyn.array(strref, 100), 'strtab'),
(dyn.array(u8, 3), 'level(classes)'),
(u8, 'gender'),
(Stats, 'stats'),
(u8, 'morale'),
(u8, 'morale(break)'),
(u8, 'enemy(race)'),
(u16, 'morale(recovery)'),
(u32, 'kit'),
(resref, 'script(override)'),
(resref, 'script(class)'),
(resref, 'script(race)'),
(resref, 'script(general)'),
(resref, 'script(default)'),
(IDs, 'id'),
(Enum, 'actor'),
(dyn.clone(String, length=32), 'variable(death)'),
(dyn.clone(ocStructure, _object_=SpellKnown), 'spells(known)'),
(dyn.clone(ocStructure, _object_=SpellInfo), 'spells(info)'),
(dyn.clone(ocStructure, _object_=SpellMemo), 'spells(memorized)'),
(dyn.clone(ocStructure, _object_=Item), 'items'),
(__items, 'items(slots)'),
(dyn.clone(ocStructure, _object_=ptype.block), 'effects'),
(resref, 'dialog'),
]
## area format
@FileRecord.define
class Area(pstruct.type):
type = 'AREA', (1, 0)
class AreaFlags(pstruct.type):
_fields_ = [
(resref, 'area'),
(u32, 'flags'),
]
_fields_ = [
(resref, 'wed'),
(u32, 'last saved'),
(u32, 'flags'),
(AreaFlags, 'flags(North)'),
(AreaFlags, 'flags(East)'),
(AreaFlags, 'flags(South)'),
(AreaFlags, 'flags(West)'),
(u16, 'type'),
(u16, 'probability(rain)'),
(u16, 'probability(snow)'),
(u16, 'probability(fog)'),
(u16, 'probability(lightning)'),
(u16, 'wind'),
(dyn.clone(ocStructure16, _object_=Actor), 'actors'),
(dyn.clone(coStructure16, _object_=ptype.block), 'regions'),
(dyn.clone(ocStructure, _object_=ptype.block), 'spawns'),
(dyn.clone(ocStructure, _object_=ptype.block), 'entrances'),
(dyn.clone(ocStructure16, _object_=ptype.block), 'containers'),
(dyn.clone(coStructure16, _object_=Item), 'items'),
(dyn.clone(ocStructure16, _object_=ptype.block), 'vertices'),
(dyn.clone(coStructure16, _object_=ptype.block), 'ambients'),
(dyn.clone(ocStructure, _object_=VAR), 'variables'),
(dyn.clone(oc16Structure16, _object_=ptype.block), 'tileflags'),
(resref, 'script'),
(soFile, 'explored'),
(dyn.clone(coStructure, _object_=Door), 'doors'),
(dyn.clone(coStructure, _object_=ptype.block), 'animations'),
(dyn.clone(coStructure, _object_=ptype.block), 'tiles'),
(lambda self: dyn.rpointer(ptype.undefined, self.getparent(File), u32), 'rest interruptions'),
(dyn.clone(ocStructure, _object_=ptype.block), 'notes'),
(dyn.clone(ocStructure, _object_=ptype.block), 'traps'),
(resref, 'rest(day)'),
(resref, 'rest(night)'),
(dyn.block(56), 'unused'),
]
## archive (save) formats
class Contents(pstruct.type):
def __data(self):
length = self['length(data)'].li
return dyn.clone(zdata, _value_=dyn.block(length.int()), _object_=File)
_fields_ = [
(u32, 'length(filename)'),
(lambda self: dyn.clone(pstr.string, length=self['length(filename)'].li.int()), 'filename'),
(u32, 'length'),
(u32, 'length(data)'),
(__data, 'data'),
]
@FileRecord.define
class Save(parray.block):
type = 'SAV ', (1, 0)
_object_ = Contents
## game format
@FileRecord.define
class Game(pstruct.type):
type = 'GAME', (2, 0)
_fields_ = [
(u32, 'time'),
(dyn.array(u16, 6), 'formation'),
(u32, 'gold'),
(u16, 'unknown'),
(u16, 'flags(weather)'),
(dyn.clone(ocStructure, _object_=NPC), 'PCs'),
(dyn.clone(ocStructure, _object_=Inventory), 'inventory'),
(dyn.clone(ocStructure, _object_=NPC), 'NPCs'),
(dyn.clone(ocStructure, _object_=VAR), 'GVar'),
(resref, 'area(Main)'),
(dyn.pointer(ptype.undefined, u32), 'extra(familiar)'),
(dyn.clone(coStructure, _object_=Journal), 'journal'),
(u32, 'reputation'),
(resref, 'area(Current)'),
(u32, 'flags(gui)'),
(u32, 'load progress'),
(dyn.pointer(ptype.undefined, u32), 'info(familiar)'),
(dyn.clone(ocStructure, _object_=ptype.undefined), 'locations'),
(u32, 'game time'),
(dyn.clone(ocStructure, _object_=ptype.undefined), 'planes'),
(u32, 'zoom'),
(resref, 'area(random encounter)'),
(resref, 'worldmap'),
(resref, 'campaign'),
(u32, 'owner(familiar)'),
(dyn.clone(String, length=20), 'random encounter'),
]
## biff format
class Index(parray.type):
import collections
Locator = collections.namedtuple('location', ['id', 'type', 'size', 'count'])
def summary(self):
if len(self):
first, last = self[0], self[-1]
return "{:s} {:s}[{:#x}:{:+x}]...{:s}[{:#x}:{:+x}]".format(self.__element__(), File.typename(), first['offset'].int(), first['size'].int(), File.typename(), last['offset'].int(), last['size'].int())
return "{:s} ...".format(self.__element__())
class FileIndex(Index):
class Locator(pstruct.type):
_fields_ = [
(u32, 'locator'),
(u32, 'offset'),
(u32, 'size'),
(u16, 'type'),
(u16, 'unknown'),
]
_object_ = Locator
class TileIndex(Index):
class Locator(pstruct.type):
_fields_ = [
(u32, 'locator'),
(u32, 'offset'),
(u32, 'count'),
(u32, 'size'),
(u16, 'type'),
(u16, 'unknown'),
]
_object_ = Locator
class BiffContents(parray.terminated):
__index__ = None
def __init__(self, **attrs):
if self.__index__ is None:
attrs.setdefault('__index__', [])
self.__cache__ = {}
return super(BiffContents, self).__init__(**attrs)
def _object_(self):
index = len(self.value)
item = self.__index__[index]
if item.id is not None:
self.__cache__[item.id] = index
t = IndexRecord.withdefault(item.type, ptype.block, type=item.type, length=item.size)
return t if t is ptype.block else dyn.clone(t, blocksize=lambda _, cb=item.size: cb)
def locate(self, locator):
idx = self.__cache__[locator]
return self[idx]
def isTerminator(self, item):
return len(self) >= len(self.__index__)
def enumerate(self):
for i, item in enumerate(self):
if item.type is not None:
yield i, item
continue
return
def iterate(self):
for _, item in self.enumerate():
yield item
return
def summary(self):
items = [item for item in self.iterate()]
if len(items):
first, last = items[0], items[-1]
types = {item.type for item in self.__index__ if item.type is not None}
return "({:d}) {:s}...{:s} types:{:s}".format(len(items), first.instance(), last.instance(), ','.join(map("{:d}".format, sorted(types))))
return "({:d}) ...".format(len(items))
@FileRecord.define
class BIFF(pstruct.type):
type = 'BIFF', (1, None)
def __items(self):
def closure(self, parent=self):
index = parent.__build_index__()
return dyn.clone(BiffContents, __index__=index)
offset, fields = self['offset'].li.int() - self.getoffset(), ['count(files)', 'count(tiles)', 'offset', 'unknown']
t = dyn.block(offset - sum(self[fld].li.size() for fld in fields))
return dyn.clone(ptype.encoded_t, _value_=t, _object_=closure)
def __build_index__(self):
files, tiles = (self[fld] for fld in ['index(file)', 'index(tile)'])
# figure out the boundaries of our data so that we can determine
# if a locator needs to be discarded.
baseoffset, size = self['items'].getoffset(), self['items'].size()
# combine both indices into a single list sorted by offset
items = [item for item in sorted(itertools.chain(files, tiles), key=fcompose(operator.itemgetter('offset'), operator.methodcaller('int')))]
# now we need to traverse our items so that we can build a flat
# list (with no holes) for each item and its type. if we find a
# hole that needs to be padded, then we use the none type.
index, position = [], baseoffset
for item in items:
offset = item['offset'].int()
# if our item offset is earlier than expected, then the
# previous index is overlapping with this one. so in this
# case, we'll warn the user and continue skipping records
# until we're back at a reasonable position.
if offset < position:
logging.warning("Item {!s} with bounds {:#x}{:+x} is {:s}".format(item.instance(), offset, item['size'].int(), "overlapping with previous entry {!s}".format(index[-1]) if len(index) else "not within expected bounds {:#x}:{:+x}".format(baseoffset, size)))
continue
# if our item offset is farther along than expected, then we
# need to pad this entry till we get to the right position.
shift = offset - position
if offset > position:
res = Index.Locator(id=None, type=None, count=None, size=offset - position)
index.append(res)
position += shift
# if our index item is a TileIndex, then figure out the count.
if isinstance(item, TileIndex.Locator):
count = item['count'].int()
# otherwise, it's a FileIndex, and there isn't a count
else:
count = None
# now we should be able to add the entry, and update position.
res = Index.Locator(id=item['locator'].int(), type=item['type'].int(), size=item['size'].int(), count=count)
index.append(res)
position += res.size
# check to see if we've filled up the entire block and pad it if necessary.
if position < baseoffset + size:
shift = position - baseoffset
res = Index.Locator(id=None, type=None, count=None, size=size - shift)
index.append(res)
# our index should now be contiguous
return index
_fields_ = [
(u32, 'count(files)'),
(u32, 'count(tiles)'),
(u32, 'offset'),
(u32, 'unknown'),
(__items, 'items'),
(lambda self: dyn.clone(FileIndex, length=self['count(files)'].li.int()), 'index(file)'),
(lambda self: dyn.clone(TileIndex, length=self['count(tiles)'].li.int()), 'index(tile)'),
]
def summary(self):
return "count(files)={:d} count(tiles)={:d} offset={:+#x} unknown={:#x}".format(*(self[fld].int() for fld in ['count(files)','count(tiles)','offset','unknown']))
## dialogue format
class DialogState(pstruct.type):
_fields_ = [
(strref, 'response'),
(u32, 'index'),
(u32, 'count'),
(s32, 'trigger'),
]
def summary(self):
return "index={:d} count={:d} trigger={:d} response={!s}".format(self['index'].int(), self['count'].int(), self['trigger'].int(), self['response'].summary())
class DialogTransition(pstruct.type):
@pbinary.littleendian
class _flags(pbinary.flags):
_fields_ = [
(21, 'unused'),
(1, 'clear'),
(1, 'immediateQ'),
(1, 'journal(solved)'),
(1, 'journal(note)'),
(1, 'journal(unsolved)'),
(1, 'interrupt'),
(1, 'journalQ'),
(1, 'sentinelQ'),
(1, 'actionQ'),
(1, 'triggerQ'),
(1, 'textQ'),
]
class _nextState(pstruct.type):
_fields_ = [
(resref, 'state'),
(u32, 'index'),
]
def summary(self):
return "index={!s} state={!s}".format(self['index'].summary(), self['state'].summary())
_fields_ = [
(_flags, 'flags'),
(strref, 'text'),
(strref, 'journal'),
(u32, 'trigger'),
(u32, 'action'),
(_nextState, 'next'),
]
#def summary(self):
# items = []
# if flags['textQ']:
# items.append(self['text'])
# if flags['journalQ']:
# items.append(self['journal'])
# if flags['triggerQ']:
# items.append(self['trigger'])
# if flags['actionQ']:
# items.append(self['action'])
# if not flags['sentinelQ']:
# items.append(self['node'])
# raise NotImplementedError
class DialogTrigger(ocStructure):
def __pointer_to_object__(self):
def closure(ptr, parent=self):
count = self['count'].li
return dyn.clone(pstr.string, length=count.int())
return dyn.rpointer(closure, self.getparent(File), u32)
def summary(self):
item = self['offset'].d
try:
return "offset={:#x} : {!s}".format(self['offset'].int(), item.li.summary())
except ptypes.error.LoadError:
pass
return super(DialogTrigger, self).summary()
def str(self):
item = self['offset'].d
return item.li.str()
class DialogAction(DialogTrigger):
pass
@FileRecord.define
class Dialog(pstruct.type):
type = 'DLG ', (1, 0)
class _trigger(ocStructure):
_object_ = DialogTrigger
def items(self):
result = self['offset'].d.li
return [item.str() for item in result]
def summary(self):
result = self['offset'].d.li
iterable = (item.str() for item in result)
return "({:d}) [{:s}]".format(len(result), ', '.join(map(repr, iterable)))
class _action(_trigger):
_object_ = DialogAction
_fields_ = [
(dyn.clone(coStructure, _object_=DialogState), 'state'),
(dyn.clone(coStructure, _object_=DialogTransition), 'transition'),
(_trigger, 'trigger(state)'),
(_trigger, 'trigger(transition)'),
(_action, 'action'),
(u32, 'hostile'),
]
def States(self):
return [item for item in self['trigger(state)'].items()]
def Transitions(self):
return [item for item in self['trigger(transition)'].items()]
def Actions(self):
return [item for item in self['action'].items()]
## file format
class Header(pstruct.type):
class _Version(String):
length = 4
def get(self):
string = self.str()
if not string.startswith('V'):
return super(Header._Version, self).get()
if string[1:].count('.') > 0:
items = string[1:].split('.', 1)
if not all(item.isnumeric() for item in items):
return super(Header._Version, self).get()
major, minor = items
return int(major), int(minor)
major = string[1:].rstrip(' \0')
return int(major), None
def set(self, version):
if not isinstance(version, builtins.tuple):
return super(Header._Version, self).set(version)
res = "V{:d}.{:d}".format(*version)
if len(res) != self.blocksize():
raise ValueError
return self.set(res)
_fields_ = [
(dyn.clone(String, length=4), 'Signature'),
(_Version, 'Version'),
]
def Signature(self):
return self['Signature'].str()
def summary(self):
sig, version = tuple(self[item].get() for item in ['Signature', 'Version'])
try:
t = FileRecord.lookup((sig, version))
description = "{:s} ({:s})".format(sig.rstrip(' \0'), t.__name__)
except KeyError:
            description = sig.rstrip(' \0')
if isinstance(version, builtins.tuple):
filtered = (item for item in version if item is not None)
return "{:s} v{:s}".format(description, '.'.join(map("{:d}".format, filtered)))
return ' '.join(map("{!s}".format, [description, repr(version)]))
@IndexRecord.define(type=4)
@IndexRecord.define(type=1000)
@IndexRecord.define(type=1001)
@IndexRecord.define(type=1002)
@IndexRecord.define(type=1004)
@IndexRecord.define(type=1005)
@IndexRecord.define(type=1006)
@IndexRecord.define(type=1009)
@IndexRecord.define(type=1010)
@IndexRecord.define(type=1011)
@IndexRecord.define(type=1012)
@IndexRecord.define(type=1013)
@IndexRecord.define(type=1014)
@IndexRecord.define(type=1015)
@IndexRecord.define(type=1016)
@IndexRecord.define(type=1019)
@IndexRecord.define(type=1021)
class File(pstruct.type):
def __Contents(self):
res, bs = self['Header'].li, self.blocksize()
key = tuple(res[item].get() for item in ['Signature', 'Version'])
t = FileRecord.lookup(key, ptype.block)
if issubclass(t, (ptype.block, parray.block, pstr.string)):
return dyn.clone(t, blocksize=lambda _, cb=bs - res.size(): cb)
return t
def __Extra(self):
res, fields = self.blocksize(), ['Header', 'Contents']
total = sum(self[fld].li.size() for fld in fields)
return dyn.block(res - total)
_fields_ = [
(Header, 'Header'),
(__Contents, 'Contents'),
(__Extra, 'Extra'),
]
def blocksize(self):
return self.source.size()
def Signature(self):
return self['Header'].Signature()
if __name__ == '__main__':
import os, sys, os.path
import ptypes, games.infinity as infinity
ptypes.setsource(ptypes.prov.file(sys.argv[1], 'rb'))
z = infinity.File()
z = z.l
sig = z['header']['signature'].str()
if sig == 'GAME':
a = z['contents']
Players = z['contents']['PCs']['offset'].d.l
G = z['contents']['GVar']['offset'].d.l
cre = { index : item['cre']['offset'].d.l for index, item in enumerate(Players) }
iterable = ((index, item['contents']) for index, item in cre.items() )
I = { index : (item['items(slots)'].d.l, item['items']['offset'].d.l) for index, item in iterable }
def search(name, G=z['contents']['GVar']['offset'].d.li):
regex = re.compile(fnmatch.translate("*{:s}*".format(name)))
Fmatcher = fcompose(operator.itemgetter('name'), operator.methodcaller('str'), regex.match, bool)
return [item for item in filter(Fmatcher, G)]
bookvars = search('BOOK')
#for item in z['contents']['gvar']['offset'].d.l:
# print('\t'.join([item['name'].str(), "{:d}".format(item['int'].int())]))
elif sig == 'SAV ':
members = z['contents']
everything = [bytearray(item['data'].d.l.serialize()) for item in z['contents']]
files = [infinity.File(source=ptypes.prov.bytes(item)).l for item in everything]
L, = (item for i, item in enumerate(files) if item.Signature() == 'AREA' and item['contents']['wed'].str() == 'AR0109')
#L, = (item for i, item in enumerate(files) if item.Signature() == 'AREA' and item['contents']['wed'].str() == 'AR0108')
#for l in L['contents']:
# for item in l['variables']['offset'].d.l:
# print('\t'.join([item['name'].str(), "{:d}".format(item['int'].int())]))
actors = [item for item in L['contents']['actors']['offset'].d.l]
creatures = { item['name'].str() : item['CRE']['offset'].d.l for item in actors }
doors = L['contents']['doors']['offset'].d.l
def save():
for item, data in zip(z['contents'], everything):
zdata = zlib.compress(data)
item['length(data)'].set(len(zdata))
item['data'] = ptype.block().set(zdata)
z.setoffset(z.getoffset(), recurse=True)
elif sig == 'BIFF':
a = z['contents']['items'].d
a = a.l
|
the-stack_106_18914
|
# -*- coding: utf-8 -*-
'''
Configuration of the GNOME desktop
========================================
Control the GNOME settings
.. code-block:: yaml
localdesktop_wm_prefs:
gnomedesktop.wm_preferences:
- user: username
- audible_bell: false
- action_double_click_titlebar: 'toggle-maximize'
- visual_bell: true
- num_workspaces: 6
localdesktop_lockdown:
gnomedesktop.desktop_lockdown:
- user: username
- disable_user_switching: true
localdesktop_interface:
gnomedesktop.desktop_interface:
- user: username
- clock_show_date: true
- clock_show_format: 12h
'''
# Import python libs
import logging
import re
log = logging.getLogger(__name__)
def _check_current_value(gnome_kwargs, value):
'''
Check the current value with the passed value
'''
current_value = __salt__['gnome.get'](**gnome_kwargs)
return str(current_value) == str(value)
def _do(name, gnome_kwargs, preferences):
'''
Worker function for the other states to use;
this handles all the gsettings magic.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
messages = []
for pref in preferences:
key = pref
value = preferences[pref]
if isinstance(value, bool):
ftype = 'boolean'
# need to convert boolean values to strings and make lowercase to
# pass to gsettings
value = str(value).lower()
elif isinstance(value, int):
ftype = 'int'
elif isinstance(value, str):
ftype = 'string'
else:
ftype = 'string'
gnome_kwargs.update({'key': key, 'value': value})
if _check_current_value(gnome_kwargs, value):
messages.append('{0} is already set to {1}'.format(key, value))
else:
result = __salt__['gnome.set'](**gnome_kwargs)
if result['retcode'] == 0:
messages.append('Setting {0} to {1}'.format(key, value))
ret['changes'][key] = '{0}:{1}'.format(key, value)
ret['result'] = True
else:
messages.append(result['stdout'])
ret['result'] = False
ret['comment'] = ', '.join(messages)
return ret
def wm_preferences(name,
user=None,
action_double_click_titlebar=None,
action_middle_click_titlebar=None,
action_right_click_titlebar=None,
application_based=None,
audible_bell=None,
auto_raise=None,
auto_raise_delay=None,
button_layout=None,
disable_workarounds=None,
focus_mode=None,
focus_new_windows=None,
mouse_button_modifier=None,
num_workspaces=None,
raise_on_click=None,
resize_with_right_button=None,
theme=None,
titlebar_font=None,
titlebar_uses_system_font=None,
visual_bell=None,
visual_bell_type=None,
workspace_names=None,
**kwargs):
'''
wm_preferences: sets values in the org.gnome.desktop.wm.preferences schema
'''
gnome_kwargs = {
'user': user,
'schema': 'org.gnome.desktop.wm.preferences'
}
preferences = ['action_double_click_titlebar',
'action_middle_click_titlebar', 'action_right_click_titlebar',
'application_based', 'audible_bell', 'auto_raise',
'auto_raise_delay', 'button_layout', 'disable_workarounds',
'focus_mode', 'focus_new_windows', 'mouse_button_modifier',
'num_workspaces', 'raise_on_click', 'resize_with_right_button',
'theme', 'titlebar_font', 'titlebar_uses_system_font',
'visual_bell', 'visual_bell_type', 'workspace_names']
preferences_hash = {}
for pref in preferences:
if pref in locals() and locals()[pref] is not None:
key = re.sub('_', '-', pref)
preferences_hash[key] = locals()[pref]
return _do(name, gnome_kwargs, preferences_hash)
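# Illustrative sketch (not part of the original module): calling the state from
# Python mirrors the SLS usage shown in the module docstring. Underscore kwargs
# are mapped to dashed gsettings keys (e.g. num_workspaces -> num-workspaces)
# before being passed to _do().
#   ret = wm_preferences('localdesktop_wm_prefs', user='username',
#                        audible_bell=False, num_workspaces=6)
#   # ret is a state-style dict: {'name': ..., 'result': ..., 'changes': {...}, 'comment': ...}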
def desktop_lockdown(name,
user=None,
disable_application_handlers=None,
disable_command_line=None,
disable_lock_screen=None,
disable_log_out=None,
disable_print_setup=None,
disable_printing=None,
disable_save_to_disk=None,
disable_user_switching=None,
user_administration_disabled=None,
**kwargs):
'''
desktop_lockdown: sets values in the org.gnome.desktop.lockdown schema
'''
gnome_kwargs = {
'user': user,
'schema': 'org.gnome.desktop.lockdown'
}
preferences = ['disable_application_handlers', 'disable_command_line',
'disable_lock_screen', 'disable_log_out', 'disable_print_setup',
'disable_printing', 'disable_save_to_disk',
'disable_user_switching', 'user_administration_disabled']
preferences_hash = {}
for pref in preferences:
if pref in locals() and locals()[pref] is not None:
key = re.sub('_', '-', pref)
preferences_hash[key] = locals()[pref]
return _do(name, gnome_kwargs, preferences_hash)
def desktop_interface(name,
user=None,
automatic_mnemonics=None,
buttons_have_icons=None,
can_change_accels=None,
clock_format=None,
clock_show_date=None,
clock_show_seconds=None,
cursor_blink=None,
cursor_blink_time=None,
cursor_blink_timeout=None,
cursor_size=None,
cursor_theme=None,
document_font_name=None,
enable_animations=None,
font_name=None,
gtk_color_palette=None,
gtk_color_scheme=None,
gtk_im_module=None,
gtk_im_preedit_style=None,
gtk_im_status_style=None,
gtk_key_theme=None,
gtk_theme=None,
gtk_timeout_initial=None,
gtk_timeout_repeat=None,
icon_theme=None,
menubar_accel=None,
menubar_detachable=None,
menus_have_icons=None,
menus_have_tearoff=None,
monospace_font_name=None,
show_input_method_menu=None,
show_unicode_menu=None,
text_scaling_factor=None,
toolbar_detachable=None,
toolbar_icons_size=None,
toolbar_style=None,
toolkit_accessibility=None,
**kwargs):
'''
desktop_interface: sets values in the org.gnome.desktop.interface schema
'''
gnome_kwargs = {
'user': user,
'schema': 'org.gnome.desktop.interface'
}
preferences = ['automatic_mnemonics', 'buttons_have_icons',
'can_change_accels', 'clock_format', 'clock_show_date',
'clock_show_seconds', 'cursor_blink', 'cursor_blink_time',
'cursor_blink_timeout', 'cursor_size', 'cursor_theme',
'document_font_name', 'enable_animations', 'font_name',
'gtk_color_palette', 'gtk_color_scheme', 'gtk_im_module',
'gtk_im_preedit_style', 'gtk_im_status_style', 'gtk_key_theme',
'gtk_theme', 'gtk_timeout_initial', 'gtk_timeout_repeat',
'icon_theme', 'menubar_accel', 'menubar_detachable',
'menus_have_icons', 'menus_have_tearoff', 'monospace_font_name',
'show_input_method_menu', 'show_unicode_menu',
'text_scaling_factor', 'toolbar_detachable', 'toolbar_icons_size',
'toolbar_style', 'toolkit_accessibility']
preferences_hash = {}
for pref in preferences:
if pref in locals() and locals()[pref] is not None:
key = re.sub('_', '-', pref)
preferences_hash[key] = locals()[pref]
return _do(name, gnome_kwargs, preferences_hash)
|
the-stack_106_18915
|
from bottle import route, run, request
import os
count = 0
HTML= """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>file uploader</title>
</head>
<body>
<form action="/upload" method="post" enctype="multipart/form-data">
Category: <input type="text" name="category" />
Select a file: <input type="file" name="upload" />
<input type="submit" value="Start upload" />
</form>
</body>
</html>
"""
@route('/', method='GET')
def index():
return HTML
@route('/count', method='GET')
def hello():
global count
count = count + 1
return "hello: %d" %(count)
@route('/upload', method='POST')
def do_upload():
category = request.forms.get('category')
upload = request.files.get('upload')
name, ext = os.path.splitext(upload.filename)
save_path = os.path.dirname(os.path.realpath(__file__))
save_file = os.path.join(save_path, category+".png")
upload.save(save_file) # save_file already contains the full target path (<category>.png)
return 'OK'
run(host="0.0.0.0", debug=True, reloader=True)
|
the-stack_106_18916
|
"""
This example fits a model to OGLE-2003-BLG-235/MOA-2003-BLG-53,
the first microlensing planet. Here we fix the *s* and *q* parameters for
the sake of simplicity. A wide range of other binary lens parameters is explored.
Note that it would be beneficial to turn *x_caustic_in* and *x_caustic_out*
into periodic variables.
Specific settings are in the file example_13.cfg.
Running this example takes 15-60 minutes on most modern machines.
"""
import os
import sys
import numpy as np
import emcee
import configparser
import MulensModel as mm
import example_15_read as read
def ln_like(theta, event, parameters_to_fit, print_models):
"""
Likelihood function. The values of *parameters_to_fit* are in *theta*.
The MulensModel Event instance *event* gives the event for which
calculations will be done. Boolean *print_models* controls whether
all models are printed.
"""
for (theta_, param) in zip(theta, parameters_to_fit):
setattr(event.model.parameters, param, theta_)
chi2 = event.get_chi2()
if print_models:
if 'x_caustic_in' not in parameters_to_fit:
print(chi2, *[t for t in theta], flush=True)
else:
theta_ = theta.tolist()
keys = ['t_0', 'u_0', 't_E', 'alpha']
theta_ += [getattr(event.model.parameters, key) for key in keys]
print(chi2, *theta_, flush=True)
return -0.5 * chi2
def ln_prior(theta, parameters_to_fit, event):
"""
Prior. Check if *theta* values for *parameters_to_fit* are within ranges
defined by *ln_prior.min* and *ln_prior.max*.
"""
outside = -np.inf
for (parameter, value) in ln_prior.min.items():
index = parameters_to_fit.index(parameter)
if theta[index] < value:
return outside
for (parameter, value) in ln_prior.max.items():
index = parameters_to_fit.index(parameter)
if theta[index] > value:
return outside
# Below we calculate prior probability based on x_caustic_in and x_caustic_out.
# This calculation assumes flat prior in (t_0, u_0, t_E, alpha), not in
# (x_caustic_in, x_caustic_out, t_caustic_in, t_caustic_out). If you want flat
# prior in the latter, then just replace following lines by "return 0".
inside = event.model.parameters.uniform_caustic_sampling.jacobian(
x_caustic_in=theta[parameters_to_fit.index('x_caustic_in')],
x_caustic_out=theta[parameters_to_fit.index('x_caustic_out')])
if inside == 0.:
return outside
else:
return np.log(inside)
def ln_prob(
theta, event, parameters_to_fit, print_models=False):
"""
Log probability of the model - combines ln_prior() and ln_like().
"""
ln_prior_ = ln_prior(theta, parameters_to_fit, event)
if not np.isfinite(ln_prior_):
return -np.inf
ln_like_ = ln_like(theta, event, parameters_to_fit, print_models)
if np.isnan(ln_like_):
return -np.inf
return ln_prior_ + ln_like_
def generate_random_parameters(parameters, starting, n, s=None, q=None):
"""
Generate *n* vectors of values of *parameters* according to distributions
specified in *starting*.
"""
values = []
for param in parameters:
settings = starting[param]
if settings[0] == 'gauss':
v = settings[2] * np.random.randn(n)
v += settings[1]
elif settings[0] == 'uniform':
v = np.random.uniform(
low=settings[1], high=settings[2], size=n)
elif settings[0] == 'log-uniform':
beg = np.log(settings[1])
end = np.log(settings[2])
v = np.exp(np.random.uniform(beg, end, n))
values.append(v)
if 'x_caustic_in' in parameters and 'x_caustic_out' in parameters:
sampling = mm.UniformCausticSampling(s=s, q=q)
(x_in, x_out) = sampling.get_uniform_sampling(n)
values[parameters.index('x_caustic_in')] = x_in
values[parameters.index('x_caustic_out')] = x_out
return np.array(values).T.tolist()
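# Illustrative example (values are not from example_13.cfg) of the *starting*
# settings expected above: each entry is [distribution, par_1, par_2], where the
# distribution is one of 'gauss', 'uniform', or 'log-uniform'.
#   starting = {'t_0': ['gauss', 2452848.0, 0.1],
#               'u_0': ['uniform', 0.01, 0.5],
#               't_E': ['log-uniform', 10., 200.]}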
# Read config file.
config_file = "example_13.cfg"
config = configparser.ConfigParser()
config.optionxform = str # So that "t_E" is not changed to "t_e".
config.read(config_file)
files = read.read_files_from_config(config)
model_settings = read.read_model_settings(config)
(parameters, starting) = read.read_parameters_start(config)
fixed_parameters = read.read_fix_parameters(config)
(min_values, max_values) = read.read_min_max(config)
ln_prior.min = min_values
ln_prior.max = max_values
emcee_settings = read.read_emcee_settings(config)
other_settings = read.read_other(config)
# Read photometric data.
k = {'comments': ['\\', '|']}
datasets = [mm.MulensData(file_name=f[0], phot_fmt=f[1], **k) for f in files]
# Generate starting values of parameters.
s = fixed_parameters.get('s', None)
q = fixed_parameters.get('q', None)
start = generate_random_parameters(
parameters, starting, emcee_settings['n_walkers'], s=s, q=q)
# Setup Event instance that combines model and data.
par = dict(zip(parameters, start[0]))
par = {**par, **fixed_parameters}
my_model = mm.Model(par, coords=model_settings['coords'])
if 'methods' in model_settings:
my_model.set_magnification_methods(model_settings['methods'])
if 'default_method' in model_settings:
my_model.set_default_magnification_method(model_settings['default_method'])
my_event = mm.Event(datasets=datasets, model=my_model)
# Prepare sampler.
n_dim = len(parameters)
print_models = other_settings.get('print_models', False)
args = (my_event, parameters, print_models)
sampler = emcee.EnsembleSampler(
emcee_settings['n_walkers'], n_dim, ln_prob, args=args)
# Run sampler.
sampler.run_mcmc(start, emcee_settings['n_steps'])
# Parse results.
n_burn = emcee_settings['n_burn']
samples = sampler.chain[:, n_burn:, :].reshape((-1, n_dim))
r_16 = np.percentile(samples, 16, axis=0)
r_50 = np.percentile(samples, 50, axis=0)
r_84 = np.percentile(samples, 84, axis=0)
print("\nFitted parameters:")
for i in range(n_dim):
if parameters[i] == 'q':
fmt = "{:} {:.7f} +{:.7f} -{:.7f}"
else:
fmt = "{:} {:.5f} +{:.5f} -{:.5f}"
print(fmt.format(parameters[i], r_50[i], r_84[i]-r_50[i], r_50[i]-r_16[i]))
# We extract best model parameters and chi2 from the chain:
prob = sampler.lnprobability[:, n_burn:].reshape((-1))
best_index = np.argmax(prob)
best_chi2 = prob[best_index] / -0.5
best = samples[best_index, :]
print("\nSmallest chi2 model:")
print(*[repr(b) if isinstance(b, float) else b.value for b in best])
print(best_chi2)
for (i, parameter) in enumerate(parameters):
setattr(my_event.model.parameters, parameter, best[i])
my_event.fit_fluxes()
# Expected results:
# t_0 ~ 2452848.06
# u_0 ~ 0.132
# t_E ~ 61.5
# alpha ~ 223.7
# It is possible that the degenerate solution is found instead, in which case
# u_0 ~ -0.132 and alpha ~ 136.3.
# You can inspect the output file and search for models similar to the above.
# The first one should appear within the first 600 models calculated.
if 'x_caustic_in' in parameters:
print(' t_0 = {:.5f}'.format(my_event.model.parameters.t_0))
print(' u_0 = {:.5f}'.format(my_event.model.parameters.u_0))
print(' t_E = {:.3f}'.format(my_event.model.parameters.t_E))
print(' alpha = {:.2f}\n'.format(my_event.model.parameters.alpha))
print("chi2: ", my_event.get_chi2()) # Expected value: ~1655
|
the-stack_106_18917
|
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The RLD developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "rld.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
rldd and rld-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run rldd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "rldd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "rld-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in rld.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
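# For example (illustrative values):
#   _rpchost_to_args("127.0.0.1:12001") -> ['-rpcconnect=127.0.0.1', '-rpcport=12001']
#   _rpchost_to_args("[::1]:12001")     -> ['-rpcconnect=::1', '-rpcport=12001']
#   _rpchost_to_args(None)              -> []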
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a rldd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "rldd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "rld-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple rldds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
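# Worked example (illustrative numbers): amount_in=10, amount_out=3, fee=0.1
#   amount = 3.1 and change = 6.9, which exceeds 2*amount (6.2), so the change
#   is split: 3.45 goes to a fresh change address and the remaining 3.45 to a
#   second new address, giving two smaller outputs instead of one big one.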
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create & broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures the transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
the-stack_106_18920
|
#!/usr/bin/env python
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2015
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Laboratory(RAL)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** See LICENCE.TXT if applicable for licence details
# ** 2015/02/02 20:17:38
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
import sys,os
import sys_path
import rctm_path
import obs
import time
import tim
proc_script = "run_proc.py"
exec_cmd = "fec"
Site_list_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "static_data/site_list")
Cdl_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "static_data/cdl")
Params_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "static_data/params")
input_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "rc_rec_tmt")
site_list = "%s/%s" % (Site_list_dir, "road_cond_sites.asc")
cdl_file = "%s/%s" % (Cdl_dir, "road_cond.cdl")
nbr_file = "%s/%s" % (Site_list_dir, "pp_nbr.nc")
age = "86400 86400"
obs_dir = "None"
concat_meso_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "concat_meso")
output_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "fec_rc_rec_tmt")
log_base = "%s/%s" % (rctm_path.Log_dir, "fec_rc_rec_tmt")
params_file = "%s/%s" % (Params_dir, "fec.rc.params")
try:
date_time = os.environ['TEST_MODE_TIME']
date_tup = time.strptime("%s" % (date_time),"%Y%m%d.%H%M%S")
fcst_time = tim.mkgmtime(date_tup)
date_str = "-d %s" % (date_time)
except:
fcst_time = time.time()
date_tup = time.gmtime(fcst_time)
date_time = time.strftime("%Y%m%d.%H%M%S", date_tup)
date_str = ''
fcst_time = (int(fcst_time) // 3600) * 3600
static_files = "%s %s %s" % (site_list, nbr_file, cdl_file)
concat_meso_file = obs.get_concat_meso_file(concat_meso_dir, date_time)
if(os.path.exists(concat_meso_file)):
concat_meso_str = "-f -c %s" % concat_meso_file
else:
concat_meso_str = ""
command = "%s %s -e %s -a %s -u %s %s -s %s -i %s %s -o %s -p %s -l %s %s" % (proc_script, date_str, exec_cmd, age, fcst_time, fcst_time, static_files, obs_dir, input_dir, output_dir, params_file, log_base, concat_meso_str)
#print "command = ", command
ret = os.system(command)
#ret = 0
if (ret != 0):
sys.exit(1)
else:
sys.exit(0)
|
the-stack_106_18923
|
import itertools
import os
from collections import OrderedDict
import numpy as np
import six
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.training import device_setter
from tensorflow.python.util import nest
from video_prediction.utils import ffmpeg_gif
from video_prediction.utils import gif_summary
IMAGE_SUMMARIES = "image_summaries"
EVAL_SUMMARIES = "eval_summaries"
def local_device_setter(num_devices=1,
ps_device_type='cpu',
worker_device='/cpu:0',
ps_ops=None,
ps_strategy=None):
if ps_ops is None:
ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']
if ps_strategy is None:
ps_strategy = device_setter._RoundRobinStrategy(num_devices)
if not six.callable(ps_strategy):
raise TypeError("ps_strategy must be callable")
def _local_device_chooser(op):
current_device = pydev.DeviceSpec.from_string(op.device or "")
node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
if node_def.op in ps_ops:
ps_device_spec = pydev.DeviceSpec.from_string(
'/{}:{}'.format(ps_device_type, ps_strategy(op)))
ps_device_spec.merge_from(current_device)
return ps_device_spec.to_string()
else:
worker_device_spec = pydev.DeviceSpec.from_string(worker_device or "")
worker_device_spec.merge_from(current_device)
return worker_device_spec.to_string()
return _local_device_chooser
def replace_read_ops(loss_or_losses, var_list):
"""
Replaces read ops of each variable in `var_list` with new read ops obtained
from `read_value()`, thus forcing to read the most up-to-date values of
the variables (which might incur copies across devices).
The graph is seeded from the tensor(s) `loss_or_losses`.
"""
# ops between var ops and the loss
ops = set(ge.get_walks_intersection_ops([var.op for var in var_list], loss_or_losses))
if not ops: # loss_or_losses doesn't depend on any var in var_list, so there is nothing to replace
return
# filter out variables that are not involved in computing the loss
var_list = [var for var in var_list if var.op in ops]
for var in var_list:
output, = var.op.outputs
read_ops = set(output.consumers()) & ops
for read_op in read_ops:
with tf.name_scope('/'.join(read_op.name.split('/')[:-1])):
with tf.device(read_op.device):
read_t, = read_op.outputs
consumer_ops = set(read_t.consumers()) & ops
# consumer_sgv might have multiple inputs, but we only care
# about replacing the input that is read_t
consumer_sgv = ge.sgv(consumer_ops)
consumer_sgv = consumer_sgv.remap_inputs([list(consumer_sgv.inputs).index(read_t)])
ge.connect(ge.sgv(var.read_value().op), consumer_sgv)
def print_loss_info(losses, *tensors):
def get_descendants(tensor, tensors):
descendants = []
for child in tensor.op.inputs:
if child in tensors:
descendants.append(child)
else:
descendants.extend(get_descendants(child, tensors))
return descendants
name_to_tensors = itertools.chain(*[tensor.items() for tensor in tensors])
tensor_to_names = OrderedDict([(v, k) for k, v in name_to_tensors])
print(tf.get_default_graph().get_name_scope())
for name, (loss, weight) in losses.items():
print(' %s (%r)' % (name, weight))
descendant_names = []
for descendant in set(get_descendants(loss, tensor_to_names.keys())):
descendant_names.append(tensor_to_names[descendant])
for descendant_name in sorted(descendant_names):
print(' %s' % descendant_name)
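# with_flat_batch wraps a function that expects `ndims`-dimensional inputs so it
# can be applied to tensors with extra leading batch/time dimensions: the extra
# leading dims are flattened into a single batch dim, the wrapped fn is applied,
# and the result is reshaped back. For example (illustrative), a conv net taking
# [N, H, W, C] can be applied to a [T, B, H, W, C] tensor via
# with_flat_batch(conv_fn)(x), which internally works on [T*B, H, W, C].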
def with_flat_batch(flat_batch_fn, ndims=4):
def fn(x, *args, **kwargs):
shape = tf.shape(x)
flat_batch_shape = tf.concat([[-1], shape[-(ndims-1):]], axis=0)
flat_batch_shape.set_shape([ndims])
flat_batch_x = tf.reshape(x, flat_batch_shape)
flat_batch_r = flat_batch_fn(flat_batch_x, *args, **kwargs)
r = nest.map_structure(lambda x: tf.reshape(x, tf.concat([shape[:-(ndims-1)], tf.shape(x)[1:]], axis=0)),
flat_batch_r)
return r
return fn
def transpose_batch_time(x):
if isinstance(x, tf.Tensor) and x.shape.ndims >= 2:
return tf.transpose(x, [1, 0] + list(range(2, x.shape.ndims)))
else:
return x
def dimension(inputs, axis=0):
shapes = [input_.shape for input_ in nest.flatten(inputs)]
s = tf.TensorShape([None])
for shape in shapes:
s = s.merge_with(shape[axis:axis + 1])
dim = s[0].value
return dim
def unroll_rnn(cell, inputs, scope=None, use_dynamic_rnn=True):
"""Chooses between dynamic_rnn and static_rnn if the leading time dimension is dynamic or not."""
dim = dimension(inputs, axis=0)
if use_dynamic_rnn or dim is None:
return tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32,
swap_memory=False, time_major=True, scope=scope)
else:
return static_rnn(cell, inputs, scope=scope)
def static_rnn(cell, inputs, scope=None):
"""Simple version of static_rnn."""
with tf.variable_scope(scope or "rnn") as varscope:
batch_size = dimension(inputs, axis=1)
state = cell.zero_state(batch_size, tf.float32)
flat_inputs = nest.flatten(inputs)
flat_inputs = list(zip(*[tf.unstack(flat_input, axis=0) for flat_input in flat_inputs]))
flat_outputs = []
for time, flat_input in enumerate(flat_inputs):
if time > 0:
varscope.reuse_variables()
input_ = nest.pack_sequence_as(inputs, flat_input)
output, state = cell(input_, state)
flat_output = nest.flatten(output)
flat_outputs.append(flat_output)
flat_outputs = [tf.stack(flat_output, axis=0) for flat_output in zip(*flat_outputs)]
outputs = nest.pack_sequence_as(output, flat_outputs)
return outputs, state
def maybe_pad_or_slice(tensor, desired_length):
length = tensor.shape.as_list()[0]
if length < desired_length:
paddings = [[0, desired_length - length]] + [[0, 0]] * (tensor.shape.ndims - 1)
tensor = tf.pad(tensor, paddings)
elif length > desired_length:
tensor = tensor[:desired_length]
assert tensor.shape.as_list()[0] == desired_length
return tensor
def tensor_to_clip(tensor):
if tensor.shape.ndims == 6:
# concatenate last dimension vertically
tensor = tf.concat(tf.unstack(tensor, axis=-1), axis=-3)
if tensor.shape.ndims == 5:
# concatenate batch dimension horizontally
tensor = tf.concat(tf.unstack(tensor, axis=0), axis=2)
if tensor.shape.ndims == 4:
# keep up to the first 3 channels
tensor = tf.image.convert_image_dtype(tensor, dtype=tf.uint8, saturate=True)
else:
raise NotImplementedError
return tensor
def tensor_to_image_batch(tensor):
if tensor.shape.ndims == 6:
# concatenate last dimension vertically
tensor= tf.concat(tf.unstack(tensor, axis=-1), axis=-3)
if tensor.shape.ndims == 5:
# concatenate time dimension horizontally
tensor = tf.concat(tf.unstack(tensor, axis=1), axis=2)
if tensor.shape.ndims == 4:
# keep up to the first 3 channels
tensor = tf.image.convert_image_dtype(tensor, dtype=tf.uint8, saturate=True)
else:
raise NotImplementedError
return tensor
def _as_name_scope_map(values):
name_scope_to_values = {}
for name, value in values.items():
name_scope = name.split('/')[0]
name_scope_to_values.setdefault(name_scope, {})
name_scope_to_values[name_scope][name] = value
return name_scope_to_values
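# For example (illustrative): {'gen/loss': a, 'gen/psnr': b, 'discrim/loss': c}
# is grouped by the first path component into
# {'gen': {'gen/loss': a, 'gen/psnr': b}, 'discrim': {'discrim/loss': c}}.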
def add_image_summaries(outputs, max_outputs=8, collections=None):
if collections is None:
collections = [tf.GraphKeys.SUMMARIES, IMAGE_SUMMARIES]
for name_scope, outputs in _as_name_scope_map(outputs).items():
with tf.name_scope(name_scope):
for name, output in outputs.items():
if max_outputs:
output = output[:max_outputs]
output = tensor_to_image_batch(output)
if output.shape[-1] not in (1, 3):
# these are feature maps, so just skip them
continue
tf.summary.image(name, output, collections=collections)
def add_gif_summaries(outputs, max_outputs=8, collections=None):
if collections is None:
collections = [tf.GraphKeys.SUMMARIES, IMAGE_SUMMARIES]
for name_scope, outputs in _as_name_scope_map(outputs).items():
with tf.name_scope(name_scope):
for name, output in outputs.items():
if max_outputs:
output = output[:max_outputs]
output = tensor_to_clip(output)
if output.shape[-1] not in (1, 3):
# these are feature maps, so just skip them
continue
gif_summary.gif_summary(name, output[None], fps=4, collections=collections)
def add_scalar_summaries(losses_or_metrics, collections=None):
for name_scope, losses_or_metrics in _as_name_scope_map(losses_or_metrics).items():
with tf.name_scope(name_scope):
for name, loss_or_metric in losses_or_metrics.items():
if isinstance(loss_or_metric, tuple):
loss_or_metric, _ = loss_or_metric
tf.summary.scalar(name, loss_or_metric, collections=collections)
def add_summaries(outputs, collections=None):
scalar_outputs = OrderedDict()
image_outputs = OrderedDict()
gif_outputs = OrderedDict()
for name, output in outputs.items():
if not isinstance(output, tf.Tensor):
continue
if output.shape.ndims == 0:
scalar_outputs[name] = output
elif output.shape.ndims == 4:
image_outputs[name] = output
elif output.shape.ndims > 4 and output.shape[4].value in (1, 3):
gif_outputs[name] = output
add_scalar_summaries(scalar_outputs, collections=collections)
add_image_summaries(image_outputs, collections=collections)
add_gif_summaries(gif_outputs, collections=collections)
def plot_buf(y):
def _plot_buf(y):
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import io
fig = Figure(figsize=(3, 3))
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.plot(y)
ax.grid(axis='y')
fig.tight_layout(pad=0)
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
return buf.getvalue()
s = tf.py_func(_plot_buf, [y], tf.string)
return s
def add_plot_image_summaries(metrics, collections=None):
if collections is None:
collections = [IMAGE_SUMMARIES]
for name_scope, metrics in _as_name_scope_map(metrics).items():
with tf.name_scope(name_scope):
for name, metric in metrics.items():
try:
buf = plot_buf(metric)
except:
continue
image = tf.image.decode_png(buf, channels=4)
image = tf.expand_dims(image, axis=0)
tf.summary.image(name, image, max_outputs=1, collections=collections)
def plot_summary(name, x, y, display_name=None, description=None, collections=None):
"""
Hack that uses pr_curve summaries for 2D plots.
Args:
x: 1-D tensor with values in increasing order.
y: 1-D tensor with static shape.
Note: tensorboard needs to be modified and compiled from source to disable
default axis range [-0.05, 1.05].
"""
from tensorboard import summary as summary_lib
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
with tf.control_dependencies([
tf.assert_equal(tf.shape(x), tf.shape(y)),
tf.assert_equal(y.shape.ndims, 1),
]):
y = tf.identity(y)
num_thresholds = y.shape[0].value
if num_thresholds is None:
raise ValueError('Size of y needs to be statically defined for num_thresholds argument')
summary = summary_lib.pr_curve_raw_data_op(
name,
true_positive_counts=tf.ones(num_thresholds),
false_positive_counts=tf.ones(num_thresholds),
true_negative_counts=tf.ones(num_thresholds),
false_negative_counts=tf.ones(num_thresholds),
precision=y[::-1],
recall=x[::-1],
num_thresholds=num_thresholds,
display_name=display_name,
description=description,
collections=collections)
return summary
def add_plot_summaries(metrics, x_offset=0, collections=None):
for name_scope, metrics in _as_name_scope_map(metrics).items():
with tf.name_scope(name_scope):
for name, metric in metrics.items():
plot_summary(name, x_offset + tf.range(tf.shape(metric)[0]), metric, collections=collections)
def add_plot_and_scalar_summaries(metrics, x_offset=0, collections=None):
for name_scope, metrics in _as_name_scope_map(metrics).items():
with tf.name_scope(name_scope):
for name, metric in metrics.items():
tf.summary.scalar(name, tf.reduce_mean(metric), collections=collections)
plot_summary(name, x_offset + tf.range(tf.shape(metric)[0]), metric, collections=collections)
def convert_tensor_to_gif_summary(summ):
if isinstance(summ, bytes):
summary_proto = tf.Summary()
summary_proto.ParseFromString(summ)
summ = summary_proto
summary = tf.Summary()
for value in summ.value:
tag = value.tag
try:
images_arr = tf.make_ndarray(value.tensor)
except TypeError:
summary.value.add(tag=tag, image=value.image)
continue
if len(images_arr.shape) == 5:
images_arr = np.concatenate(list(images_arr), axis=-2)
if len(images_arr.shape) != 4:
raise ValueError('Tensors must be 4-D or 5-D for gif summary.')
channels = images_arr.shape[-1]
if channels < 1 or channels > 4:
raise ValueError('Tensors must have 1, 2, 3, or 4 color channels for gif summary.')
encoded_image_string = ffmpeg_gif.encode_gif(images_arr, fps=4)
image = tf.Summary.Image()
image.height = images_arr.shape[-3]
image.width = images_arr.shape[-2]
image.colorspace = channels # 1: grayscale, 2: grayscale + alpha, 3: RGB, 4: RGBA
image.encoded_image_string = encoded_image_string
summary.value.add(tag=tag, image=image)
return summary
def compute_averaged_gradients(opt, tower_loss, **kwargs):
tower_gradvars = []
for loss in tower_loss:
with tf.device(loss.device):
gradvars = opt.compute_gradients(loss, **kwargs)
tower_gradvars.append(gradvars)
# Now compute global loss and gradients.
gradvars = []
with tf.name_scope('gradient_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in all_grads.items():
# Average gradients on the same device as the variables
# to which they apply.
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
return gradvars
# the next 3 function are from tensorpack:
# https://github.com/tensorpack/tensorpack/blob/master/tensorpack/graph_builder/utils.py
def split_grad_list(grad_list):
"""
Args:
grad_list: K x N x 2
Returns:
K x N: gradients
K x N: variables
"""
g = []
v = []
for tower in grad_list:
g.append([x[0] for x in tower])
v.append([x[1] for x in tower])
return g, v
def merge_grad_list(all_grads, all_vars):
"""
Args:
all_grads (K x N): gradients
all_vars(K x N): variables
Return:
K x N x 2: list of list of (grad, var) pairs
"""
return [list(zip(gs, vs)) for gs, vs in zip(all_grads, all_vars)]
def allreduce_grads(all_grads, average):
"""
All-reduce average the gradients among K devices. Results are broadcasted to all devices.
Args:
all_grads (K x N): List of list of gradients. N is the number of variables.
average (bool): average gradients or not.
Returns:
K x N: same as input, but each grad is replaced by the average over K devices.
"""
from tensorflow.contrib import nccl
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads
new_all_grads = [] # N x K
for grads in zip(*all_grads):
summed = nccl.all_sum(grads)
grads_for_devices = [] # K
for g in summed:
with tf.device(g.device):
# tensorflow/benchmarks didn't average gradients
if average:
g = tf.multiply(g, 1.0 / nr_tower)
grads_for_devices.append(g)
new_all_grads.append(grads_for_devices)
# transpose to K x N
ret = list(zip(*new_all_grads))
return ret
def _reduce_entries(*entries):
num_gpus = len(entries)
if entries[0] is None:
assert all(entry is None for entry in entries[1:])
reduced_entry = None
elif isinstance(entries[0], tf.Tensor):
if entries[0].shape.ndims == 0:
reduced_entry = tf.add_n(entries) / tf.to_float(num_gpus)
else:
reduced_entry = tf.concat(entries, axis=0)
elif np.isscalar(entries[0]) or isinstance(entries[0], np.ndarray):
if np.isscalar(entries[0]) or entries[0].ndim == 0:
reduced_entry = sum(entries) / float(num_gpus)
else:
reduced_entry = np.concatenate(entries, axis=0)
elif isinstance(entries[0], tuple) and len(entries[0]) == 2:
losses, weights = zip(*entries)
loss = tf.add_n(losses) / tf.to_float(num_gpus)
if isinstance(weights[0], tf.Tensor):
with tf.control_dependencies([tf.assert_equal(weight, weights[0]) for weight in weights[1:]]):
weight = tf.identity(weights[0])
else:
assert all(weight == weights[0] for weight in weights[1:])
weight = weights[0]
reduced_entry = (loss, weight)
else:
raise NotImplementedError
return reduced_entry
def reduce_tensors(structures, shallow=False):
if len(structures) == 1:
reduced_structure = structures[0]
else:
if shallow:
if isinstance(structures[0], dict):
shallow_tree = type(structures[0])([(k, None) for k in structures[0]])
else:
shallow_tree = type(structures[0])([None for _ in structures[0]])
reduced_structure = nest.map_structure_up_to(shallow_tree, _reduce_entries, *structures)
else:
reduced_structure = nest.map_structure(_reduce_entries, *structures)
return reduced_structure
def get_checkpoint_restore_saver(checkpoint, var_list=None, skip_global_step=False, restore_to_checkpoint_mapping=None):
if os.path.isdir(checkpoint):
# latest_checkpoint doesn't work when the path has special characters
checkpoint = tf.train.latest_checkpoint(checkpoint)
checkpoint_reader = tf.pywrap_tensorflow.NewCheckpointReader(checkpoint)
checkpoint_var_names = checkpoint_reader.get_variable_to_shape_map().keys()
restore_to_checkpoint_mapping = restore_to_checkpoint_mapping or (lambda name, _: name.split(':')[0])
if not var_list:
var_list = tf.global_variables()
restore_vars = {restore_to_checkpoint_mapping(var.name, checkpoint_var_names): var for var in var_list}
if skip_global_step and 'global_step' in restore_vars:
del restore_vars['global_step']
# restore variables that are both in the global graph and in the checkpoint
restore_and_checkpoint_vars = {name: var for name, var in restore_vars.items() if name in checkpoint_var_names}
restore_saver = tf.train.Saver(max_to_keep=1, var_list=restore_and_checkpoint_vars, filename=checkpoint)
# print out information regarding variables that were not restored or used for restoring
restore_not_in_checkpoint_vars = {name: var for name, var in restore_vars.items() if
name not in checkpoint_var_names}
checkpoint_not_in_restore_var_names = [name for name in checkpoint_var_names if name not in restore_vars]
if skip_global_step and 'global_step' in checkpoint_not_in_restore_var_names:
checkpoint_not_in_restore_var_names.remove('global_step')
if restore_not_in_checkpoint_vars:
print("global variables that were not restored because they are "
"not in the checkpoint:")
for name, _ in sorted(restore_not_in_checkpoint_vars.items()):
print(" ", name)
if checkpoint_not_in_restore_var_names:
print("checkpoint variables that were not used for restoring "
"because they are not in the graph:")
for name in sorted(checkpoint_not_in_restore_var_names):
print(" ", name)
return restore_saver, checkpoint
def pixel_distribution(pos, height, width):
batch_size = pos.get_shape().as_list()[0]
y, x = tf.unstack(pos, 2, axis=1)
x0 = tf.cast(tf.floor(x), 'int32')
x1 = x0 + 1
y0 = tf.cast(tf.floor(y), 'int32')
y1 = y0 + 1
Ia = tf.reshape(tf.one_hot(y0 * width + x0, height * width), [batch_size, height, width])
Ib = tf.reshape(tf.one_hot(y1 * width + x0, height * width), [batch_size, height, width])
Ic = tf.reshape(tf.one_hot(y0 * width + x1, height * width), [batch_size, height, width])
Id = tf.reshape(tf.one_hot(y1 * width + x1, height * width), [batch_size, height, width])
x0_f = tf.cast(x0, 'float32')
x1_f = tf.cast(x1, 'float32')
y0_f = tf.cast(y0, 'float32')
y1_f = tf.cast(y1, 'float32')
wa = ((x1_f - x) * (y1_f - y))[:, None, None]
wb = ((x1_f - x) * (y - y0_f))[:, None, None]
wc = ((x - x0_f) * (y1_f - y))[:, None, None]
wd = ((x - x0_f) * (y - y0_f))[:, None, None]
return tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])
def flow_to_rgb(flows):
"""The last axis should have dimension 2, for x and y values."""
def cartesian_to_polar(x, y):
magnitude = tf.sqrt(tf.square(x) + tf.square(y))
angle = tf.atan2(y, x)
return magnitude, angle
mag, ang = cartesian_to_polar(*tf.unstack(flows, axis=-1))
ang_normalized = (ang + np.pi) / (2 * np.pi)
mag_min = tf.reduce_min(mag)
mag_max = tf.reduce_max(mag)
mag_normalized = (mag - mag_min) / (mag_max - mag_min)
hsv = tf.stack([ang_normalized, tf.ones_like(ang), mag_normalized], axis=-1)
rgb = tf.image.hsv_to_rgb(hsv)
return rgb
|
the-stack_106_18924
|
"""
Defines the object class that uses a Kepler PRF model to compute apertures and its
metrics
"""
import os
import warnings
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.optimize import minimize_scalar
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib import patches
from astropy.io import fits
from . import PACKAGEDIR, DATAOUTDIR
from .utils import _make_A_polar
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=sparse.SparseEfficiencyWarning)
class KeplerPRF(object):
"""
Class to load PRF models computed from FFI, to create photometric apertures
"""
def __init__(
self,
prf_ws: np.array,
n_r_knots: int = 5,
n_phi_knots: int = 15,
rmin: float = 0.25,
rmax: float = 5,
):
"""
A KeplerPRF object is built by providing the hyperparameters of the spline
model and the weights of each basis spline. The hyperparameters make it
possible to reconstruct the same basis splines, while the weights are used
when evaluating the model on new data.
Parameters
__________
prf_ws : numpy.ndarray
Weights corresponding to each basis of the design matrix.
rmin : float
The minimum radius for the PRF model to be fit.
rmax : float
The maximum radius for the PRF model to be fit.
n_r_knots : int
Number of radial knots in the spline model.
n_phi_knots : int
Number of azimuthal knots in the spline model.
Attributes
----------
prf_ws : numpy.ndarray
Weights corresponding to each basis of the design matrix.
rmin : float
The minimum radius for the PRF model to be fit.
rmax : float
The maximum radius for the PRF model to be fit.
n_r_knots : int
Number of radial knots in the spline model.
n_phi_knots : int
Number of azimuthal knots in the spline model.
"""
self.prf_ws = prf_ws
self.rmin = rmin
self.rmax = rmax
self.n_r_knots = n_r_knots
self.n_phi_knots = n_phi_knots
@staticmethod
def load_from_file(
quarter: int = 5,
channel: int = 1,
):
"""
Loads a PRF model build from Kepler's FFI for a given quarter and channel.
Note: the file with the PRF models is a CSV file with a multi-index pandas
DataFrame; the FITS version is in development.
Parameters
----------
channel : int
Channel number of the FFI to be used to model the PRF. Valid values are
between 1 and 84.
quarter : int
Number of the quarter that will be used to model the PRF.
Valid values are between 1 and 17.
Returns
-------
KeplerPRF : KeplerPRF
An object with the PRF model ready to be evaluated in new data.
"""
# load PSF model
fname = "%s/data/ffi_prf_models_v0.1.0.csv" % (PACKAGEDIR)
if not os.path.isfile(fname):
raise FileNotFoundError("No PSF files: ", fname)
try:
tab = pd.read_csv(fname, index_col=0, header=[0, 1])
n_r_knots = int(tab.loc[channel, (str(quarter), "n_r_knots")])
n_phi_knots = int(tab.loc[channel, (str(quarter), "n_phi_knots")])
rmin = int(tab.loc[channel, (str(quarter), "rmin")])
rmax = int(tab.loc[channel, (str(quarter), "rmax")])
prf_ws = tab.loc[channel, str(quarter)].iloc[4:].values
except KeyError:
raise IOError(
"Quarter %i and channel %i has no PRF model data" % (quarter, channel)
)
return KeplerPRF(prf_ws, n_r_knots, n_phi_knots, rmin, rmax)
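# Illustrative usage sketch (the quarter/channel values and grid size are
# arbitrary examples, not defaults mandated by the data):
#   prf = KeplerPRF.load_from_file(quarter=5, channel=1)
#   dx, dy = np.mgrid[-5:6, -5:6].astype(float)  # pixel offsets from the source
#   psf_model = prf.evaluate_PSF(dx, dy)          # sparse, normalized PRF values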
def evaluate_PSF(self, dx, dy):
"""
Function to evaluate the PRF model on a grid of data. The function returns
the prediction of the model as normalized flux. The model is evaluated in
pixels with r <= floor(rmax) from the location of the source.
Parameters
----------
dx : numpy.ndarray
Distance between pixels (row direction) and source coordinates.
dy : numpy.ndarray
Distance between pixels (column direction) and source coordinates.
Returns
-------
source_model: scipy.sparse.csr_matrix
Normalized flux values of the PRF model evaluated on the dx, dy grid
"""
r = np.hypot(dx, dy)
phi = np.arctan2(dy, dx)
source_mask = r <= np.floor(self.rmax)
phi[phi >= np.pi] = np.pi - 1e-6
try:
dm = _make_A_polar(
phi[source_mask].ravel(),
r[source_mask].ravel(),
rmin=self.rmin,
rmax=self.rmax,
n_r_knots=self.n_r_knots,
n_phi_knots=self.n_phi_knots,
)
except ValueError:
dm = _make_A_polar(
phi[source_mask].ravel(),
r[source_mask].ravel(),
rmin=np.percentile(r[source_mask].ravel(), 1),
rmax=np.percentile(r[source_mask].ravel(), 99),
n_r_knots=self.n_r_knots,
n_phi_knots=self.n_phi_knots,
)
source_model = sparse.csr_matrix(r.shape)
m = 10 ** dm.dot(self.prf_ws)
source_model[source_mask] = m
source_model.eliminate_zeros()
# psf_models = source_model.multiply(1 / source_model.sum(axis=1)).tocsr()
return source_model
def diagnose_metrics(self, psf_models, idx=0, ax=None, plot=True):
"""
Function to evaluate the flux metrics for a single source as a function of
the parameter that controls the aperture size.
The flux metrics are computed by taking into account the PSF models of
neighbor sources.
This function is meant to be used only to generate the diagnostic or as a
helper function of `optimize_aperture()` to precalculate the values of the
metrics and find the optimal aperture in case of isolated sources, where the
optimal is the full aperture.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
idx : int
Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0].
ax : matplotlib.axes
Axis to be used to plot the figure
plot : boolean
Plot the metrics values.
Returns
-------
ax : matplotlib.axes
Figure axes
"""
compl, crowd, cut = [], [], []
for p in range(0, 101, 1):
cut.append(p)
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, p)
).toarray()[0]
crowd.append(self.compute_CROWDSAP(psf_models, mask, idx))
compl.append(self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask))
self.compl = np.array(compl)
self.crowd = np.array(crowd)
self.cut = np.array(cut)
if plot:
if ax is None:
fig, ax = plt.subplots(1)
ax.plot(self.cut, self.compl, label=r"FLFRCSAP")
ax.plot(self.cut, self.crowd, label=r"CROWDSAP")
ax.set_xlabel("Percentile")
ax.set_ylabel("Metric")
ax.legend()
return ax
def create_aperture_mask(self, psf_models, percentile=0, idx=None):
"""
Function to create the aperture mask of a given source for a given aperture
size. This function can compute the aperture mask for one or all sources
available in the psf_models.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
percentile : float
Percentile value that defines the isophote from the distribution of values
in the psf model of the source
idx : int
Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0]. If None, then it computes the
apertures for all sources in psf_models.
Returns
-------
mask : numpy.ndarray
Boolean array with the aperture mask.
completeness : numpy.ndarray
Flux metric indicating flux completeness for the selected aperture.
crowdeness : numpy.ndarray
Flux metric indicating flux contamination for the selected aperture.
"""
if idx is not None:
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, percentile)
).toarray()[0]
# recompute metrics for optimal mask
complet = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
crowd = self.compute_CROWDSAP(psf_models, mask, idx)
return mask, complet, crowd
else:
masks, completeness, crowdeness = [], [], []
for idx in range(psf_models.shape[0]):
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, percentile)
).toarray()[0]
masks.append(mask)
completeness.append(
self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
)
crowdeness.append(self.compute_CROWDSAP(psf_models, mask, idx))
return np.array(masks), np.array(completeness), np.array(crowdeness)
def optimize_aperture(
self, psf_models, idx=0, target_complet=0.9, target_crowd=0.9, max_iter=100
):
"""
Function to optimize the aperture mask for a given source. There are two
special cases:
* Isolated sources, where the optimal aperture is the full aperture.
* Optimizing for one single metric.
In these two cases no actual optimization is performed, and we use the
results from `diagnose_metrics()`.
The optimization is done using scipy's bounded Brent algorithm and a custom
loss function that uses a Leaky ReLU term to achieve the target value for
both metrics.
Parameters
----------
psf_models : scipy.sparse.csr_matrix
Sparse matrix with the PSF models of all sources in the field. It has shape
of [n_sources, n_pixels]
idx : int
Index of the source for which the metrics will be computed. Has to be a
number between 0 and psf_models.shape[0]. If None, then it computes the
apertures for all sources in psf_models.
target_complet : float
Value of the target completeness metric.
target_crowd : float
Value of the target crowdeness metric.
max_iter : int
Numer of maximum iterations to be performed by the optimizer.
Returns
-------
mask : numpy.ndarray
Boolean array with the aperture mask.
completeness : float
Flux metric indicating flux completeness for the selected aperture.
crowdeness : float
Flux metric indicating flux contamination for the selected aperture.
optimal_percentile : float
Percentile of the normalized flux distribution that defines the isophote.
"""
# Do special cases when optimizing for only one metric
self.diagnose_metrics(psf_models, idx=idx, plot=False)
if target_complet < 0 and target_crowd > 0:
optim_p = self.cut[np.argmax(self.crowd)]
elif target_crowd < 0 and target_complet > 0:
optim_p = self.cut[np.argmax(self.compl)]
# for isolated sources, only need to optimize for completeness, in case of
# asking for 2 metrics
elif target_complet > 0 and target_crowd > 0 and all(self.crowd > 0.99):
optim_p = self.cut[np.argmax(self.compl)]
else:
optim_params = {
"percentile_bounds": [5, 95],
"target_complet": target_complet,
"target_crowd": target_crowd,
"max_iter": max_iter,
"psf_models": psf_models,
"idx": idx,
}
minimize_result = minimize_scalar(
self._goodness_metric_obj_fun,
method="Bounded",
bounds=[5, 95],
options={"maxiter": max_iter, "disp": False},
args=(optim_params),
)
optim_p = minimize_result.x
mask = (
psf_models[idx] >= np.percentile(psf_models[idx].data, optim_p)
).toarray()[0]
# recompute metrics for optimal mask
complet = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
crowd = self.compute_CROWDSAP(psf_models, mask, idx)
return mask, complet, crowd, optim_p
def _goodness_metric_obj_fun(self, percentile, optim_params):
"""
The objective function to minimize with scipy.optimize.minimize_scalar called
during optimization of the photometric aperture.
Parameters
----------
percentile : int
Percentile of the normalized flux distribution that defines the isophote.
optim_params : dictionary
Dictionary with the variables needed to evaluate the metric:
psf_models
idx
target_complet
target_crowd
Returns
-------
        penalty : float
            Value of the objective function to be used for optimization.
"""
psf_models = optim_params["psf_models"]
idx = optim_params["idx"]
# Find the value where to cut
cut = np.percentile(psf_models[idx].data, int(percentile))
# create "isophot" mask with current cut
mask = (psf_models[idx] > cut).toarray()[0]
# Do not compute and ignore if target score < 0
if optim_params["target_complet"] > 0:
completMetric = self.compute_FLFRCSAP(psf_models[idx].toarray()[0], mask)
else:
completMetric = 1.0
# Do not compute and ignore if target score < 0
if optim_params["target_crowd"] > 0:
crowdMetric = self.compute_CROWDSAP(psf_models, mask, idx)
else:
crowdMetric = 1.0
# Once we hit the target we want to ease-back on increasing the metric
# However, we don't want to ease-back to zero pressure, that will
        # unconstrain the penalty term and cause the optimizer to run wild.
        # So, use a "Leaky ReLU":
        #   metric' = threshold + (metric - threshold) * leakFactor
        # (the hard-coded leak factors below differ per metric: 0.001 for
        # completeness and 0.1 for crowding)
if (
optim_params["target_complet"] > 0
and completMetric >= optim_params["target_complet"]
):
completMetric = optim_params["target_complet"] + 0.001 * (
completMetric - optim_params["target_complet"]
)
if (
optim_params["target_crowd"] > 0
and crowdMetric >= optim_params["target_crowd"]
):
crowdMetric = optim_params["target_crowd"] + 0.1 * (
crowdMetric - optim_params["target_crowd"]
)
penalty = -(completMetric + 10 * crowdMetric)
return penalty
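    # Worked illustration of the penalty above (values are made up): with
    # target_complet = target_crowd = 0.9, a mask yielding completeness 0.95 and
    # crowding 0.92 is softened to 0.9 + 0.001 * 0.05 = 0.90005 and
    # 0.9 + 0.1 * 0.02 = 0.902, so the returned penalty is
    # -(0.90005 + 10 * 0.902) = -9.92005 and the optimizer gains little by
    # pushing either metric further past its target.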
# def plot_mean_PSF(self, ax=None):
# """
# Function to plot the PRF model as created from the FFI. This is only for
# illustration purposes.
#
# Parameters
# ----------
# ax : matplotlib.axes
# Matlotlib axis can be provided, if not one will be created and returned
#
# Returns
# -------
# ax : matplotlib.axes
# Matlotlib axis with the figure
# """
# if not hasattr(self, "x_data"):
# raise AttributeError("Class doesn't have attributes to plot PSF model")
#
# if ax is None:
# fig, ax = plt.subplots(1, 2, figsize=(8, 3))
# vmin = -0.5
# vmax = -3
# cax = ax[0].scatter(
# self.x_data,
# self.y_data,
# c=self.f_data,
# marker=".",
# s=2,
# vmin=vmin,
# vmax=vmax,
# )
# fig.colorbar(cax, ax=ax[0])
# ax[0].set_title("Data mean flux")
# ax[0].set_ylabel("dy")
# ax[0].set_xlabel("dx")
#
# cax = ax[1].scatter(
# self.x_data,
# self.y_data,
# c=self.f_model,
# marker=".",
# s=2,
# vmin=vmin,
# vmax=vmax,
# )
# fig.colorbar(cax, ax=ax[1])
# ax[1].set_title("Average PSF Model")
# ax[1].set_xlabel("dx")
#
# return ax
def plot_aperture(self, flux, mask=None, ax=None, log=False):
"""
Function to plot the photometric aperture for a given source.
Parameters
----------
flux : numpy.ndarray
Data array with the flux image.
mask : numpy.ndarray
Boolean array with the aperture mask
log : boolean
Plot the image in log or linear scale.
ax : matplotlib.axes
            Matplotlib axis can be provided, if not one will be created and returned
Returns
-------
ax : matplotlib.axes
            Matplotlib axis with the figure
"""
if ax is None:
fig, ax = plt.subplots(1, figsize=(5, 5))
pc = ax.pcolor(
flux,
shading="auto",
norm=colors.LogNorm() if log else None,
)
plt.colorbar(pc, label="", fraction=0.038, ax=ax)
ax.set_aspect("equal", adjustable="box")
ax.set_title("")
if mask is not None:
for i in range(flux.shape[0]):
for j in range(flux.shape[1]):
if mask[i, j]:
rect = patches.Rectangle(
xy=(j, i),
width=1,
height=1,
color="red",
fill=False,
hatch="",
)
ax.add_patch(rect)
zoom = np.argwhere(mask == True)
ax.set_ylim(
np.maximum(0, zoom[0, 0] - 3),
np.minimum(zoom[-1, 0] + 3, flux.shape[0]),
)
ax.set_xlim(
np.maximum(0, zoom[0, -1] - 3),
np.minimum(zoom[-1, -1] + 3, flux.shape[1]),
)
else:
ax.set_xlim(np.argmax(flux))
ax.set_ylim()
return ax
@staticmethod
def compute_FLFRCSAP(psf_model, mask):
"""
Compute fraction of target flux enclosed in the optimal aperture to total flux
for a given source (flux completeness).
Parameters
----------
psf_model: numpy ndarray
Array with the PSF model for the target source. It has shape [n_pixels]
mask: boolean array
Array of boolean indicating the aperture for the target source.
Returns
-------
FLFRCSAP: float
Completeness metric
"""
return psf_model[mask].sum() / psf_model.sum()
@staticmethod
def compute_CROWDSAP(psf_models, mask, idx):
"""
Compute the ratio of target flux relative to flux from all sources within
the photometric aperture (i.e. 1 - Crowdeness).
Parameters
----------
psf_models: numpy ndarray
Array with the PSF models for all targets in the cutout. It has shape
[n_sources, n_pixels].
mask: boolean array
Array of boolean indicating the aperture for the target source.
idx: int
Index of the source to compute the metric. It has to be a number between
0 and psf_models.shape[0].
Returns
-------
CROWDSAP: float
Crowdeness metric
"""
ratio = (
psf_models.multiply(1 / psf_models.sum(axis=0)).tocsr()[idx].toarray()[0]
)
return ratio[mask].sum() / mask.sum()
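if __name__ == "__main__":
    # Minimal standalone sketch (not part of the class above): the toy 3-source
    # PSF models and the aperture mask below are made up, purely to illustrate
    # the FLFRCSAP and CROWDSAP formulas implemented by the two static methods.
    import numpy as np
    from scipy import sparse
    toy_psf_models = sparse.csr_matrix(
        np.array(
            [
                [0.7, 0.2, 0.1, 0.0],  # source 0
                [0.1, 0.6, 0.2, 0.1],  # source 1
                [0.0, 0.1, 0.3, 0.6],  # source 2
            ]
        )
    )
    toy_mask = np.array([True, True, False, False])  # toy aperture for source 0
    target = toy_psf_models[0].toarray()[0]
    flfrcsap = target[toy_mask].sum() / target.sum()  # flux completeness
    ratio = (
        toy_psf_models.multiply(1 / toy_psf_models.sum(axis=0)).tocsr()[0].toarray()[0]
    )
    crowdsap = ratio[toy_mask].sum() / toy_mask.sum()  # 1 - crowding
    print(f"FLFRCSAP = {flfrcsap:.3f}, CROWDSAP = {crowdsap:.3f}")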
|
the-stack_106_18925
|
from os.path import join
from typing import List
from catcher.steps.external_step import ExternalStep
from catcher.steps.step import Step, update_variables
from catcher.utils.logger import debug
from catcher.utils.misc import fill_template_str
class S3(ExternalStep):
"""
Allows you to get/put/list/delete files in Amazon `S3 <https://aws.amazon.com/s3/>`_
Useful hint: for local testing you can use `Minio <https://min.io/>`_ run in docker as it is S3 API compatible.
:Input:
:config: s3 config object, used in other s3 commands.
- key_id: access key id
- secret_key: secret for the access key
- region: region. *Optional*.
- url: endpoint_url url. Can be used to run against Minio. *Optional*
:put: put file to s3
- config: s3 config object
    - path: path including the filename. The first dir is treated as a bucket,
            e.g. /my_bucket/subdir/file or my_bucket/subdir/file
- content: file's content. *Optional*
- content_resource: path to a file. *Optional*. Either `content` or `content_resource` must be set.
:get: Get file from s3
- config: s3 config object
- path: path including the filename
:list: List S3 directory
- config: s3 config object
- path: path to the directory being listed
:delete: Delete file or directory from S3
- config: s3 config object
    - path: path to be deleted
- recursive: if path is directory and recursive is true - will delete directory with all content. *Optional*,
default is false.
:Examples:
Put data into s3
::
s3:
put:
config: '{{ s3_config }}'
path: /foo/bar/file.csv
content: '{{ my_data }}'
Get data from s3
::
s3:
get:
config: '{{ s3_config }}'
path: /foo/bar/file.csv
register: {csv: '{{ OUTPUT }}'}
List files
::
s3:
list:
config: '{{ s3_config }}'
path: /foo/bar/
register: {files: '{{ OUTPUT }}'}
Delete file
::
s3:
delete:
config: '{{ s3_config }}'
path: '/remove/me'
recursive: true
"""
@update_variables
def action(self, includes: dict, variables: dict) -> any:
body = self.simple_input(variables)
method = Step.filter_predefined_keys(body) # get/put/list
oper = body[method]
conf = oper['config']
import boto3
s3_client = boto3.client('s3',
endpoint_url=conf.get('url'),
aws_access_key_id=conf['key_id'],
aws_secret_access_key=conf['secret_key'],
region_name=conf.get('region')
)
path = oper['path']
if method == 'get':
return variables, self._get_file(s3_client, path)
elif method == 'put':
content = oper.get('content')
if not content:
if 'content_resource' not in oper:
raise ValueError('No content for s3 put')
with open(join(variables['RESOURCES_DIR'], oper['content_resource']), 'r') as f:
content = f.read()
content = fill_template_str(content, variables)
return variables, self._put_file(s3_client, path, content)
elif method == 'list':
return variables, self._list_dir(conf, path)
elif method == 'delete':
return variables, self._delete(conf, path)
else:
raise AttributeError('unknown method: ' + method)
def _get_file(self, s3_client, path):
bucket, filename = self._parse_path(path)
debug('Get {}/{}'.format(bucket, filename))
response = s3_client.get_object(Bucket=bucket, Key=filename)
# TODO check response
return response['Body'].read().decode()
def _put_file(self, s3_client, path, content, retry=True):
from botocore.exceptions import ClientError
bucket, filename = self._parse_path(path)
debug('Put {}/{}'.format(bucket, filename))
try:
res = s3_client.put_object(Bucket=bucket, Key=filename, Body=content)
return self._check_response(res)
except ClientError as e:
if retry and hasattr(e, 'response') and 'Error' in e.response and 'Code' in e.response['Error']:
if e.response['Error']['Code'] == 'NoSuchBucket':
res = s3_client.create_bucket(Bucket=bucket)
self._check_response(res)
return self._put_file(s3_client, path, content, False)
raise e
def _list_dir(self, conf: dict, path: str) -> List[str]:
import boto3
res = boto3.resource('s3',
endpoint_url=conf.get('url'),
aws_access_key_id=conf['key_id'],
aws_secret_access_key=conf['secret_key'],
region_name=conf.get('region')
)
bucket, rest = self._parse_path(path)
bucket = res.Bucket(bucket)
data = []
for obj in bucket.objects.all():
if obj.key.startswith(rest):
data += [obj.key]
return data
def _delete(self, conf: dict, path: str):
bucket, filename = self._parse_path(path)
try:
files = self._list_dir(conf, path)
except:
files = []
if len(files) > 1 or (len(files) == 1 and not path.endswith(files[0])):
[self._delete(conf, join(bucket, file)) for file in files] # delete files in directory
debug('Delete {}/{}'.format(bucket, filename))
import boto3
res = boto3.resource('s3',
endpoint_url=conf.get('url'),
aws_access_key_id=conf['key_id'],
aws_secret_access_key=conf['secret_key'],
region_name=conf.get('region')
)
obj = res.Object(bucket, filename)
obj.delete()
@staticmethod
def _check_response(res):
if 'ResponseMetadata' in res and 'HTTPStatusCode' in res['ResponseMetadata'] \
and res['ResponseMetadata']['HTTPStatusCode'] == 200:
return True
raise Exception("Operation failed")
@staticmethod
def _parse_path(path: str):
splitted = [s for s in path.split('/') if s != '']
return splitted[0], '/'.join(splitted[1:])
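if __name__ == '__main__':
    # Hedged sanity check, not part of the catcher step itself (importing this
    # module still requires catcher to be installed): the first path segment is
    # treated as the bucket name and the remainder as the object key.
    assert S3._parse_path('/my_bucket/subdir/file.csv') == ('my_bucket', 'subdir/file.csv')
    assert S3._parse_path('my_bucket/file') == ('my_bucket', 'file')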
|
the-stack_106_18926
|
""""
CLI for command line arguments for manage-study
"""
import argparse
# Subparser tool names
c_TOOL_LIST_STUDY = "list-studies"
c_TOOL_CLUSTER = "upload-cluster"
c_TOOL_EXPRESSION = "upload-expression"
c_TOOL_METADATA = "upload-metadata"
c_TOOL_PERMISSION = "permission"
c_TOOL_STUDY = "create-study"
c_TOOL_STUDY_EDIT_DESC = "edit-study-description"
c_TOOL_STUDY_GET_ATTR = "get-study-attribute"
c_TOOL_STUDY_GET_EXT = "get-study-external-resources"
c_TOOL_STUDY_DEL_EXT = "delete-study-external-resources"
c_TOOL_STUDY_CREATE_EXT = "create-study-external-resource"
def create_parser():
args = argparse.ArgumentParser(
prog="manage-study",
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
args.add_argument(
"--token",
default=None,
help="Personal token after logging into Google (OAuth2). This token is not persisted after the finish of the script.",
)
args.add_argument(
"--dry-run",
action="store_true",
help="Walk through and log what would occur, without performing the actions.",
)
args.add_argument(
"--no-validate",
dest="validate",
action="store_false",
help="Do not check file locally before uploading.",
)
args.add_argument(
"--no-user-agent",
dest="user_agent",
action="store_false",
help="Do not send user-agent string for ingest pipeline version.",
)
args.add_argument(
"--verbose", action="store_true", help="Whether to print debugging information"
)
args.add_argument(
"--environment",
default="production",
choices=["development", "staging", "production"],
help="API environment to use",
)
# Create tools (subparser)
subargs = args.add_subparsers(dest="command")
## List studies subparser
parser_list_studies = subargs.add_parser(
c_TOOL_LIST_STUDY,
help='List studies. "'
+ args.prog
+ " "
+ c_TOOL_LIST_STUDY
+ ' -h" for more details',
)
parser_list_studies.add_argument(
"--summary",
dest="summarize_list",
action="store_true",
help="Do not list, only summarize number of accessible studies",
)
## Create study subparser
parser_create_studies = subargs.add_parser(
c_TOOL_STUDY,
help='Create a study. "'
+ args.prog
+ " "
+ c_TOOL_STUDY
+ ' -h" for more details',
)
parser_create_studies.add_argument(
"--description",
dest="study_description",
default="Single Cell Genomics Study",
help="Short description of the study",
)
parser_create_studies.add_argument(
"--study-name", required=True, help="Short name of the study"
)
parser_create_studies.add_argument(
"--branding",
default=None,
help="Portal branding to associate with the study",
)
parser_create_studies.add_argument(
"--billing",
default=None,
help="Portal billing project to associate with the study",
)
parser_create_studies.add_argument(
"--is-private", action="store_true", help="Whether the study is private"
)
# Create edit description subparser
parser_edit_description = subargs.add_parser(
c_TOOL_STUDY_EDIT_DESC,
help='Edit a study description. "'
+ args.prog
+ " "
+ c_TOOL_STUDY_EDIT_DESC
+ ' -h" for more details',
)
parser_edit_description.add_argument(
"--study-name",
required=True,
help="Name of the study for which to edit description.",
)
parser_edit_description.add_argument(
"--new-description",
required=True,
help="New description of the study to replace current one.",
)
parser_edit_description.add_argument(
"--from-file",
action="store_true",
help="If true, assumes new_description argument is name pointing to file containing new_description.",
)
parser_edit_description.add_argument(
"--accept-html",
action="store_true",
help="If true, will allow HTML formatting in new description.",
)
## Create study get attribute subparser
parser_get_attribute = subargs.add_parser(
c_TOOL_STUDY_GET_ATTR,
help='Get a study attribute (such as cell_count, etc). "'
+ args.prog
+ " "
+ c_TOOL_STUDY_GET_ATTR
+ ' -h" for more details',
)
parser_get_attribute.add_argument(
"--study-name",
required=True,
help="Name of the study from which to get attribute.",
)
parser_get_attribute.add_argument(
"--attribute",
required=True,
help="Attribute to return (such as cell_count, etc).",
)
## Create study get external resources subparser
parser_get_ext_resources = subargs.add_parser(
c_TOOL_STUDY_GET_EXT,
help='Get study external resources for a study. "'
+ args.prog
+ " "
+ c_TOOL_STUDY_GET_EXT
+ ' -h" for more details',
)
parser_get_ext_resources.add_argument(
"--study-name",
required=True,
help="Name of the study from which to get resources.",
)
## Create study delete external resources subparser
parser_delete_ext_resources = subargs.add_parser(
c_TOOL_STUDY_DEL_EXT,
help='Delete all study external resources for a study. "'
+ args.prog
+ " "
+ c_TOOL_STUDY_DEL_EXT
+ ' -h" for more details',
)
parser_delete_ext_resources.add_argument(
"--study-name",
required=True,
help="Name of the study from which to delete resources.",
)
## Create study new external resource subparser
parser_create_ext_resource = subargs.add_parser(
c_TOOL_STUDY_CREATE_EXT,
help='Create a new external resource for a study. "'
+ args.prog
+ " "
+ c_TOOL_STUDY_CREATE_EXT
+ ' -h" for more details',
)
parser_create_ext_resource.add_argument(
"--study-name",
required=True,
help="Name of the study to which to add resource.",
)
parser_create_ext_resource.add_argument(
"--title",
required=True,
help="Title of resource.",
)
parser_create_ext_resource.add_argument(
"--url",
required=True,
help="URL of resource.",
)
parser_create_ext_resource.add_argument(
"--description",
required=True,
help="Tooltip description of resource.",
)
parser_create_ext_resource.add_argument(
"--publication-url",
action="store_true",
help="Whether resource is publication URL.",
)
# TODO: Fix permissions subparser (SCP-2024)
# ## Permissions subparser
# parser_permissions = subargs.add_parser(
# c_TOOL_PERMISSION,
# help="Change user permissions in a study. \""
# + args.prog
# + " "
# + c_TOOL_PERMISSION
# + " -h\" for more details",
# )
# parser_permissions.add_argument(
# '--email',
# dest='email',
# required=True,
# default='Single Cell Genomics Study',
# help='User email to update study permission.',
# )
# parser_permissions.add_argument(
# '--study-name', dest='study_name', required=True, help='Short name of the study.'
# )
# parser_permissions.add_argument(
# '--access',
# dest='permission',
# choices=scp_api.c_PERMISSIONS,
# required=True,
# help='Access to give the user. Must be one of the following values: '
# + " ".join(scp_api.c_PERMISSIONS),
# )
## Create cluster file upload subparser
parser_upload_cluster = subargs.add_parser(
c_TOOL_CLUSTER,
help='Upload a cluster file. "'
+ args.prog
+ " "
+ c_TOOL_CLUSTER
+ ' -h" for more details',
)
parser_upload_cluster.add_argument(
"--file", dest="cluster_file", required=True, help="Cluster file to load."
)
parser_upload_cluster.add_argument(
"--study-name",
required=True,
help="Name of the study to add the file.",
)
parser_upload_cluster.add_argument(
"--description",
default="Coordinates and optional metadata to visualize clusters.",
help="Text describing the cluster file.",
)
parser_upload_cluster.add_argument(
"--cluster-name",
required=True,
help="Name of the clustering that will be used to refer to the plot.",
)
parser_upload_cluster.add_argument(
"--x", dest="x_label", default=None, help="X axis label (test)."
)
parser_upload_cluster.add_argument(
"--y", dest="y_label", default=None, help="Y axis label (test)."
)
parser_upload_cluster.add_argument(
"--z", dest="z_label", default=None, help="Z axis label (test)."
)
## Create expression file upload subparser
parser_upload_expression = subargs.add_parser(
c_TOOL_EXPRESSION,
help='Upload a gene expression matrix file. "'
+ args.prog
+ " "
+ c_TOOL_EXPRESSION
+ ' -h" for more details',
)
parser_upload_expression.add_argument(
"--file", dest="expression_file", required=True, help="Expression file to load."
)
parser_upload_expression.add_argument(
"--study-name",
required=True,
help="Name of the study to add the file.",
)
parser_upload_expression.add_argument(
"--description",
default="Gene expression in cells",
help="Text describing the gene expression matrix file.",
)
parser_upload_expression.add_argument(
"--species",
required=True,
help="Species from which the data is generated.",
)
parser_upload_expression.add_argument(
"--genome",
required=True,
help="Genome assembly used to generate the data.",
)
# TODO: Add upstream support for this in SCP RESI API
# parser_upload_expression.add_argument(
# '--axis_label', dest='axis_label',
# default='',
# help=''
# )
## Create metadata file upload subparser
parser_upload_metadata = subargs.add_parser(
c_TOOL_METADATA,
help='Upload a metadata file. "'
+ args.prog
+ " "
+ c_TOOL_METADATA
+ ' -h" for more details',
)
parser_upload_metadata.add_argument(
"--file", dest="metadata_file", required=True, help="Metadata file to load."
)
parser_upload_metadata.add_argument(
"--use-convention",
help="Whether to use metadata convention: validates against standard vocabularies, and will enable faceted search on this data",
action="store_true",
)
parser_upload_metadata.add_argument(
"--validate-against-convention",
help="Validates against standard vocabularies prior to upload",
action="store_true",
)
parser_upload_metadata.add_argument(
"--study-name",
required=True,
help="Name of the study to add the file.",
)
parser_upload_metadata.add_argument(
"--description",
default="",
help="Text describing the metadata file.",
)
return args
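if __name__ == "__main__":
    # Illustrative only (argparse is the only dependency): parse a sample command
    # line to show how the global flags and the subcommands fit together.
    parser = create_parser()
    sample = parser.parse_args(["--environment", "staging", "list-studies", "--summary"])
    print(sample.command, sample.environment, sample.summarize_list)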
|
the-stack_106_18927
|
import sys
import mido
mido.set_backend("mido.backends.pygame")
# import serialcomm as comm
import enginecomm as comm
import playertools
def handleRaw(msg, sd): # sd = song data
if isinstance(msg, mido.MetaMessage) or not hasattr(msg, "note"): # Filter out meta messages
return
# Get the channel it's on and convert to floppy track
if msg.channel not in sd["TRACK_KEY"]: return
#track = sd["TRACK_KEY"].index(msg.channel)
tracks = [i for i, x in enumerate(sd["TRACK_KEY"]) if x == msg.channel]
# Determine if it's a NOTE_ON or NOTE_OFF
state = msg.type=="note_on" and msg.velocity > 0
# Convert the message to a floppy note then transpose
note = msg.note-46
note += sd["TRANSPOSE"] # Transposition
# Capping on either side of the range
if note > 31:
note = 20+((note-32)%12)
if note <= 0:
# Fix this later
print("Jaspar")
note = abs(note)%12
# If it isn't playable by the florchestra
#if note <= 0 or note > 31: return
if note <= 0: return
if note > 31:
print(note)
return
for track in tracks:
if state:
comm.sendNote(note, track)
else:
comm.sendNote(0, track)
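# Worked example of the note mapping in handleRaw (assuming TRANSPOSE is 0):
#   MIDI 69 (A4) -> 69 - 46 = 23, already inside the playable range 1..31
#   MIDI 80      -> 34 > 31, folded down to 20 + ((34 - 32) % 12) = 22
#   MIDI 44      -> -2 <= 0, folded up to abs(-2) % 12 = 2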
if len(sys.argv) < 2 or len(sys.argv) > 3:
print("Invalid argument count!")
sys.exit()
print("Getting song info...")
songdata = playertools.readSongData("../songs/"+sys.argv[1])
print("Reading MIDI file into memory...")
songfile = mido.MidiFile("../songs/"+songdata["MIDI_NAME"]+".mid")
print("Starting engine...")
comm.init(sys.argv[2] if len(sys.argv) == 3 else "1", "0")
print("Ready to begin.")
for message in songfile.play():
handleRaw(message, songdata)
comm.exit()
|
the-stack_106_18929
|
# coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
from k8s.models.common import ObjectMeta
from k8s.models.custom_resource_definition import CustomResourceDefinitionNames, CustomResourceDefinitionSpec, \
CustomResourceDefinition
LOG = logging.getLogger(__name__)
class CrdResourcesSyncerApiextensionsV1Beta1(object):
@staticmethod
def _create_or_update(kind, plural, short_names, group):
name = "%s.%s" % (plural, group)
metadata = ObjectMeta(name=name)
names = CustomResourceDefinitionNames(kind=kind, plural=plural, shortNames=short_names)
spec = CustomResourceDefinitionSpec(group=group, names=names, version="v1")
definition = CustomResourceDefinition.get_or_create(metadata=metadata, spec=spec)
definition.save()
LOG.info("Created or updated CustomResourceDefinition with name %s", name)
@classmethod
def update_crd_resources(cls):
cls._create_or_update("Application", "applications", ("app", "fa"), "fiaas.schibsted.io")
cls._create_or_update("ApplicationStatus", "application-statuses", ("status", "appstatus", "fs"),
"fiaas.schibsted.io")
|
the-stack_106_18930
|
import os
import sys
import unittest
import numpy
from os.path import join as pjn
import QENSmodels
# resolve path to reference_data
this_module_path = sys.modules[__name__].__file__
data_dir = pjn(os.path.dirname(this_module_path), 'reference_data')
class TestLorentzian(unittest.TestCase):
""" Tests QENSmodels.lorentzian function """
def test_type_output(self):
""" Test type of output depending on type of input x """
# x = float
self.assertIsInstance(QENSmodels.lorentzian(1), numpy.float64)
# x = list
self.assertIsInstance(QENSmodels.lorentzian([1, 2]), numpy.ndarray)
# x = numpy.array
self.assertIsInstance(QENSmodels.lorentzian(numpy.array([1, 2])),
numpy.ndarray)
def test_size_output(self):
""" Test size of output depending on type of input x """
self.assertEqual(QENSmodels.lorentzian(1).size, 1)
self.assertEqual(QENSmodels.lorentzian([1, 2]).size, 2)
def test_parameter_value(self):
""" Test the definition of function in border edge cases"""
# hwhm = 0
x = [0, 1, 2, 3, 4, 5]
numpy.testing.assert_array_equal(
QENSmodels.lorentzian(x, 0.3, 0.4, 0.0),
QENSmodels.delta(x, 0.3, 0.4))
def test_reference_data(self):
""" Test output values in comparison with reference data
(file in 'reference data' folder) """
# load reference data
ref_data = numpy.loadtxt(pjn(data_dir, "lorentzian_ref_data.dat"))
# generate data from current model
# for info: the parameters' values used for the reference data are
# specified in the README file in the 'reference data' folder
w = numpy.arange(-2, 2.01, 0.01)
actual_data = numpy.column_stack([w,
QENSmodels.lorentzian(w, scale=3.,
center=0.25,
hwhm=0.4)])
# compare 2 arrays
numpy.testing.assert_array_almost_equal(ref_data,
actual_data,
decimal=12)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_18931
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""CSV Handling utilities
"""
from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface
from ..io import add_traits
class CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):
in_file = File(
exists=True, mandatory=True, desc="Input comma-seperated value (CSV) file"
)
header = traits.Bool(
False, usedefault=True, desc="True if the first line is a column header"
)
class CSVReader(BaseInterface):
"""
Examples
--------
>>> reader = CSVReader() # doctest: +SKIP
>>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP
>>> out = reader.run() # doctest: +SKIP
>>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP
True
>>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP
True
>>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP
True
>>> reader = CSVReader() # doctest: +SKIP
>>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP
>>> reader.inputs.header = True # doctest: +SKIP
>>> out = reader.run() # doctest: +SKIP
>>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP
True
>>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP
True
>>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP
True
"""
input_spec = CSVReaderInputSpec
output_spec = DynamicTraitedSpec
_always_run = True
def _append_entry(self, outputs, entry):
for key, value in zip(self._outfields, entry):
outputs[key].append(value)
return outputs
def _parse_line(self, line):
line = line.replace("\n", "")
entry = [x.strip() for x in line.split(",")]
return entry
def _get_outfields(self):
with open(self.inputs.in_file, "r") as fid:
entry = self._parse_line(fid.readline())
if self.inputs.header:
self._outfields = tuple(entry)
else:
self._outfields = tuple(["column_" + str(x) for x in range(len(entry))])
return self._outfields
def _run_interface(self, runtime):
self._get_outfields()
return runtime
def _outputs(self):
return self._add_output_traits(super(CSVReader, self)._outputs())
def _add_output_traits(self, base):
return add_traits(base, self._get_outfields())
def _list_outputs(self):
outputs = self.output_spec().get()
isHeader = True
for key in self._outfields:
outputs[key] = [] # initialize outfields
with open(self.inputs.in_file, "r") as fid:
for line in fid.readlines():
if self.inputs.header and isHeader: # skip header line
isHeader = False
continue
entry = self._parse_line(line)
outputs = self._append_entry(outputs, entry)
return outputs
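if __name__ == "__main__":
    # Hedged usage sketch mirroring the doctest above; "header.csv" is assumed to
    # exist locally with a header row followed by comma-separated data rows.
    reader = CSVReader()
    reader.inputs.in_file = "header.csv"
    reader.inputs.header = True
    results = reader.run()
    print(results.outputs)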
|
the-stack_106_18932
|
import pandas as pd
import time
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import roc_auc_score
from lightgbm import LGBMModel, LGBMClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
RANDOM = 42
xgb_params = dict(
model__max_depth=list(range(2, 6)),
model__min_child_weight=list(range(1, 20)),
model__subsample=[0.5, 0.6, 0.7, 0.8, 0.9],
model__gamma=[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5],
model__colsample_bytree=[0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
model__reg_lambda=[0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4],
model__reg_alpha=[1e-5, 1e-2, 0.1, 1, 100],
model__learning_rate=[0.01, 0.02, 0.03, 0.05, 0.07, 0.1, 0.13, 0.16, 0.19],
model__n_estimators=[100, 200, 300, 400, 500, 600, 700, 800, 900, 1000,
2000],
)
lgb_params = dict(
model__max_depth=list(range(2, 6)),
model__num_leaves=[6, 8, 12, 16, 32, 48, 100, 130, 150, 180, 200],
model__min_data_in_leaf=[5, 10, 25, 50, 75, 100, 150],
model__feature_fraction=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
model__subsample=[0.5, 0.6, 0.7, 0.8, 0.9],
model__learning_rate=[0.01, 0.02, 0.03, 0.05, 0.07, 0.1, 0.13, 0.16, 0.19],
model__n_estimators=[200, 300, 500, 700],
)
cbc_params = dict(
model__learning_rate=[0.01, 0.02, 0.03, 0.05, 0.07, 0.1, 0.13, 0.16, 0.19],
model__max_depth=list(range(2, 6)),
model__subsample=[0.5, 0.6, 0.7, 0.8, 0.9],
model__n_estimators=[200, 300, 500, 700]
)
def check_dataframes(dataframes, models):
output = pd.DataFrame()
for dataframe in dataframes:
for model in models:
result = check_model_results(dataframe, model)
output = output.append(result, ignore_index=True)
return output
def check_model_results(
dataframe: pd.DataFrame,
model
) -> dict:
pipe = Pipeline([("scaler", StandardScaler()), ("model", model)])
train_sample = dataframe.sample(frac=0.01, replace=True, random_state=42)
X = train_sample[train_sample.columns[2:]].values
y = train_sample["TARGET"].to_numpy()
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=RANDOM
)
if model.name is "CatBoost":
pipe_rs = RandomizedSearchCV(
pipe, cbc_params, n_iter=15, cv=3, verbose=1, n_jobs=5,
random_state=RANDOM)
pipe_rs.fit(X_train, y_train)
model_with_params = select_cbc_model_params(pipe_rs)
elif model.name is "XGB":
pipe_rs = RandomizedSearchCV(
pipe, xgb_params, n_iter=15, cv=3, verbose=1, n_jobs=5,
random_state=RANDOM)
pipe_rs.fit(X_train, y_train)
model_with_params = select_xgb_model_params(pipe_rs)
else:
pipe_rs = RandomizedSearchCV(
pipe, lgb_params, n_iter=15, cv=3, verbose=1, n_jobs=5,
random_state=RANDOM)
pipe_rs.fit(X_train, y_train)
model_with_params = select_lgbm_model_params(pipe_rs)
start = time.time()
model_with_params.fit(X_train, y_train)
end = time.time()
y_pred_train = model_with_params.predict_proba(X_train)
y_pred = model_with_params.predict_proba(X_test)
return {
"model": model,
"dataframe": dataframe.name,
"best_params": pipe_rs.best_params_,
"training score": round(roc_auc_score(y_train, y_pred_train[:, 1]), 3),
"validation score": round(roc_auc_score(y_test, y_pred[:, 1]), 3),
"time": round(end - start, 4),
}
def select_lgbm_model_params(pipe_rs):
return LGBMClassifier(random_state=RANDOM,
objective="binary",
verbose=-1,
max_depth=pipe_rs.best_params_['model__max_depth'],
num_leaves=pipe_rs.best_params_['model__num_leaves'],
min_data_in_leaf=pipe_rs.best_params_[
'model__min_data_in_leaf'],
feature_fraction=pipe_rs.best_params_[
'model__feature_fraction'],
subsample=pipe_rs.best_params_['model__subsample'],
learning_rate=pipe_rs.best_params_[
"model__learning_rate"],
n_estimators=pipe_rs.best_params_[
"model__n_estimators"]
)
def select_xgb_model_params(pipe_rs):
return XGBClassifier(random_state=RANDOM,
verbosity=0,
nthread=4,
max_depth=pipe_rs.best_params_['model__max_depth'],
min_child_weight=pipe_rs.best_params_[
'model__min_child_weight'],
subsample=pipe_rs.best_params_['model__subsample'],
gamma=pipe_rs.best_params_['model__gamma'],
colsample_bytree=pipe_rs.best_params_[
'model__colsample_bytree'],
reg_lambda=pipe_rs.best_params_['model__reg_lambda'],
reg_alpha=pipe_rs.best_params_['model__reg_alpha'],
learning_rate=pipe_rs.best_params_[
"model__learning_rate"],
n_estimators=pipe_rs.best_params_[
"model__n_estimators"]
)
def select_cbc_model_params(pipe_rs):
return CatBoostClassifier(random_state=RANDOM,
verbose=0,
learning_rate=pipe_rs.best_params_[
"model__learning_rate"],
n_estimators=pipe_rs.best_params_[
"model__n_estimators"],
max_depth=pipe_rs.best_params_[
'model__max_depth'],
subsample=pipe_rs.best_params_['model__subsample']
)
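if __name__ == "__main__":
    # Hedged usage sketch: the helpers above expect each estimator and each
    # DataFrame to expose a `.name` attribute, set manually here. The input file
    # name is hypothetical; the frame must contain a "TARGET" column and use its
    # first two columns as identifiers, since features are taken from columns[2:].
    df = pd.read_csv("application_train.csv")
    df.name = "application_train"
    lgbm = LGBMClassifier(random_state=RANDOM)
    lgbm.name = "LGBM"
    xgb = XGBClassifier(random_state=RANDOM)
    xgb.name = "XGB"
    results = check_dataframes([df], [lgbm, xgb])
    print(results[["model", "dataframe", "validation score", "time"]])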
|
the-stack_106_18933
|
"""Ensures that all appropriate changes have been made to Wagtail that will make the site navigable."""
from django.core.management.base import BaseCommand
from cms.api import (
ensure_home_page_and_site,
ensure_resource_pages,
ensure_product_index,
)
class Command(BaseCommand):
"""Ensures that all appropriate changes have been made to Wagtail that will make the site navigable."""
help = __doc__
def handle(self, *args, **options):
ensure_home_page_and_site()
ensure_product_index()
ensure_resource_pages()
|
the-stack_106_18934
|
import sys
import typing as t
if sys.version_info >= (3, 8):
from typing import Protocol
else: # pragma: no cover
from typing_extensions import Protocol
if t.TYPE_CHECKING: # pragma: no cover
from flask.wrappers import Response # noqa: F401
from werkzeug.datastructures import Headers # noqa: F401
from wsgiref.types import WSGIApplication # noqa: F401
from .fields import Field # noqa: F401
from .schemas import Schema # noqa: F401
from .security import HTTPBasicAuth # noqa: F401
from .security import HTTPTokenAuth # noqa: F401
DecoratedType = t.TypeVar('DecoratedType', bound=t.Callable[..., t.Any])
RequestType = t.TypeVar('RequestType')
_Body = t.Union[str, bytes, t.Dict[str, t.Any], t.Generator[str, None, None], 'Response']
_Status = t.Union[str, int]
_Header = t.Union[str, t.List[str], t.Tuple[str, ...]]
_Headers = t.Union[t.Dict[str, _Header], t.List[t.Tuple[str, _Header]], 'Headers']
ResponseType = t.Union[
_Body,
t.Tuple[_Body, _Status],
t.Tuple[_Body, _Headers],
t.Tuple[_Body, _Status, _Headers],
'WSGIApplication'
]
SpecCallbackType = t.Callable[[t.Union[dict, str]], t.Union[dict, str]]
ErrorCallbackType = t.Callable[[int, str, t.Any, t.Mapping[str, str]], ResponseType]
DictSchemaType = t.Dict[str, t.Union['Field', type]]
SchemaType = t.Union['Schema', t.Type['Schema'], DictSchemaType]
HTTPAuthType = t.Union['HTTPBasicAuth', 'HTTPTokenAuth']
TagsType = t.Union[t.List[str], t.List[t.Dict[str, t.Any]]]
class PaginationType(Protocol):
page: int
per_page: int
pages: int
total: int
next_num: int
has_next: bool
prev_num: int
has_prev: bool
class ViewFuncType(Protocol):
_spec: t.Any
_method_spec: t.Any
|
the-stack_106_18935
|
"""Build rules for Tensorflow/XLA testing."""
load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
load("//tensorflow/compiler/tests:plugin.bzl", "plugins")
load(
"//tensorflow/core/platform:build_config_root.bzl",
"tf_cuda_tests_tags",
"tf_exec_compatible_with",
)
def all_backends():
b = ["cpu"] + plugins.keys()
if cuda_is_configured():
return b + ["gpu"]
else:
return b
def tf_xla_py_test(
name,
srcs = [],
deps = [],
tags = [],
data = [],
main = None,
enabled_backends = None,
disabled_backends = None,
use_xla_device = True,
**kwargs):
"""Generates py_test targets, one per XLA backend.
This rule generates py_test() targets named name_backend, for each backend
in all_backends(). The rule also generates a test suite with named `name` that
tests all backends for the test.
For example, the following rule generates test cases foo_test_cpu,
foo_test_gpu, and a test suite name foo_test that tests both.
tf_xla_py_test(
name="foo_test",
srcs="foo_test.py",
deps=[...],
)
Args:
name: Name of the target.
srcs: Sources for the target.
deps: Dependencies of the target.
tags: Tags to apply to the generated targets.
data: Data dependencies of the target.
main: Same as py_test's main attribute.
enabled_backends: A list of backends that should be tested. Supported
values include "cpu" and "gpu". If not specified, defaults to None.
disabled_backends: A list of backends that should not be tested. Supported
values include "cpu" and "gpu". If not specified, defaults to None.
use_xla_device: If true then the --test_device argument is set to XLA_CPU
and XLA_GPU for the CPU and GPU tests. Otherwise it is set to CPU and
GPU.
**kwargs: keyword arguments passed onto the generated py_test() rules.
"""
if enabled_backends == None:
enabled_backends = all_backends()
if disabled_backends == None:
disabled_backends = []
if type(disabled_backends) != "list":
fail("disabled_backends must be a list of strings", "disabled_backends")
backends = [b for b in enabled_backends if b not in disabled_backends]
test_names = []
if use_xla_device:
cpu_xla_device = "XLA_CPU"
gpu_xla_device = "XLA_GPU"
else:
cpu_xla_device = "CPU"
gpu_xla_device = "GPU"
for backend in backends:
test_name = "{}_{}".format(name, backend)
backend_tags = ["tf_xla_{}".format(backend)]
backend_args = []
backend_deps = []
backend_data = []
if backend == "cpu":
backend_args += [
"--test_device=" + cpu_xla_device,
"--types=DT_HALF,DT_FLOAT,DT_DOUBLE,DT_UINT8,DT_QUINT8,DT_INT8,DT_QINT8,DT_INT32,DT_QINT32,DT_INT64,DT_BOOL,DT_COMPLEX64,DT_COMPLEX128",
]
elif backend == "gpu":
backend_args += [
"--test_device=" + gpu_xla_device,
"--types=DT_HALF,DT_FLOAT,DT_DOUBLE,DT_UINT8,DT_QUINT8,DT_INT8,DT_QINT8,DT_INT32,DT_QINT32,DT_INT64,DT_BOOL,DT_COMPLEX64,DT_COMPLEX128,DT_BFLOAT16",
]
backend_tags += tf_cuda_tests_tags()
elif backend in plugins:
backend_args += [
"--test_device=" + plugins[backend]["device"],
"--types=" + plugins[backend]["types"],
]
backend_tags += plugins[backend]["tags"]
backend_args += plugins[backend]["args"]
backend_deps += plugins[backend]["deps"]
backend_data += plugins[backend]["data"]
else:
fail("Unknown backend {}".format(backend))
test_tags = tags + backend_tags
native.py_test(
name = test_name,
srcs = srcs,
srcs_version = "PY2AND3",
args = backend_args,
main = "{}.py".format(name) if main == None else main,
data = data + backend_data,
deps = deps + backend_deps,
tags = test_tags,
exec_compatible_with = tf_exec_compatible_with({"tags": test_tags}),
**kwargs
)
test_names.append(test_name)
native.test_suite(name = name, tests = test_names)
def generate_backend_suites(backends = []):
"""Generates per-backend test_suites that run all tests for a backend."""
if not backends:
backends = all_backends()
for backend in backends:
native.test_suite(name = "%s_tests" % backend, tags = ["tf_xla_%s" % backend])
|
the-stack_106_18937
|
from io import BytesIO
import random
from hashlib import sha1
import json
from django.db.models import OneToOneField
try:
from django.db.models.fields.related_descriptors import ReverseOneToOneDescriptor
except ImportError:
from django.db.models.fields.related import SingleRelatedObjectDescriptor as ReverseOneToOneDescriptor
from django.db import models
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.serializers.json import DjangoJSONEncoder
from django.conf import settings
from django.forms.utils import ValidationError
from django.utils.translation import ugettext_lazy as _
class AutoReverseOneToOneDescriptor(ReverseOneToOneDescriptor):
def __get__(self, instance, instance_type=None):
model = self.related.related_model
try:
return super(AutoReverseOneToOneDescriptor, self).__get__(instance, instance_type)
except model.DoesNotExist:
obj = model(**{self.related.field.name: instance})
obj.save()
return (super(AutoReverseOneToOneDescriptor, self).__get__(instance, instance_type))
class AutoOneToOneField(OneToOneField):
"""
    OneToOneField that creates the dependent object on first access from the parent
    object if the dependent object has not been created yet.
"""
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(), AutoReverseOneToOneDescriptor(related))
class ExtendedImageField(models.ImageField):
"""
Extended ImageField that can resize image before saving it.
"""
def __init__(self, *args, **kwargs):
self.width = kwargs.pop('width', None)
self.height = kwargs.pop('height', None)
super(ExtendedImageField, self).__init__(*args, **kwargs)
def save_form_data(self, instance, data):
if data and self.width and self.height:
content = self.resize_image(data.read(), width=self.width, height=self.height)
salt = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
fname = sha1(salt.encode('utf-8') + settings.SECRET_KEY.encode('utf-8')).hexdigest() + '.png'
data = SimpleUploadedFile(fname, content, content_type='image/png')
super(ExtendedImageField, self).save_form_data(instance, data)
def resize_image(self, rawdata, width, height):
"""
Resize image to fit it into (width, height) box.
"""
try:
import Image
except ImportError:
from PIL import Image
image = Image.open(BytesIO(rawdata))
oldw, oldh = image.size
if oldw >= oldh:
x = int(round((oldw - oldh) / 2.0))
image = image.crop((x, 0, (x + oldh) - 1, oldh - 1))
else:
y = int(round((oldh - oldw) / 2.0))
image = image.crop((0, y, oldw - 1, (y + oldw) - 1))
image = image.resize((width, height), resample=Image.ANTIALIAS)
string = BytesIO()
image.save(string, format='PNG')
return string.getvalue()
class JSONField(models.TextField):
"""
JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly.
Django snippet #1478
"""
def from_db_value(self, value, *args):
if value == "":
return None
try:
if isinstance(value, str):
return json.loads(value)
except ValueError:
pass
return None
def get_prep_value(self, value):
if value == "":
return None
if isinstance(value, dict):
value = json.dumps(value, cls=DjangoJSONEncoder)
return value
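# Hedged usage sketch (comments only; the model below is illustrative and not part
# of this module):
#
#     class Profile(models.Model):
#         user = AutoOneToOneField('auth.User', related_name='profile',
#                                  on_delete=models.CASCADE)
#         avatar = ExtendedImageField(upload_to='avatars', width=64, height=64,
#                                     blank=True)
#         preferences = JSONField(blank=True, null=True)
#
# Accessing user.profile then creates the related row on first access instead of
# raising Profile.DoesNotExist, avatars are cropped and resized to 64x64 PNGs on
# save, and preferences round-trips a dict through JSON in a text column.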
|
the-stack_106_18938
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libice(AutotoolsPackage):
"""libICE - Inter-Client Exchange Library."""
homepage = "http://cgit.freedesktop.org/xorg/lib/libICE"
url = "https://www.x.org/archive/individual/lib/libICE-1.0.9.tar.gz"
version('1.0.9', '95812d61df8139c7cacc1325a26d5e37')
depends_on('xproto', type='build')
depends_on('xtrans', type='build')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
|
the-stack_106_18939
|
import base64
import itertools
import math
import mimetypes
import os
import time
from flask import Flask, request, jsonify, Response
app = Flask(__name__)
@app.route("/token", methods=["GET"])
def token():
return '<div><a src="http://127.0.0.1:5003/verify?token=c9bb34ba-131b-11e8-b642-0ed5f89f718b">Link</a></div>', 200
@app.route("/headers", methods=["GET"])
def headers():
return 'OK', 200, {
'X-Integration-Value': "_HelloWorld1",
"ATestHEader": "orange",
}
@app.route("/verify", methods=["GET"])
def verify():
if request.args.get('token') == 'c9bb34ba-131b-11e8-b642-0ed5f89f718b':
return '', 200
else:
return '', 401
@app.route("/get_thing_slow", methods=["GET"])
def get_slow():
time.sleep(0.25)
response = {
"status": "OK",
}
return jsonify(response), 200
@app.route("/fake_dictionary", methods=["GET"])
def get_fake_dictionary():
fake = {
"top": {
"Thing": "value",
"nested": {
"doubly": {
"inner": "value",
}
}
},
"an_integer": 123,
"a_string": "abc",
"a_bool": True,
}
return jsonify(fake), 200
@app.route("/fake_list", methods=["GET"])
def list_response():
list_response = [
"a",
"b",
"c",
1,
2,
3,
-1.0,
-2.0,
-3.0,
]
return jsonify(list_response), 200
@app.route("/nested_list", methods=["GET"])
def nested_list_response():
response = {
"top": [
"a",
"b",
{
"key": "val",
}
]
}
return jsonify(response), 200
@app.route("/fake_upload_file", methods=["POST"])
def upload_fake_file():
if not request.files:
return '', 401
if not mimetypes.inited:
mimetypes.init()
for key, item in request.files.items():
if item.filename:
filetype = ".{}".format(item.filename.split(".")[-1])
if filetype in mimetypes.suffix_map:
if not item.content_type:
return "", 400
# Try to download each of the files downloaded to /tmp and
# then remove them
for key in request.files:
file_to_save = request.files[key]
path = os.path.join("/tmp", file_to_save.filename)
file_to_save.save(path)
return '', 200
@app.route("/nested/again", methods=["GET"])
def multiple_path_items_response():
response = {
"status": "OK",
}
return jsonify(response), 200
@app.route("/pi", methods=["GET"])
def return_fp_number():
response = {
"pi": math.pi
}
return jsonify(response), 200
@app.route("/expect_dtype", methods=["POST"])
def expect_type():
body = request.get_json()
value = body.get("value")
dtype = body.get("dtype")
dvalue = body.get("dvalue")
status = "OK"
code = 200
if not value and dtype and dvalue:
status = "Missing expected type or value"
code = 400
if str(type(value)) != "<class '{}'>".format(dtype):
status = "Unexpected type: '{}'".format(str(type(value)))
code = 400
if value != dvalue:
status = "Unexpected value: '{}'".format(value)
code = 400
return jsonify({"status": status}), code
@app.route("/status_code_return", methods=["POST"])
def status_code_return():
body = request.get_json()
response = {}
return jsonify(response), int(body["status_code"])
@app.route("/echo", methods=["POST"])
def echo_values():
body = request.get_json()
response = body
return jsonify(response), 200
@app.route("/expect_raw_data", methods=["POST"])
def expect_raw_data():
raw_data = request.stream.read().decode("utf8")
if raw_data == "OK":
response = {
"status": "ok",
}
code = 200
elif raw_data == "DENIED":
response = {
"status": "denied",
}
code = 401
else:
response = {
"status": "err: '{}'".format(raw_data),
}
code = 400
return jsonify(response), code
@app.route("/form_data", methods=["POST"])
def echo_form_values():
body = request.get_data()
key, _, value = body.decode("utf8").partition("=")
response = {key: value}
return jsonify(response), 200
@app.route("/stream_file", methods=["GET"])
def stream_file():
def iter():
for data in range(1,10):
yield bytes(data)
response = Response(iter(), mimetype='application/octet-stream')
response.headers['Content-Disposition'] = 'attachment; filename=tmp.txt'
return response
statuses = itertools.cycle(['processing', 'ready'])
@app.route("/poll", methods=["GET"])
def poll():
response = {'status': next(statuses)}
return jsonify(response)
def _maybe_get_cookie_name():
return (request.get_json() or {}).get("cookie_name", "tavern-cookie")
@app.route("/get_cookie", methods=["POST"])
def give_cookie():
cookie_name = _maybe_get_cookie_name()
response = Response()
response.set_cookie(cookie_name, base64.b64encode(os.urandom(16)).decode("utf8"))
return response, 200
@app.route("/expect_cookie", methods=["GET"])
def expect_cookie():
cookie_name = _maybe_get_cookie_name()
if cookie_name not in request.cookies:
return jsonify({"error": "No cookie named {} in request".format(cookie_name)}), 400
else:
return jsonify({"status": "ok"}), 200
|
the-stack_106_18940
|
import itertools
import os
from datetime import datetime
import pytz
import requests
from main.database.source import Source
import main.database.carpark_utils as cu
access_key = os.environ.get("LTA_API_ACCESS_KEY", None) or exit('LTA_API_ACCESS_KEY not defined.')
data_url = 'http://datamall2.mytransport.sg/ltaodataservice/CarParkAvailabilityv2?'
def log_pull():
singapore_timezone = pytz.timezone('Asia/Singapore')
current_datetime = datetime.now(singapore_timezone).strftime("%H:%M:%S")
print("Pulling carpark availability from LTA: " + current_datetime)
def pull():
headers = {'AccountKey': access_key}
response = requests.get(data_url, headers=headers)
if not response.content:
print("Empty response. LTA carpark availability not available")
return
raw_data = response.json()['value']
transformations1 = map(convert_to_data_set, raw_data)
carpark_data_sets = list(transformations1)
cu.update_carpark_availability(carpark_data_sets)
def convert_to_data_set(raw_data):
source = Source.LTA
third_party_id = raw_data["CarParkID"]
lots_available = raw_data["AvailableLots"]
return source, third_party_id, lots_available
def start():
log_pull()
pull()
print("Pull succeeded.")
start()
|
the-stack_106_18942
|
import pytest
from search import *
romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)
vacumm_world = GraphProblemStochastic('State_1', ['State_7', 'State_8'], vacumm_world)
LRTA_problem = OnlineSearchProblem('State_3', 'State_5', one_dim_state_space)
def test_find_min_edge():
assert romania_problem.find_min_edge() == 70
def test_breadth_first_tree_search():
assert breadth_first_tree_search(
romania_problem).solution() == ['Sibiu', 'Fagaras', 'Bucharest']
def test_breadth_first_search():
assert breadth_first_search(romania_problem).solution() == ['Sibiu', 'Fagaras', 'Bucharest']
def test_best_first_graph_search():
# uniform_cost_search and astar_search test it indirectly
assert best_first_graph_search(
romania_problem,
lambda node: node.state).solution() == ['Sibiu', 'Fagaras', 'Bucharest']
assert best_first_graph_search(
romania_problem,
lambda node: node.state[::-1]).solution() == ['Timisoara',
'Lugoj',
'Mehadia',
'Drobeta',
'Craiova',
'Pitesti',
'Bucharest']
def test_uniform_cost_search():
assert uniform_cost_search(
romania_problem).solution() == ['Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest']
def test_depth_first_graph_search():
solution = depth_first_graph_search(romania_problem).solution()
assert solution[-1] == 'Bucharest'
def test_iterative_deepening_search():
assert iterative_deepening_search(
romania_problem).solution() == ['Sibiu', 'Fagaras', 'Bucharest']
def test_depth_limited_search():
solution_3 = depth_limited_search(romania_problem, 3).solution()
assert solution_3[-1] == 'Bucharest'
assert depth_limited_search(romania_problem, 2) == 'cutoff'
solution_50 = depth_limited_search(romania_problem).solution()
assert solution_50[-1] == 'Bucharest'
def test_bidirectional_search():
assert bidirectional_search(romania_problem) == 418
def test_astar_search():
assert astar_search(romania_problem).solution() == ['Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest']
def test_recursive_best_first_search():
assert recursive_best_first_search(
romania_problem).solution() == ['Sibiu', 'Rimnicu', 'Pitesti', 'Bucharest']
def test_hill_climbing():
prob = PeakFindingProblem((0, 0), [[0, 5, 10, 20],
[-3, 7, 11, 5]])
assert hill_climbing(prob) == (0, 3)
prob = PeakFindingProblem((0, 0), [[0, 5, 10, 8],
[-3, 7, 9, 999],
[1, 2, 5, 11]])
assert hill_climbing(prob) == (0, 2)
prob = PeakFindingProblem((2, 0), [[0, 5, 10, 8],
[-3, 7, 9, 999],
[1, 2, 5, 11]])
assert hill_climbing(prob) == (1, 3)
def test_simulated_annealing():
random.seed("aima-python")
prob = PeakFindingProblem((0, 0), [[0, 5, 10, 20],
[-3, 7, 11, 5]], directions4)
sols = {prob.value(simulated_annealing(prob)) for i in range(100)}
assert max(sols) == 20
prob = PeakFindingProblem((0, 0), [[0, 5, 10, 8],
[-3, 7, 9, 999],
[1, 2, 5, 11]], directions8)
sols = {prob.value(simulated_annealing(prob)) for i in range(100)}
assert max(sols) == 999
def test_BoggleFinder():
board = list('SARTELNID')
"""
>>> print_boggle(board)
S A R
T E L
N I D
"""
f = BoggleFinder(board)
assert len(f) == 206
def test_and_or_graph_search():
def run_plan(state, problem, plan):
if problem.goal_test(state):
return True
        if len(plan) != 2:
return False
predicate = lambda x: run_plan(x, problem, plan[1][x])
return all(predicate(r) for r in problem.result(state, plan[0]))
plan = and_or_graph_search(vacumm_world)
assert run_plan('State_1', vacumm_world, plan)
def test_LRTAStarAgent():
my_agent = LRTAStarAgent(LRTA_problem)
assert my_agent('State_3') == 'Right'
assert my_agent('State_4') == 'Left'
assert my_agent('State_3') == 'Right'
assert my_agent('State_4') == 'Right'
assert my_agent('State_5') is None
my_agent = LRTAStarAgent(LRTA_problem)
assert my_agent('State_4') == 'Left'
my_agent = LRTAStarAgent(LRTA_problem)
assert my_agent('State_5') is None
def test_genetic_algorithm():
# Graph coloring
edges = {
'A': [0, 1],
'B': [0, 3],
'C': [1, 2],
'D': [2, 3]
}
def fitness(c):
return sum(c[n1] != c[n2] for (n1, n2) in edges.values())
solution_chars = GA_GraphColoringChars(edges, fitness)
assert solution_chars == ['R', 'G', 'R', 'G'] or solution_chars == ['G', 'R', 'G', 'R']
solution_bools = GA_GraphColoringBools(edges, fitness)
assert solution_bools == [True, False, True, False] or solution_bools == [False, True, False, True]
solution_ints = GA_GraphColoringInts(edges, fitness)
assert solution_ints == [0, 1, 0, 1] or solution_ints == [1, 0, 1, 0]
# Queens Problem
gene_pool = range(8)
population = init_population(100, gene_pool, 8)
def fitness(q):
non_attacking = 0
for row1 in range(len(q)):
for row2 in range(row1+1, len(q)):
col1 = int(q[row1])
col2 = int(q[row2])
row_diff = row1 - row2
col_diff = col1 - col2
if col1 != col2 and row_diff != col_diff and row_diff != -col_diff:
non_attacking += 1
return non_attacking
solution = genetic_algorithm(population, fitness, gene_pool=gene_pool, f_thres=25)
assert fitness(solution) >= 25
def GA_GraphColoringChars(edges, fitness):
gene_pool = ['R', 'G']
population = init_population(8, gene_pool, 4)
return genetic_algorithm(population, fitness, gene_pool=gene_pool)
def GA_GraphColoringBools(edges, fitness):
gene_pool = [True, False]
population = init_population(8, gene_pool, 4)
return genetic_algorithm(population, fitness, gene_pool=gene_pool)
def GA_GraphColoringInts(edges, fitness):
population = init_population(8, [0, 1], 4)
return genetic_algorithm(population, fitness)
# TODO: for .ipynb:
"""
>>> compare_graph_searchers()
Searcher romania_map(A, B) romania_map(O, N) australia_map
breadth_first_tree_search < 21/ 22/ 59/B> <1158/1159/3288/N> < 7/ 8/ 22/WA>
breadth_first_search < 7/ 11/ 18/B> < 19/ 20/ 45/N> < 2/ 6/ 8/WA>
depth_first_graph_search < 8/ 9/ 20/B> < 16/ 17/ 38/N> < 4/ 5/ 11/WA>
iterative_deepening_search < 11/ 33/ 31/B> < 656/1815/1812/N> < 3/ 11/ 11/WA>
depth_limited_search < 54/ 65/ 185/B> < 387/1012/1125/N> < 50/ 54/ 200/WA>
recursive_best_first_search < 5/ 6/ 15/B> <5887/5888/16532/N> < 11/12/ 43/WA>
>>> ' '.join(f.words())
'LID LARES DEAL LIE DIETS LIN LINT TIL TIN RATED ERAS LATEN DEAR TIE LINE INTER
STEAL LATED LAST TAR SAL DITES RALES SAE RETS TAE RAT RAS SAT IDLE TILDES LEAST
IDEAS LITE SATED TINED LEST LIT RASE RENTS TINEA EDIT EDITS NITES ALES LATE
LETS RELIT TINES LEI LAT ELINT LATI SENT TARED DINE STAR SEAR NEST LITAS TIED
SEAT SERAL RATE DINT DEL DEN SEAL TIER TIES NET SALINE DILATE EAST TIDES LINTER
NEAR LITS ELINTS DENI RASED SERA TILE NEAT DERAT IDLEST NIDE LIEN STARED LIER
LIES SETA NITS TINE DITAS ALINE SATIN TAS ASTER LEAS TSAR LAR NITE RALE LAS
REAL NITER ATE RES RATEL IDEA RET IDEAL REI RATS STALE DENT RED IDES ALIEN SET
TEL SER TEN TEA TED SALE TALE STILE ARES SEA TILDE SEN SEL ALINES SEI LASE
DINES ILEA LINES ELD TIDE RENT DIEL STELA TAEL STALED EARL LEA TILES TILER LED
ETA TALI ALE LASED TELA LET IDLER REIN ALIT ITS NIDES DIN DIE DENTS STIED LINER
LASTED RATINE ERA IDLES DIT RENTAL DINER SENTI TINEAL DEIL TEAR LITER LINTS
TEAL DIES EAR EAT ARLES SATE STARE DITS DELI DENTAL REST DITE DENTIL DINTS DITA
DIET LENT NETS NIL NIT SETAL LATS TARE ARE SATI'
>>> boggle_hill_climbing(list('ABCDEFGHI'), verbose=False)
(['E', 'P', 'R', 'D', 'O', 'A', 'G', 'S', 'T'], 123)
"""
if __name__ == '__main__':
pytest.main()
|
the-stack_106_18944
|
from __future__ import absolute_import
from mock import Mock
from sentry.api.bases.team import TeamPermission
from sentry.models import ApiKey
from sentry.testutils import TestCase
class TeamPermissionBase(TestCase):
def setUp(self):
self.org = self.create_organization(flags=0)
self.team = self.create_team(organization=self.org)
super(TeamPermissionBase, self).setUp()
def has_object_perm(self, method, obj, auth=None, user=None, is_superuser=None):
perm = TeamPermission()
request = Mock()
request.auth = auth
request.user = user
request.method = method
request.is_superuser = lambda: is_superuser if is_superuser is not None else user.is_superuser
return (
perm.has_permission(request, None) and perm.has_object_permission(request, None, obj)
)
class TeamPermissionTest(TeamPermissionBase):
def test_get_regular_user(self):
user = self.create_user()
assert not self.has_object_perm('GET', self.team, user=user)
def test_get_superuser(self):
user = self.create_user(is_superuser=True)
assert self.has_object_perm('GET', self.team, user=user)
def test_get_without_team_membership(self):
user = self.create_user()
self.create_member(
user=user,
organization=self.org,
role='member',
teams=[],
)
assert not self.has_object_perm('GET', self.team, user=user)
def test_get_with_team_membership(self):
user = self.create_user()
self.create_member(
user=user,
organization=self.org,
role='member',
teams=[self.team],
)
assert self.has_object_perm('GET', self.team, user=user)
def test_get_api_key_with_org_access(self):
key = ApiKey.objects.create(
organization=self.org,
scope_list=['team:read'],
)
assert self.has_object_perm('GET', self.team, auth=key)
def test_get_api_key_without_org_access(self):
key = ApiKey.objects.create(
organization=self.create_organization(),
scope_list=['team:read'],
)
assert not self.has_object_perm('GET', self.team, auth=key)
def test_api_key_without_access(self):
key = ApiKey.objects.create(
organization=self.org,
)
assert not self.has_object_perm('GET', self.org, auth=key)
def test_api_key_with_wrong_access(self):
key = ApiKey.objects.create(
organization=self.org,
scope_list=['project:read'],
)
assert not self.has_object_perm('GET', self.org, auth=key)
def test_api_key_with_wrong_access_for_method(self):
key = ApiKey.objects.create(
organization=self.org,
scope_list=['team:read'],
)
assert not self.has_object_perm('PUT', self.project, auth=key)
|
the-stack_106_18945
|
from ray._private.utils import get_function_args
from ray.tune.schedulers.trial_scheduler import TrialScheduler, FIFOScheduler
from ray.tune.schedulers.hyperband import HyperBandScheduler
from ray.tune.schedulers.hb_bohb import HyperBandForBOHB
from ray.tune.schedulers.async_hyperband import AsyncHyperBandScheduler, ASHAScheduler
from ray.tune.schedulers.median_stopping_rule import MedianStoppingRule
from ray.tune.schedulers.pbt import (
PopulationBasedTraining,
PopulationBasedTrainingReplay,
)
from ray.tune.schedulers.resource_changing_scheduler import ResourceChangingScheduler
def _pb2_importer(*args, **kwargs):
# PB2 introduces a GPy dependency which can be expensive, so we import
# lazily.
from ray.tune.schedulers.pb2 import PB2
return PB2(*args, **kwargs)
SCHEDULER_IMPORT = {
"fifo": FIFOScheduler,
"async_hyperband": AsyncHyperBandScheduler,
"asynchyperband": AsyncHyperBandScheduler,
"median_stopping_rule": MedianStoppingRule,
"medianstopping": MedianStoppingRule,
"hyperband": HyperBandScheduler,
"hb_bohb": HyperBandForBOHB,
"pbt": PopulationBasedTraining,
"pbt_replay": PopulationBasedTrainingReplay,
"pb2": _pb2_importer,
"resource_changing": ResourceChangingScheduler,
}
def create_scheduler(
scheduler,
**kwargs,
):
"""Instantiate a scheduler based on the given string.
This is useful for swapping between different schedulers.
Args:
scheduler (str): The scheduler to use.
**kwargs: Scheduler parameters.
These keyword arguments will be passed to the initialization
function of the chosen scheduler.
Returns:
ray.tune.schedulers.trial_scheduler.TrialScheduler: The scheduler.
Example:
>>> from ray import tune
>>> pbt_kwargs = {}
>>> scheduler = tune.create_scheduler('pbt', **pbt_kwargs) # doctest: +SKIP
"""
scheduler = scheduler.lower()
if scheduler not in SCHEDULER_IMPORT:
raise ValueError(
f"The `scheduler` argument must be one of "
f"{list(SCHEDULER_IMPORT)}. "
f"Got: {scheduler}"
)
SchedulerClass = SCHEDULER_IMPORT[scheduler]
scheduler_args = get_function_args(SchedulerClass)
trimmed_kwargs = {k: v for k, v in kwargs.items() if k in scheduler_args}
return SchedulerClass(**trimmed_kwargs)
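# Illustrative usage sketch (not part of the module above; the option names are
# assumptions for demonstration only): because only keyword arguments whose
# names appear in get_function_args(SchedulerClass) survive the trimming step,
# unknown options are silently dropped rather than raising.
#
#   scheduler = create_scheduler(
#       "async_hyperband",
#       time_attr="training_iteration",  # kept only if AsyncHyperBandScheduler accepts it
#       not_a_real_option=123,           # silently dropped by the trimming step
#   )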
__all__ = [
"TrialScheduler",
"HyperBandScheduler",
"AsyncHyperBandScheduler",
"ASHAScheduler",
"MedianStoppingRule",
"FIFOScheduler",
"PopulationBasedTraining",
"PopulationBasedTrainingReplay",
"HyperBandForBOHB",
"ResourceChangingScheduler",
]
|
the-stack_106_18948
|
#! /usr/bin/python3
"""
Connect to the Control/Monitoring site
Retrieve the MagnetID list
For each MagnetID, list the attached records
Check record consistency
"""
import getpass
import sys
# import os
import re
import datetime
import requests
import requests.exceptions
import lxml.html as lh
# import jsonpickle
import MRecord
import GObject
import HMagnet
def createSession(url_logging, payload):
"""create a request session"""
p = s.post(url=url_logging, data=payload)
# print the html returned or something more intelligent to see if it's a successful login page.
if debug:
print( "connect:", p.url, p.status_code )
# check return status: if not ok stop
if p.status_code != 200:
print("error %d logging to %s" % (p.status_code, url_logging) )
sys.exit(1)
p.raise_for_status()
return p
def download(session, url_data, param, link=None, save=False, debug=False):
"""download """
d = session.get(url=url_data, params=param)
if debug:
print("downloads:", d.url, d.status_code)
if d.status_code != 200:
print("error %d download %s" % (d.status_code, url_data) )
sys.exit(1)
d.raise_for_status()
if save:
filename = link.replace('../../../','')
filename = filename.replace('/','_').replace('%20','-')
# print("save to %s" % filename)
fo = open(filename, "w", newline='\n')
fo.write(d.text)
fo.close()
return d.text
# for M1:
# table fileTreeDemo_1, ul, <li class="file ext_txt">, <a href=.., rel="filename" /a> </li>
def getTable(session, url_data, index, indices, delimiter='//tbody', param=None, debug=False):
"""get table data from url_data"""
# Perform some webscrapping to get all table data
# see https://towardsdatascience.com/web-scraping-html-tables-with-python-c9baba21059
if param is None:
page = session.get(url=url_data)
else:
page = session.get(url=url_data, params=param)
if debug:
print( "connect:", page.url, page.status_code )
if page.status_code != 200 :
print("cannot logging to %s" % url_data)
sys.exit(1)
page.raise_for_status()
#Store the contents of the website under doc
doc = lh.fromstring(page.content)
# from php source
# table : id datatable, tr id=row, thead, tbody, <td class="sorting_1"...>
# Parse data that are stored between <tbody>..</tbody> of HTML
tr_elements = doc.xpath(delimiter) # '//tbody')
if debug:
print("detected tables[delimiter=%s]:" % delimiter, tr_elements)
#Create empty list ## Better to have a dict??
jid = None
Mid = None
Mjid = dict()
Mdata = dict()
if not tr_elements:
if debug:
print("page.text=", page.text, "**")
return Mdata
#For each row, store each first element (header) and an empty list
for i,t in enumerate(tr_elements[0]):
i+=1
name=t.text_content()
if debug:
print( '%d:"%s"'%(i,name) )
# get date ID status comment from sub element
data = []
for j,d in enumerate(t):
j+=1
jname = d.text_content()
if debug:
print( '\t%d:%s' % (j, jname) )
if j == index:
if param:
jid = jname[jname.find("(")+1:jname.find(")")]
# print("jid=", jid)
Mid = re.sub(' (.*)','',jname)
if j in indices:
data.append(jname)
        # shall check whether key is already defined for sanity
if Mid == "-" :
print("%s index: no entry " % name)
else:
Mdata[Mid] = data
Mjid[Mid] = jid
# Mids = sorted(set(Mids)) #uniq only: list(set(Mids))
if debug:
print( "Data found: ", Mdata, "jid=", Mjid)
return (Mdata, Mjid)
def getMagnetRecord(session, url_data, magnetID, Magnets, missingIDs, debug=False):
"""get records for a given magnetID"""
if debug:
print("MagnetID=%s" % magnetID)
if not magnetID in Magnets.keys():
Magnets[magnetID] = HMagnet.HMagnet(magnetID, 0, None, "Unknown", 0)
# To get files for magnetID
params_links = (
('ref', magnetID),
('link', ''),
)
r = session.get(url=url_data, params=params_links)
if debug:
print( "data:", r.url, r.status_code, r.encoding )
if r.status_code != 200:
print("error %d loading %s" % (p.status_code, url_data) )
sys.exit(1)
r.raise_for_status()
for f in r.text.split('<br>'):
if f and not '~' in f :
replace_str='<a href='+'\''+url_downloads+'?file='
data = f.replace(replace_str,'').replace('</a>','') .replace('\'>',': ').split(': ')
link = data[0].replace(' ','%20')
site = link.replace('../../../','')
site = re.sub('/.*txt','',site)
tformat="%Y.%m.%d - %H:%M:%S"
timestamp = datetime.datetime.strptime(data[1].replace('.txt',''), tformat)
# Download a specific file
params_downloads = 'file=%s&download=1' % link
html = download(session, url_downloads, param=params_downloads, link=link)
lines = html.split('\n')[0] # get 1st line
lines_items = lines.split('\t')
actual_id = None
if len(lines_items) == 2:
actual_id = lines_items[1]
if not actual_id:
print("%s: no name defined for Magnet" % link)
else:
if actual_id != magnetID:
missingIDs.append(actual_id)
if not actual_id in Magnets.keys():
if debug:
print("Create a new entry: ", actual_id)
Magnets[actual_id] = HMagnet.HMagnet(actual_id, 0, None, "Unknown", 0)
MagnetRecords[actual_id] = []
# Magnets[actual_id].addRecord( timestamp )
record = MRecord.MRecord(timestamp, site, link)
if not record in MagnetRecords[actual_id]:
# print("actual_id: %s - %s, %s %s" %(actual_id, timestamp, site, link) )
MagnetRecords[actual_id].append( record )
else:
# Magnets[magnetID].addRecord( timestamp )
if not magnetID in MagnetRecords:
MagnetRecords[magnetID] = []
record = MRecord.MRecord(timestamp, site, link)
if not record in MagnetRecords[magnetID]:
# print("magnetID: %s - %s, %s %s" %(magnetID, timestamp, site, link) )
MagnetRecords[magnetID].append( record )
if __name__ == "__main__":
import argparse
import python_magnetrun
import matplotlib
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("--user", help="specify user")
parser.add_argument("--server", help="specify server", default="http://147.173.83.216/site/sba/pages")
parser.add_argument("--save", help="save files", action='store_true')
parser.add_argument("--debug", help="activate debug mode", action='store_true')
args = parser.parse_args()
if sys.stdin.isatty():
password = getpass.getpass('Using getpass: ')
else:
print( 'Using readline' )
password = sys.stdin.readline().rstrip()
# print( 'Read: ', password )
# shall check if host ip up and running
base_url=args.server
url_logging=base_url + "/" + "login.php"
url_downloads=base_url + "/" + "courbe.php"
url_status=base_url + "/" + "Etat.php"
url_files=base_url + "/" + "getfref.php"
url_helices=base_url + "/" + "Aimant2.php"
url_materials=base_url + "/" + "Mat.php"
# Fill in your details here to be posted to the login form.
payload = {
'email': args.user,
'password': password
}
# Magnets
Magnets = dict()
MagnetRecords = dict()
MagnetComps = dict()
Status = dict()
missingIDs = []
Mats = dict()
debug = args.debug
# Use 'with' to ensure the session context is closed after use.
with requests.Session() as s:
p = createSession(url_logging, payload)
# test connection
r = s.get(url=url_status)
if r.url == url_logging:
print("check connection failed: Wrong credentials" )
sys.exit(1)
# Get Magnets from Status page
(Status, jid) = getTable(s, url_status, 2, [3])
for i,magnetID in enumerate(Status): #Mids:
getMagnetRecord(s, url_files, magnetID, Magnets, missingIDs)
Magnets[magnetID].setStatus(Status[magnetID][-1])
Magnets[magnetID].setIndex(jid[magnetID])
# check records for each missingID
while len(missingIDs) != 0 :
check_missingIDs = set(missingIDs)
missingIDs.clear()
if debug:
print("check missingIDs")
for magnetID in check_missingIDs:
if not magnetID in Status:
Status[magnetID] = "missingref"
getMagnetRecord(s, url_files, magnetID, Magnets, missingIDs)
Magnets[magnetID].setStatus(Status[magnetID][-1])
if debug:
print("\nMagnets: ")
for magnet in Magnets:
print("** %s: status=%s" % ( magnet, Magnets[magnet].getStatus() ) )
if debug:
print("loading helices for: ", magnet)
params_helix = (
('ref', magnet),
)
hindices = [3,4,5,6,7,8,9,10,11,12,13,14,15,16,19]
res = getTable(s, url_helices, 1, hindices, param=params_helix)
helices = ()
if res:
helices = res[0]
jid = res[1]
if Magnets[magnet].getIndex() is None:
Magnets[magnet].setIndex(jid[magnet])
if debug:
print("helices:", helices, "jid:", jid, Magnets[magnet].getIndex() )
for data in helices:
# print("%s:" % data )
for i in range(len(helices[data])-1):
materialID = re.sub('H.* / ','',helices[data][i])
if debug:
print("%s:" % materialID )
if materialID != '-':
r = s.post(url_materials, data={ 'REF': materialID, 'compact:': 'on', 'formsubmit': 'OK' })
r.raise_for_status()
# if debug:
# print("post MaterialID: ", r.url, r.status_code)
html = lh.fromstring(r.text.encode(r.encoding))
conductivity = html.xpath('//input[@name="CONDUCTIVITE"]/@value')[-1]
elasticlimit = html.xpath('//input[@name="LE"]/@value')[-1]
if not materialID in Mats:
Mats[materialID] = GObject.GObject(materialID, 0,0,
{"sigma0":str(conductivity), "rpe": str(elasticlimit)},
"Helix", "Unknown")
#Magnets[magnet].addGObject(materialID)
if not magnet in MagnetComps:
MagnetComps[magnet] = []
MagnetComps[magnet].append(materialID)
if debug:
print("MagnetComps[%s].append(%s)" % (magnet,materialID) )
if debug:
print("Material: %s" % materialID,
"Conductivity=", conductivity,
"ElasticLimit=", elasticlimit)
MAGconf = helices[data][-1]
                MAGconf = MAGconf.replace(' \t\t\t\t\t\t','')
                MAGconf = MAGconf.replace('\n',',')
Magconf_files = MAGconf.split(' ')
Magconf_files = [f for f in Magconf_files if f.endswith('.conf')]
if debug:
print("MAGconfile=", Magconf_files, " **" )
Magnets[magnet].setMAGfile(Magconf_files)
print("\nMaterials")
print("\nMaterials Found:", len(Mats))
# Ref ou REF???
r = s.post(url_materials, data={ 'compact:': 'on', 'formsubmit': 'OK' })
r.raise_for_status()
# print("post Material: ", r.url, r.status_code)
html = lh.fromstring(r.text.encode(r.encoding))
refs = html.xpath('//input[@name="REF"]/@value')
sigmas = html.xpath('//input[@name="CONDUCTIVITE"]/@value')
elasticlimits = html.xpath('//input[@name="LE"]/@value')
if len(Mats.keys()) != len(refs)-1:
print("Materials in main list:", len(refs)-1)
for i,ref in enumerate(refs):
# ref is lxml.etree._ElementUnicodeResult
if not ref in Mats and not "finir" in ref:
if debug:
print("ref:", ref, type(ref), sigmas[i], elasticlimits[i])
Mats[ref] = GObject.GObject(str(ref), 0,0,
{"sigma0":str(sigmas[i]), "rpe":str(elasticlimits[i])},
"Helix", "Unknown")
#try:
print("=============================")
for i in [1, 5, 7, 8 , 9, 10]:
print("Loading txt files for M%d site" % i)
sitename = "/var/www/html/M%d/" % i
sitename = sitename.replace('/','%2F')
# print("sitename=", sitename)
r = s.post(url="http://147.173.83.216/site/sba/vendor/jqueryFileTree/connectors/jqueryFileTree.php",
data={ 'dir': sitename , })
# print("r.url=", r.url)
r.raise_for_status()
# print("r.text=", r.text)
tree = lh.fromstring(r.text)
# print("tree:", tree)
for tr in tree.xpath('//a'):
if tr.text_content().endswith(".txt"):
print('tr:', tr.text_content() )
try:
tformat="%Y.%m.%d - %H:%M:%S"
timestamp = datetime.datetime.strptime(tr.text_content().replace('.txt',''), tformat)
except:
tformat="%Y.%m.%d - %H_%M_%S"
timestamp = datetime.datetime.strptime(tr.text_content().replace('.txt',''), tformat)
print("changed tformat: %s" % tr.text_content())
pass
link = "../../../M%d/%s" % (i,tr.text_content().replace(' ','%20'))
# print("MRecord: ", timestamp, "M%d" % i, link)
record = MRecord.MRecord(timestamp, "M%d" % i, link)
data = record.getData(s, url_downloads, save=args.save)
mrun = python_magnetrun.MagnetRun.fromStringIO("M%d"%i, data)
insert = mrun.getInsert()
print("M%d: insert=%s file=%s" % (i, insert, tr.text_content()) )
if not insert in Magnets:
Magnets[insert] = HMagnet.HMagnet(insert, 0, None, "Unknown", 0)
if not insert in MagnetRecords:
MagnetRecords[insert] = []
if not record in MagnetRecords[insert]:
#print("addRecord: %s, %s, %s" % (insert, "M%d"%i, link) )
MagnetRecords[insert].append( record )
print("=============================")
# except:
# print( "Failed to perform jqueryFileTree" )
# pass
print("\nSum up: ")
print("\nMagnets:")
for magnet in Magnets:
if not magnet in MagnetRecords:
MagnetRecords[magnet] = []
if not magnet in MagnetComps:
MagnetComps[magnet] = []
print("** %s: status=%s, records=%d, helices=%d" % ( magnet,
Magnets[magnet].getStatus(),
len(MagnetRecords[magnet]),
len(MagnetComps[magnet]) ) )
print("\nMagnets in Operation:")
for magnet in Magnets:
## Broken to json:
#try:
if Magnets[magnet].getStatus() == "En service":
print("%s: " % magnet)
if args.save:
fo = open(magnet + ".json", "w", newline='\n')
fo.write(Magnets[magnet].to_json())
fo.close()
for record in MagnetRecords[magnet]:
ax = plt.gca()
data = record.getData(s, url_downloads, save=args.save)
try:
mrun = python_magnetrun.MagnetRun.fromStringIO(record.getSite(), data)
mrun.plateaus(thresold=2.e-3, duration=10, save=args.save)
except:
print("record: trouble with data from %s" % record.getLink())
pass
print("\nMaterials:")
for mat in Mats:
print(mat, ":", Mats[mat])
if args.save:
fo = open(mat + ".json", "w", newline='\n')
fo.write(Mats[mat].to_json())
fo.close()
|
the-stack_106_18949
|
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance_store import backend
from oslo_config import cfg
from oslo_log import log as logging
from taskflow.patterns import linear_flow as lf
from taskflow import task
from taskflow.types import failure
from glance.common import exception
from glance.common.scripts import utils as script_utils
from glance.i18n import _, _LE
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class _WebDownload(task.Task):
default_provides = 'file_uri'
def __init__(self, task_id, task_type, image_repo, image_id, uri):
self.task_id = task_id
self.task_type = task_type
self.image_repo = image_repo
self.image_id = image_id
self.uri = uri
super(_WebDownload, self).__init__(
name='%s-WebDownload-%s' % (task_type, task_id))
if CONF.node_staging_uri is None:
msg = (_("%(task_id)s of %(task_type)s not configured "
"properly. Missing node_staging_uri: %(work_dir)s") %
{'task_id': self.task_id,
'task_type': self.task_type,
'work_dir': CONF.node_staging_uri})
raise exception.BadTaskConfiguration(msg)
self.store = self._build_store()
def _build_store(self):
# NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're
# forced to build our own config object, register the required options
# (and by required I mean *ALL* of them, even the ones we don't want),
# and create our own store instance by calling a private function.
# This is certainly unfortunate but it's the best we can do until the
# glance_store refactor is done. A good thing is that glance_store is
# under our team's management and it gates on Glance so changes to
# this API will (should?) break task's tests.
conf = cfg.ConfigOpts()
backend.register_opts(conf)
conf.set_override('filesystem_store_datadir',
CONF.node_staging_uri[7:],
group='glance_store')
# NOTE(flaper87): Do not even try to judge me for this... :(
# With the glance_store refactor, this code will change, until
# that happens, we don't have a better option and this is the
# least worst one, IMHO.
store = backend._load_store(conf, 'file')
if store is None:
msg = (_("%(task_id)s of %(task_type)s not configured "
"properly. Could not load the filesystem store") %
{'task_id': self.task_id, 'task_type': self.task_type})
raise exception.BadTaskConfiguration(msg)
store.configure()
return store
def execute(self):
"""Create temp file into store and return path to it
:param image_id: Glance Image ID
"""
# NOTE(jokke): We've decided to use staging area for this task as
# a way to expect users to configure a local store for pre-import
# works on the image to happen.
#
# While using any path should be "technically" fine, it's not what
# we recommend as the best solution. For more details on this, please
# refer to the comment in the `_ImportToStore.execute` method.
data = script_utils.get_image_data_iter(self.uri)
path = self.store.add(self.image_id, data, 0)[0]
return path
def revert(self, result, **kwargs):
if isinstance(result, failure.Failure):
LOG.exception(_LE('Task: %(task_id)s failed to import image '
'%(image_id)s to the filesystem.'),
{'task_id': self.task_id,
'image_id': self.image_id})
def get_flow(**kwargs):
"""Return task flow for web-download.
:param task_id: Task ID.
:param task_type: Type of the task.
:param image_repo: Image repository used.
:param uri: URI the image data is downloaded from.
"""
task_id = kwargs.get('task_id')
task_type = kwargs.get('task_type')
image_repo = kwargs.get('image_repo')
image_id = kwargs.get('image_id')
uri = kwargs.get('import_req')['method'].get('uri')
return lf.Flow(task_type).add(
_WebDownload(task_id, task_type, image_repo, image_id, uri),
)
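# Illustrative call sketch for get_flow() above (every argument value below is
# an assumption made up for demonstration, not something produced by Glance):
#
#   flow = get_flow(task_id='t-123',
#                   task_type='import',
#                   image_repo=image_repo,
#                   image_id='6bbe7cc2-0000-0000-0000-000000000000',
#                   import_req={'method': {'name': 'web-download',
#                                          'uri': 'https://example.com/img.qcow2'}})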
|
the-stack_106_18950
|
print("type help to ask for instruction")
instraction = input()
instraction = help
if instraction == help:
print('''
start - to start the car
stop - to stop the car
quit- to exit ''' )
else:
print("please type help")
instra = input()
instra = "start"
if instra == "start":
print("car started.")
elif instra == "stop":
print("car stopped")
elif instra == "quit":
exit()
|
the-stack_106_18951
|
"""
Plot results
Author(s): Wei Chen ([email protected])
"""
import os
import itertools
import numpy as np
from matplotlib import pyplot as plt
from cfg_reader import read_config
from shape_plot import plot_shape
from run_batch_experiments import novelty_score, non_dominated_sort
from simulation import detect_intersect
import sys
sys.path.append('..')
from evaluation import diversity_score
def convert_perf(perf):
perf[:,1] = perf[:,0]/perf[:,1]
ind = np.logical_or(np.isinf(perf), np.isnan(perf))
perf[ind] = 0
return perf
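# Example of what convert_perf does (numbers invented for illustration): a row
# [C_L, C_D] = [1.2, 0.04] becomes [C_L, C_L/C_D] = [1.2, 30.0]; entries where
# the ratio comes out inf or NaN (e.g. C_D == 0) are zeroed out.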
def select_best_ind(metric, n_selected, feasibility):
sorted_ind = np.argsort(metric*feasibility)
selected_ind = sorted_ind[-n_selected:]
return selected_ind
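# Example for select_best_ind (invented values): metric=[0.1, 0.5, 0.3],
# feasibility=[1, 0, 1], n_selected=2 -> metric*feasibility=[0.1, 0.0, 0.3],
# argsort=[1, 0, 2], selected indices=[0, 2]; the infeasible design is pushed
# to the bottom of the ranking and never selected.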
def plot_airfoils(airfoils, airfoils_nearest, perfs, ax):
n = airfoils.shape[0]
zs = np.vstack((np.zeros(n), 0.6*np.arange(n))).T
for i in range(n):
plot_shape(airfoils[i], zs[i, 0], zs[i, 1], ax, 1., False, None, c='k', lw=1.2)
plot_shape(airfoils_nearest[i], zs[i, 0], zs[i, 1], ax, 1., False, None, c='k', lw=1.2, ls='--', alpha=.5)
plt.annotate(r'$C_L={:.2f}$, $C_L/C_D={:.2f}$'.format(perfs[i,0], perfs[i,1]), xy=(zs[i, 0], zs[i, 1]+0.3), size=14)
ax.axis('off')
ax.axis('equal')
ax.set_ylim(zs[0,1]-0.2, zs[-1,1]+0.6)
if __name__ == "__main__":
config_fname = 'config.ini'
list_models = ['MO-PaDGAN', 'GAN', 'SVD', 'FFD']
# list_models = ['MO-PaDGAN', 'GAN']
m = len(list_models)
###############################################################################
# Plot diversity, quality, and novelty scores
print('Plotting scores ...')
plt.rcParams.update({'font.size': 14})
n = 300 # generated sample size for each trained model
subset_size = 10 # for computing DDP
sample_times = n # for computing DDP
# Training data
x_path = './data/xs_train.npy'
airfoils_data = np.load(x_path)
list_div = []
list_qa0 = []
list_qa1 = []
list_nvl = []
list_selected_ind = []
for model_name in list_models:
if model_name == 'FFD':
parameterization = 'ffd'
elif model_name == 'SVD':
parameterization = 'svd'
elif model_name == 'GAN':
parameterization = 'gan'
lambda0, lambda1 = 0., 0.
else:
parameterization = 'gan'
_, _, _, _, _, _, _, lambda0, lambda1, _ = read_config(config_fname)
if parameterization == 'gan':
save_dir = 'trained_gan/{}_{}'.format(lambda0, lambda1)
else:
save_dir = '{}'.format(parameterization)
airfoils = np.load('{}/gen_xs.npy'.format(save_dir))[:n]
div = diversity_score(airfoils, subset_size, sample_times)
qa = np.load('{}/gen_ys.npy'.format(save_dir))[:n]
qa = convert_perf(qa)
nvl_path = '{}/novelty_scores.npy'.format(save_dir)
nns_path = '{}/nearest_neighbors.npy'.format(save_dir)
if os.path.exists(nvl_path) and os.path.exists(nns_path):
nvl = np.load(nvl_path)
nns = np.load(nns_path)
else:
nvl = []
nns = []
for i, airfoil in enumerate(airfoils):
print('{}/{}'.format(i+1, n))
dist, nearest_airfoil = novelty_score(airfoil, airfoils_data)
nvl.append(dist)
nns.append(nearest_airfoil)
np.save(nvl_path, nvl)
np.save(nns_path, nns)
feasibility = np.logical_not(np.all(qa==0, axis=1))
for i, airfoil in enumerate(airfoils):
if detect_intersect(airfoil):
feasibility[i] = False
print('{}: {:.2f}%'.format(model_name, sum(feasibility)/len(feasibility)*100))
selected_ind = select_best_ind(nvl, 5, feasibility)
list_div.append(div)
list_qa0.append(qa[:,0])
list_qa1.append(qa[:,1])
list_nvl.append(nvl)
list_selected_ind.append(selected_ind)
list_xlabels = list_models
fig = plt.figure(figsize=(15, 3))
ax1 = fig.add_subplot(141)
ax1.set_title('Diversity')
ax1.boxplot(list_div, 0, '')
ax1.set_xlim(0.5, len(list_xlabels) + 0.5)
ax1.set_xticklabels(list_xlabels, rotation=20)
ax2 = fig.add_subplot(142)
ax2.set_title(r'$C_L$')
ax2.boxplot(list_qa0, 0, '')
ax2.set_xlim(0.5, len(list_xlabels) + 0.5)
ax2.set_xticklabels(list_xlabels, rotation=20)
ax3 = fig.add_subplot(143)
ax3.set_title(r'$C_L/C_D$')
ax3.boxplot(list_qa1, 0, '')
ax3.set_xlim(0.5, len(list_xlabels) + 0.5)
ax3.set_xticklabels(list_xlabels, rotation=20)
ax3 = fig.add_subplot(144)
ax3.set_title('Novelty')
ax3.boxplot(list_nvl, 0, '')
ax3.set_xlim(0.5, len(list_xlabels) + 0.5)
ax3.set_xticklabels(list_xlabels, rotation=20)
plt.tight_layout()
plt.savefig('./airfoil_scores.svg')
plt.savefig('./airfoil_scores.png')
plt.close()
###############################################################################
# Plot most novel airfoils
print('Plotting most novel airfoils ...')
fig = plt.figure(figsize=(15, 6))
for i, model_name in enumerate(list_models):
if model_name == 'FFD':
parameterization = 'ffd'
elif model_name == 'SVD':
parameterization = 'svd'
elif model_name == 'GAN':
parameterization = 'gan'
lambda0, lambda1 = 0., 0.
else:
parameterization = 'gan'
_, _, _, _, _, _, _, lambda0, lambda1, _ = read_config(config_fname)
if parameterization == 'gan':
save_dir = 'trained_gan/{}_{}'.format(lambda0, lambda1)
else:
save_dir = '{}'.format(parameterization)
airfoils = np.load('{}/gen_xs.npy'.format(save_dir))[:n]
airfoils_nearest = np.load('{}/nearest_neighbors.npy'.format(save_dir))
perfs = np.load('{}/gen_ys.npy'.format(save_dir))[:n]
perfs = convert_perf(perfs)
ax = fig.add_subplot(1, m, i+1)
plot_airfoils(airfoils[list_selected_ind[i]], airfoils_nearest[list_selected_ind[i]], perfs[list_selected_ind[i]], ax)
ax.set_title(model_name)
# plt.tight_layout()
plt.savefig('./airfoil_most_novel.svg')
plt.savefig('./airfoil_most_novel.png')
plt.close()
###############################################################################
# Plot Pareto front for a single run
print('Plotting Pareto front for a single run ...')
colors = ['#003f5c', '#7a5195', '#ef5675', '#ffa600']
iter_colors = itertools.cycle(colors)
markers = ['s', '^', 'o', 'v']
iter_markers = itertools.cycle(markers)
fig = plt.figure()
for model_name in list_models:
if model_name == 'FFD':
parameterization = 'ffd'
elif model_name == 'SVD':
parameterization = 'svd'
elif model_name == 'GAN':
parameterization = 'gan'
lambda0, lambda1 = 0., 0.
else:
parameterization = 'gan'
_, _, _, _, _, _, _, lambda0, lambda1, _ = read_config(config_fname)
if parameterization == 'gan':
save_dir = 'trained_gan/{}_{}/optimization'.format(lambda0, lambda1)
else:
save_dir = '{}/optimization'.format(parameterization)
y_history_path = '{}/0/y_hist.npy'.format(save_dir)
y_history = np.load(y_history_path)
pf_y, _, _ = non_dominated_sort(y_history)
plt.scatter(-pf_y[:,0], -pf_y[:,1], c=next(iter_colors), marker=next(iter_markers), label=model_name)
plt.legend(frameon=True, title='Parameterization')
plt.xlabel(r'$C_L$')
plt.ylabel(r'$C_L/C_D$')
plt.tight_layout()
plt.savefig('pareto_pts_1run.svg')
plt.savefig('pareto_pts_1run.png')
plt.close()
|
the-stack_106_18953
|
import base64
import hashlib
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lookup.pe as s_l_pe
class FileBase(s_types.Str):
def postTypeInit(self):
s_types.Str.postTypeInit(self)
self.setNormFunc(str, self._normPyStr)
def _normPyStr(self, valu):
norm = valu.strip().lower().replace('\\', '/')
if norm.find('/') != -1:
mesg = 'file:base may not contain /'
raise s_exc.BadTypeValu(name=self.name, valu=valu, mesg=mesg)
subs = {}
if norm.find('.') != -1:
subs['ext'] = norm.rsplit('.', 1)[1]
return norm, {'subs': subs}
class FilePath(s_types.Str):
def postTypeInit(self):
s_types.Str.postTypeInit(self)
self.setNormFunc(str, self._normPyStr)
def _normPyStr(self, valu):
if len(valu) == 0:
return '', {}
lead = ''
if valu[0] == '/':
lead = '/'
valu = valu.strip().lower().replace('\\', '/').strip('/')
if not valu:
return '', {}
if valu in ('.', '..'):
raise s_exc.BadTypeValu(name=self.name, valu=valu,
mesg='Cannot norm a bare relative path.')
path = []
vals = [v for v in valu.split('/') if v]
for part in vals:
if part == '.':
continue
if part == '..':
if len(path):
path.pop()
continue
path.append(part)
fullpath = lead + '/'.join(path)
base = path[-1]
subs = {'base': base}
if '.' in base:
subs['ext'] = base.rsplit('.', 1)[1]
if len(path) > 1:
subs['dir'] = lead + '/'.join(path[:-1])
return fullpath, {'subs': subs}
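# Illustrative normalization for FilePath (input value assumed):
# 'C:\\Windows\\System32\\calc.exe' norms to 'c:/windows/system32/calc.exe'
# with subs {'base': 'calc.exe', 'ext': 'exe', 'dir': 'c:/windows/system32'}.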
class FileBytes(s_types.Str):
def postTypeInit(self):
s_types.Str.postTypeInit(self)
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(list, self._normPyList)
self.setNormFunc(tuple, self._normPyList)
self.setNormFunc(bytes, self._normPyBytes)
def _normPyList(self, valu):
guid, info = self.modl.type('guid').norm(valu)
norm = f'guid:{guid}'
return norm, {}
def _normPyStr(self, valu):
if valu == '*':
guid = s_common.guid()
norm = f'guid:{guid}'
return norm, {}
if valu.find(':') == -1:
# we're ok with un-adorned sha256s
if len(valu) == 64 and s_common.uhex(valu):
valu = valu.lower()
subs = {'sha256': valu}
return f'sha256:{valu}', {'subs': subs}
raise s_exc.BadTypeValu(name=self.name, valu=valu,
mesg='unadorned file:bytes value is not a sha256')
kind, kval = valu.split(':', 1)
if kind == 'base64':
byts = base64.b64decode(kval)
return self._normPyBytes(byts)
kval = kval.lower()
if kind == 'hex':
byts = s_common.uhex(kval)
return self._normPyBytes(byts)
if kind == 'guid':
kval = kval.lower()
if not s_common.isguid(kval):
raise s_exc.BadTypeValu(name=self.name, valu=valu,
mesg='guid is not a guid')
return f'guid:{kval}', {}
if kind == 'sha256':
if len(kval) != 64:
raise s_exc.BadTypeValu(name=self.name, valu=valu,
mesg='invalid length for sha256 valu')
s_common.uhex(kval)
subs = {'sha256': kval}
return f'sha256:{kval}', {'subs': subs}
raise s_exc.BadTypeValu(name=self.name, valu=valu, kind=kind,
mesg='unable to norm as file:bytes')
def _normPyBytes(self, valu):
sha256 = hashlib.sha256(valu).hexdigest()
norm = f'sha256:{sha256}'
subs = {
'md5': hashlib.md5(valu).hexdigest(),
'sha1': hashlib.sha1(valu).hexdigest(),
'sha256': sha256,
'sha512': hashlib.sha512(valu).hexdigest(),
'size': len(valu),
}
return norm, {'subs': subs}
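# Illustrative normalization results for FileBytes (the digest shown is the
# well-known sha256 of the empty byte string; treat the examples as orientation
# only, not exhaustive behavior):
#   '*'             -> 'guid:<random guid>', no subs
#   'base64:'       -> norm of b'' -> 'sha256:e3b0c44298fc1c149afbf4c8996fb924'
#                                     '27ae41e4649b934ca495991b7852b855'
#   'hex:<chars>'   -> norm of the decoded bytes, same sha256-based form
#   '<64 hex chars>' -> accepted as an un-adorned sha256 and prefixed accordingly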
class FileModule(s_module.CoreModule):
async def initCoreModule(self):
self.model.prop('file:bytes:mime').onSet(self._onSetFileBytesMime)
async def _onSetFileBytesMime(self, node, oldv):
name = node.get('mime')
if name == '??':
return
await node.snap.addNode('file:ismime', (node.ndef[1], name))
def getModelDefs(self):
modl = {
'ctors': (
('file:bytes', 'synapse.models.files.FileBytes', {}, {
'doc': 'The file bytes type with SHA256 based primary property.'}),
('file:base', 'synapse.models.files.FileBase', {}, {
'doc': 'A file name with no path.',
'ex': 'woot.exe'}),
('file:path', 'synapse.models.files.FilePath', {}, {
'doc': 'A normalized file path.',
'ex': 'c:/windows/system32/calc.exe'}),
),
'types': (
('file:subfile', ('comp', {'fields': (('parent', 'file:bytes'), ('child', 'file:bytes'))}), {
'doc': 'A parent file that fully contains the specified child file.',
}),
('file:filepath', ('comp', {'fields': (('file', 'file:bytes'), ('path', 'file:path'))}), {
'doc': 'The fused knowledge of the association of a file:bytes node and a file:path.',
}),
('file:mime', ('str', {'lower': 1}), {
'doc': 'A file mime name string.',
'ex': 'text/plain',
}),
('file:ismime', ('comp', {'fields': (('file', 'file:bytes'), ('mime', 'file:mime'))}), {
'doc': 'Records one, of potentially multiple, mime types for a given file.',
}),
('file:mime:pe:section', ('comp', {'fields': (
('file', 'file:bytes'),
('name', 'str'),
('sha256', 'hash:sha256'),
)}), {
'doc': 'The fused knowledge a file:bytes node containing a pe section.',
}),
('file:mime:pe:resource', ('comp', {'fields': (
('file', 'file:bytes'),
('type', 'pe:resource:type'),
('langid', 'pe:langid'),
('resource', 'file:bytes'))}), {
'doc': 'The fused knowledge of a file:bytes node containing a pe resource.',
}),
('file:mime:pe:export', ('comp', {'fields': (
('file', 'file:bytes'),
('name', 'str'))}), {
'doc': 'The fused knowledge of a file:bytes node containing a pe named export.',
}),
('file:mime:pe:vsvers:keyval', ('comp', {'fields': (
('name', 'str'),
('value', 'str'))}), {
'doc': 'A key value pair found in a PE vsversion info structure.',
}),
('file:mime:pe:vsvers:info', ('comp', {'fields': (
('file', 'file:bytes'),
('keyval', 'file:mime:pe:vsvers:keyval'))}), {
'doc': 'knowledge of a file:bytes node containing vsvers info.',
}),
('file:string', ('comp', {'fields': (
('file', 'file:bytes'),
('string', 'str'))}), {
'doc': 'The fused knowledge of a file:bytes node containing a string.',
}),
('pe:resource:type', ('int', {'enums': s_l_pe.getRsrcTypes()}), {
'doc': 'The typecode for the resource.',
}),
('pe:langid', ('int', {'enums': s_l_pe.getLangCodes()}), {
'doc': 'The PE language id.',
}),
),
'forms': (
('file:bytes', {}, (
('size', ('int', {}), {
'doc': 'The file size in bytes.'}),
('md5', ('hash:md5', {}), {'ro': 1,
'doc': 'The md5 hash of the file.'}),
('sha1', ('hash:sha1', {}), {'ro': 1,
'doc': 'The sha1 hash of the file.'}),
('sha256', ('hash:sha256', {}), {'ro': 1,
'doc': 'The sha256 hash of the file.'}),
('sha512', ('hash:sha512', {}), {'ro': 1,
'doc': 'The sha512 hash of the file.'}),
('name', ('file:base', {}), {
'doc': 'The best known base name for the file.'}),
('mime', ('file:mime', {}), {
'doc': 'The "best" mime type name for the file.'}),
('mime:x509:cn', ('str', {}), {
'doc': 'The Common Name (CN) attribute of the x509 Subject.'}),
('mime:pe:size', ('int', {}), {
'doc': 'The size of the executable file according to the PE file header.'}),
('mime:pe:imphash', ('guid', {}), {
'doc': 'The PE import hash of the file as calculated by pefile; '
'https://github.com/erocarrera/pefile .'}),
('mime:pe:compiled', ('time', {}), {
'doc': 'The compile time of the file according to the PE header.'}),
('mime:pe:pdbpath', ('file:path', {}), {
'doc': 'The PDB string according to the PE.'}),
('mime:pe:exports:time', ('time', {}), {
'doc': 'The export time of the file according to the PE.'}),
('mime:pe:exports:libname', ('str', {}), {
'doc': 'The export library name according to the PE.'}),
('mime:pe:richhdr', ('hash:sha256', {}), {
'doc': 'The sha256 hash of the rich header bytes.'}),
)),
('file:mime', {}, ()),
('file:ismime', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file node that is an instance of the named mime type.',
}),
('mime', ('file:mime', {}), {
'ro': True,
'doc': 'The mime type of the file.',
}),
)),
('file:mime:pe:section', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file containing the section.',
}),
('name', ('str', {}), {
'ro': True,
'doc': 'The textual name of the section.',
}),
('sha256', ('hash:sha256', {}), {
'ro': True,
'doc': 'The sha256 hash of the section. Relocations must be zeroed before hashing.',
}),
)),
('file:mime:pe:resource', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file containing the resource.',
}),
('type', ('pe:resource:type', {}), {
'ro': True,
'doc': 'The typecode for the resource.',
}),
('langid', ('pe:langid', {}), {
'ro': True,
'doc': 'The language code for the resource.',
}),
('resource', ('file:bytes', {}), {
'ro': True,
'doc': 'The sha256 hash of the resource bytes.',
}),
)),
('file:mime:pe:export', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file containing the export.',
}),
('name', ('str', {}), {
'ro': True,
'doc': 'The name of the export in the file.',
}),
)),
('file:mime:pe:vsvers:keyval', {}, (
('name', ('str', {}), {
'ro': True,
'doc': 'The key for the vsversion keyval pair.',
}),
('value', ('str', {}), {
'ro': True,
'doc': 'The value for the vsversion keyval pair.',
}),
)),
('file:mime:pe:vsvers:info', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file containing the vsversion keyval pair.',
}),
('keyval', ('file:mime:pe:vsvers:keyval', {}), {
'ro': True,
'doc': 'The vsversion info keyval in this file:bytes node.',
}),
)),
('file:string', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file containing the string.',
}),
('string', ('str', {}), {
'ro': True,
'doc': 'The string contained in this file:bytes node.',
}),
)),
('file:base', {}, (
('ext', ('str', {}), {'ro': 1,
'doc': 'The file extension (if any).'}),
)),
('file:filepath', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file seen at a path.',
}),
('path', ('file:path', {}), {
'ro': True,
'doc': 'The path a file was seen at.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory.',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The name of the file.',
}),
('path:base:ext', ('str', {}), {
'ro': True,
'doc': 'The extension of the file name.',
}),
)),
('file:subfile', {}, (
('parent', ('file:bytes', {}), {
'ro': True,
'doc': 'The parent file containing the child file.',
}),
('child', ('file:bytes', {}), {
'ro': True,
'doc': 'The child file contained in the parent file.',
}),
('name', ('file:base', {}), {
'doc': 'The name of the child file. Because a given set of bytes '
'can have any number of arbitrary names, this field is '
'used for display purposes only.'
})
)),
('file:path', {}, (
('dir', ('file:path', {}), {'ro': 1,
'doc': 'The parent directory.'}),
('base', ('file:base', {}), {'ro': 1,
'doc': 'The file base name.'}),
('base:ext', ('str', {}), {'ro': 1,
'doc': 'The file extension.'}),
)),
),
}
name = 'file'
return ((name, modl),)
|
the-stack_106_18954
|
#!/usr/bin/env python3
import argparse
from xcanalyzer.xcodeproject.parsers import XcProjectParser
from xcanalyzer.xcodeproject.generators import XcProjReporter
from xcanalyzer.xcodeproject.exceptions import XcodeProjectReadException
# --- Arguments ---
argument_parser = argparse.ArgumentParser(description="List all targets and files of the Xcode project.")
# Project folder argument
argument_parser.add_argument('path',
help='Path of the folder containing your `.xcodeproj` folder.')
# Only "shared" files between targets
argument_parser.add_argument('-s', '--only-shared',
dest='only_shared',
action='store_true',
help='Give the list of files used by multiple targets.')
# --- Parse arguments ---
args = argument_parser.parse_args()
# Xcode code project reader
xcode_project_reader = XcProjectParser(args.path)
# Loading the project
try:
xcode_project_reader.load()
except XcodeProjectReadException as e:
print("An error occurred when loading Xcode project: {}".format(e.message))
exit()
# Reporter
reporter = XcProjReporter(xcode_project_reader.xc_project)
if args.only_shared:
reporter.print_shared_files()
else:
reporter.print_files_by_targets()
reporter.print_files_summary()
|
the-stack_106_18955
|
import yaml
import os
from sdg.translations import TranslationInputSdmx
source_language = 'en'
source = 'https://nsiws-stable-camstat-live.officialstatistics.org/rest/dataflow/KH_NIS/DF_SDG_KH/1.2?references=all&detail=referencepartial'
request_params = {
'headers': {
'User-Agent': 'Mozilla'
}
}
translation_input = TranslationInputSdmx(source=source, request_params=request_params)
translation_input.execute()
translations = translation_input.get_translations()
for concept in translations[source_language]:
filename = os.path.join('translations', 'dsd', source_language, concept) + '.yml'
with open(filename, 'w', encoding='utf-8') as stream:
yaml.dump(translations[source_language][concept], stream, allow_unicode=True, width=1000)
|
the-stack_106_18956
|
#!/usr/bin/env python
# Filename: prepare_label_raster.py
"""
introduction: for test, crop and resample label raster for training.
authors: Huang Lingcao
email:[email protected]
add time: 05 July, 2021
"""
import os,sys
code_dir = os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS')
sys.path.insert(0, code_dir)
import basic_src.io_function as io_function
import raster_io
import basic_src.RSImageProcess as RSImageProcess
import basic_src.map_projection as map_projection
data_dir=os.path.expanduser('~/Data/LandCover_LandUse_Change')
def resample_crop_raster(ref_raster, input_raster, output_raster=None, resample_method='near'):
if output_raster is None:
output_raster = io_function.get_name_by_adding_tail(os.path.basename(input_raster),'res_sub')
if os.path.isfile(output_raster):
print('Warning, %s exists'%output_raster)
return output_raster
# check projection
prj4_ref = map_projection.get_raster_or_vector_srs_info_proj4(ref_raster)
prj4_input = map_projection.get_raster_or_vector_srs_info_proj4(input_raster)
if prj4_ref != prj4_input:
raise ValueError('projection inconsistent: %s and %s'%(ref_raster, input_raster))
# crop
RSImageProcess.subset_image_baseimage(output_raster, input_raster, ref_raster, same_res=True,resample_m=resample_method)
if os.path.isfile(output_raster):
return output_raster
else:
return False
def crop_resample_label_raster():
img_path = os.path.join(data_dir,'rs_imagery/Planet/Brazil_area1_2019Feb07_psscene4band_analytic_sr_udm2/'
'Brazil_area1_20190207_3B_AnalyticMS_SR_mosaic_8bit_rgb_sub.tif')
label_path = os.path.join(data_dir, 'LCLUC_MapBiomas_Gabriel/COLECAO_5_DOWNLOADS_COLECOES_ANUAL_2019_merge_prj.tif')
# crop and resample
label_sub = resample_crop_raster(img_path,label_path)
# rename the label raster
new_label_img = io_function.get_name_by_adding_tail(os.path.basename(img_path),'label')
io_function.move_file_to_dst(label_sub,new_label_img)
def main():
crop_resample_label_raster()
if __name__ == '__main__':
main()
pass
|
the-stack_106_18958
|
import argparse
import cv2
import numpy as np
import torch
from albumentations.pytorch import ToTensorV2
import albumentations as Aug
from model import FaceModel
from wider_face_dataset import img_size
parser = argparse.ArgumentParser(description='add batch size')
parser.add_argument('model_path', type=str, help='the path of your model')
parser.add_argument('image_path', type=str, help='the path of the image that u want to test')
args = parser.parse_args()
def nms(dets, thresh):
if dets.shape[0] == 0:
return []
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
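# A minimal sanity check for nms() above (box values are made up for
# illustration): two heavily overlapping boxes and one separate box; with
# thresh=0.5 only the higher-scoring box of the overlapping pair survives.
#
#   dets = np.array([[0, 0, 10, 10, 0.9],
#                    [1, 1, 10, 10, 0.8],
#                    [20, 20, 30, 30, 0.7]])
#   nms(dets, 0.5)  # -> [0, 2]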
def post_process(face_locations):
grid_size = face_locations.shape[1]
size = img_size / grid_size
face_locations = face_locations.reshape(-1, 5)
output0 = []
for i, out in enumerate(face_locations):
if out[4] >= 0.02:
colm = i % grid_size
row = int(i / grid_size)
x = (out[0] + colm) * img_size / grid_size
y = (out[1] + row) * img_size / grid_size
w = out[2] * img_size
h = out[3] * img_size
k = [x-w/2, y-h/2, x+w/2, y+h/2]
k.append(out[4])
output0.append(k)
return output0
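# How the grid decode above works (grid size 13 is an illustrative assumption):
# for a flattened cell index i=14, colm = 14 % 13 = 1 and row = 14 // 13 = 1, so
# the predicted (x, y) offsets are shifted by that cell before being scaled back
# to img_size pixels; each kept box is returned as [x1, y1, x2, y2, confidence].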
def draw(frame, face_location):
for k in face_location:
if k[4] >= 0.3:
k = list(map(int, k))
            cv2.rectangle(frame, (k[0], k[1]), (k[2], k[3]), (255, 0, 0), 2)
cv2.imshow('image', frame)
cv2.waitKey(0)
model = FaceModel('resnet18').cuda()
model.load_state_dict(torch.load(args.model_path))
transforms = Aug.Compose([
Aug.Resize(img_size, img_size),
Aug.Normalize(),
ToTensorV2()])
image = cv2.imread(args.image_path)
orig_size = (image.shape[0], image.shape[1])
image2 = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
transformed = transforms(image=image2)
x = transformed['image']
x = x.unsqueeze(0).cuda()
output = model(x)
with torch.no_grad():
dets1 = post_process(torch.squeeze(output[0].cpu()))
dets2 = post_process(torch.squeeze(output[1].cpu()))
dets3 = post_process(torch.squeeze(output[2].cpu()))
dets = np.array(dets1 + dets2 + dets3)
keep = nms(dets, 0.25)
dets = dets[keep]
dets[..., 0] = dets[..., 0] * orig_size[1] / img_size
dets[..., 1] = dets[..., 1] * orig_size[0] / img_size
dets[..., 2] = dets[..., 2] * orig_size[1] / img_size
dets[..., 3] = dets[..., 3] * orig_size[0] / img_size
draw(image, dets)
|
the-stack_106_18960
|
'''
Platform tests to discover the system capabilities.
'''
import os
import sys
import select
import struct
import threading
from pyroute2 import config
from pyroute2.common import uifname
from pyroute2 import RawIPRoute
from pyroute2.netlink.rtnl import RTMGRP_LINK
class SkipTest(Exception):
pass
class TestCapsRtnl(object):
'''
A minimal test set to collect the RTNL implementation
capabilities.
It uses raw RTNL sockets and doesn't run any proxy code, so
no transparent helpers are executed -- e.g., it will not
create bridge via `brctl`, if RTNL doesn't support it.
A short developer's guide::
def test_whatever_else(self):
code
This test will create a capability record `whatever_else`. If
the `code` fails, the `whatever_else` will be set to `False`.
If it throws the `SkipTest` exception, the `whatever_else` will
be set to `None`. Otherwise it will be set to whatever the test
returns.
To collect the capabilities::
tce = TestCapsExt()
tce.collect()
print(tce.capabilities)
Collected capabilities are in the `TestCapsExt.capabilities`
dictionary, you can use them directly or by setting the
`config.capabilities` singletone::
from pyroute2 import config
# ...
tce.collect()
config.capabilities = tce.capabilities
'''
def __init__(self):
self.capabilities = {}
self.ifnames = []
self.rtm_newlink = {}
self.rtm_dellink = {}
self.rtm_events = {}
self.cmd, self.cmdw = os.pipe()
self.ip = None
self.event = threading.Event()
def __getitem__(self, key):
return self.capabilities[key]
def set_capability(self, key, value):
'''
Set a capability.
'''
self.capabilities[key] = value
def ifname(self):
'''
Register and return a new unique interface name to
be used in a test.
'''
ifname = uifname()
self.ifnames.append(ifname)
self.rtm_events[ifname] = threading.Event()
self.rtm_newlink[ifname] = []
self.rtm_dellink[ifname] = []
return ifname
def monitor(self):
# The monitoring code to collect RTNL messages
# asynchronously.
# Do **NOT** run manually.
# use a separate socket for monitoring
ip = RawIPRoute()
ip.bind(RTMGRP_LINK)
poll = select.poll()
poll.register(ip, select.POLLIN | select.POLLPRI)
poll.register(self.cmd, select.POLLIN | select.POLLPRI)
self.event.set()
while True:
events = poll.poll()
for (fd, evt) in events:
if fd == ip.fileno():
msgs = ip.get()
for msg in msgs:
name = msg.get_attr('IFLA_IFNAME')
event = msg.get('event')
if name not in self.rtm_events:
continue
if event == 'RTM_NEWLINK':
self.rtm_events[name].set()
self.rtm_newlink[name].append(msg)
elif event == 'RTM_DELLINK':
self.rtm_dellink[name].append(msg)
else:
ip.close()
return
def setup(self):
# The setup procedure for a test.
# Do **NOT** run manually.
# create the raw socket
self.ip = RawIPRoute()
def teardown(self):
# The teardown procedure for a test.
# Do **NOT** run manually.
# clear the collected interfaces
for ifname in self.ifnames:
self.rtm_events[ifname].wait()
self.rtm_events[ifname].clear()
if self.rtm_newlink.get(ifname):
self.ip.link('del', index=self.rtm_newlink[ifname][0]['index'])
self.ifnames = []
# close the socket
self.ip.close()
def collect(self):
'''
Run the tests and collect the capabilities. They will be
saved in the `TestCapsRtnl.capabilities` attribute.
'''
symbols = sorted(dir(self))
# start the monitoring thread
mthread = threading.Thread(target=self.monitor)
mthread.start()
self.event.wait()
# wait for the thread setup
for name in symbols:
if name.startswith('test_'):
self.setup()
try:
ret = getattr(self, name)()
if ret is None:
ret = True
self.set_capability(name[5:], ret)
except SkipTest:
self.set_capability(name[5:], None)
except Exception:
for ifname in self.ifnames:
# cancel events queued for that test
self.rtm_events[ifname].set()
self.set_capability(name[5:], False)
self.teardown()
# stop the monitor
os.write(self.cmdw, b'q')
mthread.join()
return self.capabilities
def test_uname(self):
'''
Return collected uname
'''
return config.uname
def test_python_version(self):
'''
Return Python version
'''
return sys.version
def test_unpack_from(self):
'''
Does unpack_from() support bytearray as the buffer
'''
# probe unpack from
try:
struct.unpack_from('I', bytearray((1, 0, 0, 0)), 0)
except:
return False
# works... but may it be monkey patched?
if hasattr(struct, '_u_f_orig'):
return False
def test_create_dummy(self):
'''
An obvious test: an ability to create dummy interfaces
'''
self.ghost = self.ifname()
self.ip.link('add', ifname=self.ghost, kind='dummy')
def test_create_bridge(self):
'''
Can the kernel create bridges via netlink?
'''
self.ip.link('add', ifname=self.ifname(), kind='bridge')
def test_create_bond(self):
'''
Can the kernel create bonds via netlink?
'''
self.ip.link('add', ifname=self.ifname(), kind='bond')
def test_ghost_newlink_count(self):
'''
A normal flow (req == request, brd == broadcast message)::
(req) -> RTM_NEWLINK
(brd) <- RTM_NEWLINK
(req) -> RTM_DELLINK
(brd) <- RTM_DELLINK
But on old kernels you can encounter the following::
(req) -> RTM_NEWLINK
(brd) <- RTM_NEWLINK
(req) -> RTM_DELLINK
(brd) <- RTM_DELLINK
(brd) <- RTM_NEWLINK (!) false positive
And that obviously can break the code that relies on
broadcast updates, since it will see as a new interface
is created immediately after it was destroyed.
One can ignore RTM_NEWLINK for the same name that follows
a normal RTM_DELLINK. To do that, one should be sure the
message will come.
Another question is how many messages to ignore.
This is not a test s.str., but it should follow after the
`test_create_dummy`. It counts, how many RTM_NEWLINK
messages arrived during the `test_create_dummy`.
The ghost newlink messages count will be the same for other
interface types as well.
'''
with open('/proc/version', 'r') as f:
if int(f.read().split()[2][0]) > 2:
# the issue is reported only for kernels 2.x
return 0
# there is no guarantee it will come; it *may* come
self.rtm_events[self.ghost].wait(0.5)
return max(len(self.rtm_newlink.get(self.ghost, [])) - 1, 0)
|
the-stack_106_18961
|
# coding: utf8
import re
from aoc import aoc
# We need to have two masks to correctly overwrite bits: an AND and an OR mask.
RE_MEMSET = re.compile(r"mem\[(\d+)\] = (\d+)")
RE_BITMASK = re.compile(r"mask = ([01X]+)")
def bitstring(num, bits):
return bin(num)[2:].zfill(bits)
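# Worked example of the two-mask idea above (mask shortened for readability;
# treat the exact values as illustrative): for mask "X1XXXX0X",
#   or_mask  = 0b01000000  (every '1' in the mask forces that bit on)
#   and_mask = 0b11111101  (every '0' in the mask forces that bit off)
# so value 11 becomes (11 | 0b01000000) & 0b11111101 = 73 in part 1.
# In part 2 the 'X' bits float instead: with 2 floating bits, 2**2 candidate
# addresses are generated by bitstring(floating, 2) filling the "{}" slots.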
class Problem(aoc.Problem):
def solve(self, part):
memory = {}
mask = None
and_mask = None
or_mask = None
for ins in self.dataset_lines:
if ins.startswith("mask"):
mask = RE_BITMASK.findall(ins)[0]
or_mask = int(mask.replace("X", "0"), 2)
and_mask = int(mask.replace("X", "1"), 2)
elif ins.startswith("mem"):
address, value = RE_MEMSET.findall(ins)[0]
address, value = int(address), int(value)
if part == 1:
masked_value = (int(value) | or_mask) & and_mask
memory[address] = masked_value
else:
address_template = ""
# find all permutations
for address_bit, mask_bit in zip(bitstring(address, 36), mask):
# basically we OR the bit
if mask_bit == "0":
address_template += address_bit
elif mask_bit == "1":
address_template += mask_bit
elif mask_bit == "X":
address_template += "{}"
# We iterate through 2 to the power of how many floating bits.
floating_len = mask.count("X")
for floating in range(2 ** floating_len):
memory[
int(
address_template.format(
*bitstring(floating, floating_len)
),
2,
)
] = value
return sum(memory.values())
|
the-stack_106_18963
|
_base_ = [
'../../_base_/models/mocov3/vit_small.py',
'../../_base_/datasets/cifar10/mocov3_vit_sz224_bs64.py',
'../../_base_/default_runtime.py',
]
# interval for accumulate gradient
update_interval = 8 # total: 8 x bs64 x 8 accumulates = bs4096
# additional hooks
custom_hooks = [
dict(type='CosineScheduleHook', # update momentum
end_momentum=1.0,
adjust_scope=[0.05, 1.0],
warming_up="constant",
interval=update_interval),
]
# optimizer
optimizer = dict(
type='AdamW',
lr=1.5e-4 * 4096 / 256, # bs4096
betas=(0.9, 0.95), weight_decay=0.1,
paramwise_options={
'(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
'bias': dict(weight_decay=0.),
'pos_embed': dict(weight_decay=0.),
'cls_token': dict(weight_decay=0.)
})
# apex
use_fp16 = False
fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic'))
# optimizer args
optimizer_config = dict(update_interval=update_interval, use_fp16=use_fp16, grad_clip=None)
# learning policy
lr_config = dict(
policy='CosineAnnealing',
by_epoch=False, min_lr=0.,
warmup='linear',
warmup_iters=40, warmup_by_epoch=True,
warmup_ratio=1e-5,
)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=1000)
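# Sanity check of the schedule above (arithmetic only): the effective batch size
# is 8 GPUs x bs64 x 8 accumulation steps = 4096, so the base learning rate
# scales to 1.5e-4 * 4096 / 256 = 2.4e-3 before cosine decay, with a 40-epoch
# linear warmup.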
|
the-stack_106_18965
|
from itertools import count
from numpy import array, zeros, arange, searchsorted, unique
from pyNastran.dev.bdf_vectorized.cards.elements.property import Property
from pyNastran.bdf.field_writer_8 import print_card_8 #, set_default_if_blank
from pyNastran.bdf.field_writer_16 import print_card_16
#from pyNastran.bdf.field_writer_double import print_card_double
from pyNastran.bdf.field_writer_8 import set_blank_if_default
#from pyNastran.bdf.field_writer_8 import set_string8_blank_if_default
#from pyNastran.bdf.field_writer_16 import set_string16_blank_if_default
from pyNastran.bdf.bdf_interface.assign_type import (
integer, string, double_or_blank, string_or_blank, double) # fields
#from pyNastran.dev.bdf_vectorized.utils import slice_to_iter
class PBARL(Property):
type = 'PBARL'
valid_types = {
"ROD": 1,
"TUBE": 2,
"I": 6,
"CHAN": 4,
"T": 4,
"BOX": 4,
"BAR": 2,
"CROSS": 4,
"H": 4,
"T1": 4,
"I1": 4,
"CHAN1": 4,
"Z": 4,
"CHAN2": 4,
"T2": 4,
"BOX1": 6,
"HEXA": 3,
"HAT": 4,
"HAT1": 5,
"DBOX": 10, # was 12
} # for GROUP="MSCBML0"
def __init__(self, model):
"""
Defines the PCOMP object.
Parameters
----------
model : BDF
the BDF object
"""
Property.__init__(self, model)
def allocate(self, card_count):
ncards = card_count[self.type]
if ncards:
self.n = ncards
self.model.log.debug('ncards PBARL = %s' % ncards)
float_fmt = self.model.float_fmt
#: Property ID
self.property_id = zeros(ncards, dtype='int32')
#: Material ID
self.material_id = zeros(ncards, dtype='int32')
self.group = zeros(ncards, dtype='|U8')
#: Section Type (e.g. 'ROD', 'TUBE', 'I', 'H')
self.Type = zeros(ncards, dtype='|U8')
#: non-structural mass
self.nsm = zeros(ncards, dtype=float_fmt)
#: dimension list
self.dim = {}
def add_card(self, card, comment):
i = self.i
self.property_id[i] = integer(card, 1, 'property_id')
self.material_id[i] = integer(card, 2, 'material_id')
self.group[i] = string_or_blank(card, 3, 'group', 'MSCBMLO')
Type = string(card, 4, 'Type')
self.Type[i] = Type
ndim = self.valid_types[Type]
dim = []
for idim in range(ndim):
dimi = double(card, 9 + idim, 'dim%i' % (idim + 1))
dim.append(dimi)
assert len(dim) == ndim, 'PBARL ndim=%s len(dims)=%s' % (ndim, len(dim))
assert None not in dim
#: dimension list
self.dim[i] = dim
nsm = double_or_blank(card, 9 + ndim + 1, 'nsm', 0.0)
self.nsm[i] = nsm
assert isinstance(nsm, float), 'nsm=%r' % nsm
if Type not in self.valid_types:
msg = ('Invalid PBARL Type, Type=%s '
'valid_types=%s' % (Type, self.valid_types.keys()))
raise RuntimeError(msg)
if len(dim) != self.valid_types[Type]:
msg = 'dim=%s len(dim)=%s Type=%s len(dimType)=%s' % (
dim, len(dim), Type,
self.valid_types[Type])
raise RuntimeError(msg)
self.i += 1
def add_op2(self, data):
i = self.i
self.property_id[i] = data[0]
self.material_id[i] = data[1]
self.group[i] = data[2].strip()
self.Type[i] = data[3].strip()
self.dim[i] = list(data[4:-1])
self.nsm[i] = data[-1]
#print("group = %r" % self.group)
#print("Type = %r" % self.Type)
#print("dim = ",self.dim)
#print(str(self))
#print("*PBARL = ",data)
#raise NotImplementedError('not finished...')
        Type = self.Type[i]
        dim = self.dim[i]
        if Type not in self.valid_types:
            msg = ('Invalid PBARL Type, Type=%s '
                   'valid_types=%s' % (Type, self.valid_types.keys()))
            raise RuntimeError(msg)
        if len(dim) != self.valid_types[Type]:
            msg = 'dim=%s len(dim)=%s Type=%s len(dimType)=%s' % (
                dim, len(dim), Type,
                self.valid_types[Type])
            raise RuntimeError(msg)
        assert None not in dim
self.i += 1
def build(self):
if self.n:
unique_pids = unique(self.property_id)
if len(unique_pids) != len(self.property_id):
raise RuntimeError('There are duplicate PBARL IDs...')
i = self.property_id.argsort()
self.property_id = self.property_id[i]
self.material_id = self.material_id[i]
self.group = self.group[i]
self.Type = self.Type[i]
self.nsm = self.nsm[i]
self.dim = {ii : self.dim[j] for ii, j in zip(count(), i)}
else:
self.property_id = array([], dtype='int32')
def update(self, maps):
"""
maps = {
'node' : nid_map,
'property' : pid_map,
}
"""
if self.n:
nid_map = maps['node']
pid_map = maps['property']
for i, pid in enumerate(self.property_id):
try:
self.property_id[i] = pid_map[pid]
except KeyError:
print('pid_map = %s' % pid_map)
raise
#=========================================================================
def get_index(self, property_ids):
if isinstance(property_ids, int):
property_ids = array([property_ids])
if property_ids is None:
return arange(self.n)
indexs = searchsorted(self.property_id, property_ids)
assert len(indexs) == len(property_ids), 'indexs=%s pids=%s' % (indexs, property_ids)
return indexs
def __getitem__(self, property_ids):
"""
Allows for slicing:
- elements[1:10]
- elements[4]
- elements[1:10:2]
- elements[[1,2,5]]
- elements[array([1,2,5])]
"""
i = searchsorted(self.property_id, property_ids)
return self.slice_by_index(i)
#def __getitem__(self, property_ids):
#property_ids, int_flag = slice_to_iter(property_ids)
#obj = PBARL(self.model)
#properties = {}
#for pid in sorted(property_ids):
#properties[pid] = self.properties[pid]
#obj.n = len(property_ids)
#obj.properties = properties
#obj.property_id = sorted(self.properties.keys())
##obj._comments = obj._comments[index]
##obj.comments = obj.comments[index]
#return obj
#=========================================================================
def write_card(self, bdf_file, size=8, property_id=None):
#self.model.log.debug('PBARL.n = %s' % self.n)
if self.n:
if property_id is None:
i = arange(self.n)
else:
i = searchsorted(self.property_id, property_id)
#self.model.log.debug('i = %s' % i)
#cid = [cid if cid != 0 else '' for cid in self.coord_id]
#group = set_blank_if_default(self.group, 'MSCBMLO')
#list_fields = ['PBARL', self.pid, self.Mid(), group, self.Type, None,
#None, None, None] + self.dim + [self.nsm]
#self.model.log.debug('*pbarl write pids=%s' % self.property_id)
for (j, pid, mid, group, Type, nsm) in zip(count(), self.property_id[i],
self.material_id[i],
self.group[i], self.Type[i], self.nsm[i]):
if pid in self._comments:
bdf_file.write(self._comments[pid])
dim = self.dim[j]
ndim = self.valid_types[Type]
                assert len(dim) == ndim, 'PBARL ndim=%s len(dims)=%s' % (ndim, len(dim))
sgroup = set_blank_if_default(group, 'MSCBMLO')
list_fields = ['PBARL', pid, mid, group, Type, None,
None, None, None] + dim + [nsm]
#group = set_blank_if_default(self.group, 'MSCBMLO')
#list_fields = ['PBARL', self.pid, self.Mid(), group, self.Type, None,
#None, None, None] + self.dim + [self.nsm]
if size == 8:
bdf_file.write(print_card_8(list_fields))
else:
bdf_file.write(print_card_16(list_fields))
def slice_by_index(self, i):
i = self._validate_slice(i)
obj = PBARL(self.model)
obj.n = len(i)
#obj._comments = obj._comments[i]
#obj.comments = obj.comments[i]
obj.property_id = self.property_id[i]
obj.material_id = self.material_id[i]
obj.Type = self.Type[i]
obj.group = self.group[i]
obj.nsm = self.nsm[i]
dim = {}
j = 0
for ii, dimi in self.dim.items():
if ii in i:
dim[j] = dimi
j += 1
obj.dim = dim
return obj
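# --- usage sketch (illustrative, not part of the original module) ---
# A hypothetical end-to-end flow for this vectorized PBARL container.  The
# names `model`, `ncards`, `card` and `bdf_file` are assumptions: `model` is
# expected to provide `log` and `float_fmt`, `card` must be compatible with
# the integer/double/string field readers used in add_card(), and the
# ncards-sizing method at the top of this class is assumed to be callable as
# `allocate(ncards)`.
#
#   prop = PBARL(model)
#   prop.allocate(ncards)               # sizes the property_id/material_id/... arrays
#   prop.add_card(card, comment='')     # parses one PBARL card into slot self.i
#   prop.build()                        # sorts by property_id and checks for duplicates
#   with open('pbarl.bdf', 'w') as bdf_file:
#       prop.write_card(bdf_file, size=8)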
|
the-stack_106_18966
|
#%%
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
plt.rcParams['axes.spines.bottom'] = False
plt.rcParams['axes.spines.left'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.top'] = False
@np.vectorize
def circ(x):
if x > 2*np.pi:
return x-2*np.pi
elif x < 0:
return x+2*np.pi
else:
return x
def get_phi(x,y):
phi = np.arctan(y/x)
if x < 0:
phi = np.pi + phi
return phi
#%%
class UpdateFigure:
def __init__(self, ax:plt.Axes, data:np.ndarray):
"""Plot the first frame for the animation.
Args:
ax (plt.Axes): axes of flight icons.
data (np.ndarray): 1-D array of number of passagers for each days
"""
self.colors = dict(
init=[0,0,0,1],
red=np.array([230,0,18,255])/255.0,
green=np.array([0,176,80,255])/255.0,
)
self.ax = ax
# generate a circle
theta = np.linspace(0, 2*np.pi, 1000)
self.ax.plot(np.cos(theta), np.sin(theta), color=self.colors['init'], lw=4, zorder=1)
        # fix the axis limits so the unit circle stays fully visible
self.ax.set_xlim(-1.1,1.1)
self.ax.set_ylim(-1.1,1.1)
self.ax.set_xticks([])
self.ax.set_yticks([])
self.data = data
self.ax.axis('scaled')
# initialize text
self.n_chord = 0
self.n_chord_hit = 0
self.ax.set_xlabel(f'{0:5.3f}', fontsize=40)
self.ax.set_title(f'{self.n_chord_hit:>5d}/{self.n_chord:<5d}', fontsize=40)
@staticmethod
def vert_chord(seed,):
xs = np.ones(2)*(seed*2-1)
ys = np.sqrt(1 - xs**2)
ys[1] *= -1
return xs, ys
@staticmethod
def radiate_chord(seed,):
xs, ys = np.zeros(2), np.zeros(2)
xs[0], ys[0] = 1, 0
xs[1], ys[1] = np.cos(seed*2*np.pi), np.sin(seed*2*np.pi)
return xs, ys
@staticmethod
def area_chord(seed,):
        xc, yc = seed * 2 - 1
        rho = np.sqrt(xc**2 + yc**2)
phi = get_phi(xc,yc)
angle = np.arccos(rho)
theta = circ(np.array([phi-angle, phi+angle]))
xs, ys = np.cos(theta), np.sin(theta)
return xs, ys
@staticmethod
def chord_len(xs, ys):
return np.sqrt((xs[0]-xs[1])**2+(ys[0]-ys[1])**2)
def __call__(self, i):
# This way the plot can continuously run and we just keep
# watching new realizations of the process
if i > 0:
xs, ys = self.area_chord(self.data[i]) # can be replaced by vert_chord and radiate_chord
if self.chord_len(xs, ys) > np.sqrt(3):
lines = self.ax.plot(xs, ys, color=self.colors['red'], lw=1, zorder=0)
self.n_chord_hit += 1
else:
lines = self.ax.plot(xs, ys, color=self.colors['green'], lw=1, zorder=0)
self.n_chord += 1
self.ax.set_title(f'{self.n_chord_hit:>5d}/{self.n_chord:<5d}', fontsize=40)
self.ax.set_xlabel(f'{self.n_chord_hit*1.0/self.n_chord:5.3f}', fontsize=40)
else:
lines = self.ax.plot([], [])
return lines
# ======================================================
# create canvas
fig, ax = plt.subplots(1,1, figsize=(5,5))
# Generate random seeds
n_frame = 500
# ======================================================
# random seeds for vert_chord and radiate_chord
# randx = np.random.rand(n_frame+1)
# ======================================================
# random seeds for area_chord
randx = np.random.rand(n_frame*2, 2)
mask = (randx[:,0]-0.5)**2 + (randx[:,1]-0.5)**2<= 0.25
randx = randx[mask,:]
# ======================================================
ud = UpdateFigure(ax, randx)
anim = FuncAnimation(fig, ud, frames=n_frame+1, blit=True)
# save animation as *.mp4
anim.save('rcp_movie.mp4', fps=24, dpi=100, codec='libx264', bitrate=-1, extra_args=['-pix_fmt', 'yuv420p'])
# ======================================================
# %%
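# --- sanity check (illustrative, not part of the original animation) ---
# A small, self-contained Monte Carlo that reproduces the three classic
# Bertrand-paradox answers implied by the chord generators above:
# vert_chord (random radius, P = 1/2), radiate_chord (random endpoints,
# P = 1/3) and area_chord (random midpoint, P = 1/4).  The probability that a
# "random" chord is longer than sqrt(3) depends on how the chord is drawn.
def bertrand_estimates(n=200_000, rng=None):
    rng = np.random.default_rng(0) if rng is None else rng
    # random radius: chord at a uniform perpendicular distance d from the centre
    d = rng.uniform(0, 1, n)
    p_radius = np.mean(2 * np.sqrt(1 - d**2) > np.sqrt(3))
    # random endpoints: fix one endpoint, draw the other uniformly on the circle
    theta = rng.uniform(0, 2 * np.pi, n)
    p_endpoints = np.mean(2 * np.sin(theta / 2) > np.sqrt(3))
    # random midpoint: midpoint uniform in the disk (rejection sampling)
    xy = rng.uniform(-1, 1, (2 * n, 2))
    xy = xy[(xy**2).sum(axis=1) <= 1][:n]
    rho = np.sqrt((xy**2).sum(axis=1))
    p_midpoint = np.mean(2 * np.sqrt(1 - rho**2) > np.sqrt(3))
    return p_radius, p_endpoints, p_midpoint

# Expected roughly (0.5, 0.333, 0.25):
# print(bertrand_estimates())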
|
the-stack_106_18969
|
import numpy
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Image
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import PointCloud2
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from openai_ros.openai_ros_common import ROSLauncher
from gazebo_msgs.msg import ModelStates
from cv_bridge import CvBridge, CvBridgeError
import cv2
class TurtleBot2SumoEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all CubeSingleDisk environments.
"""
def __init__(self, ros_ws_abspath):
self.bridge = CvBridge()
rospy.logdebug("Start TurtleBot2SumoEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="put_turtlebot2_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible controllers
        self.controllers_list = []
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(TurtleBot2SumoEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_sensors_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/odom", Odometry, self._odom_callback)
rospy.Subscriber("/gazebo/model_states", ModelStates ,self._model_state_callback)
#rospy.Subscriber("/camera/depth/image_raw", Image, self._camera_depth_image_raw_callback)
#rospy.Subscriber("/camera/depth/points", PointCloud2, self._camera_depth_points_callback)
rospy.Subscriber("/camera/rgb/image_raw", Image, self._camera_rgb_image_raw_callback)
rospy.Subscriber("/kobuki/laser/scan", LaserScan, self._laser_scan_callback)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._check_publishers_connection()
self.gazebo.pauseSim()
rospy.logdebug("Finished TurtleBot2Env INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _model_state_callback(self,msg):
models = msg.name
item_name = None
for name in msg.name:
if 'ball' in name:
item_name = name
break
if item_name is not None:
ball_idx = msg.name.index(item_name)
self.ball_position = [msg.pose[ball_idx].position.x, msg.pose[ball_idx].position.y]
robot_idx = msg.name.index('mobile_base')
self.robot_position = [msg.pose[robot_idx].position.x, msg.pose[robot_idx].position.y]
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_all_sensors_ready()
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_odom_ready()
# We dont need to check for the moment, takes too long
#self._check_camera_depth_image_raw_ready()
#self._check_camera_depth_points_ready()
self._check_ball_position()
#self._check_camera_rgb_image_raw_ready()
self._check_laser_scan_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_odom_ready(self):
self.odom = None
rospy.logdebug("Waiting for /odom to be READY...")
while self.odom is None and not rospy.is_shutdown():
try:
self.odom = rospy.wait_for_message("/odom", Odometry, timeout=5.0)
rospy.logdebug("Current /odom READY=>")
except:
rospy.logerr("Current /odom not ready yet, retrying for getting odom")
return self.odom
def _check_camera_depth_image_raw_ready(self):
self.camera_depth_image_raw = None
rospy.logdebug("Waiting for /camera/depth/image_raw to be READY...")
while self.camera_depth_image_raw is None and not rospy.is_shutdown():
try:
self.camera_depth_image_raw = rospy.wait_for_message("/camera/depth/image_raw", Image, timeout=5.0)
rospy.logdebug("Current /camera/depth/image_raw READY=>")
except:
rospy.logerr("Current /camera/depth/image_raw not ready yet, retrying for getting camera_depth_image_raw")
return self.camera_depth_image_raw
def _check_camera_depth_points_ready(self):
self.camera_depth_points = None
rospy.logdebug("Waiting for /camera/depth/points to be READY...")
while self.camera_depth_points is None and not rospy.is_shutdown():
try:
self.camera_depth_points = rospy.wait_for_message("/camera/depth/points", PointCloud2, timeout=10.0)
rospy.logdebug("Current /camera/depth/points READY=>")
except:
rospy.logerr("Current /camera/depth/points not ready yet, retrying for getting camera_depth_points")
return self.camera_depth_points
def _check_camera_rgb_image_raw_ready(self):
self.camera_rgb_image_raw = None
rospy.logdebug("Waiting for /camera/rgb/image_raw to be READY...")
while self.camera_rgb_image_raw is None and not rospy.is_shutdown():
try:
data = rospy.wait_for_message("/camera/rgb/image_raw", Image, timeout=5.0)
#self.camera_rgb_image_raw = self.bridge.imgmsg_to_cv2(data,"rgb8")
rospy.logdebug("Current /camera/rgb/image_raw READY=>")
except:
rospy.logerr("Current /camera/rgb/image_raw not ready yet, retrying for getting camera_rgb_image_raw")
return self.camera_rgb_image_raw
def _check_laser_scan_ready(self):
self.laser_scan = None
rospy.logdebug("Waiting for /kobuki/laser/scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/kobuki/laser/scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /kobuki/laser/scan READY=>")
except:
rospy.logerr("Current /kobuki/laser/scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _check_ball_position(self):
self.ball_position = None
rospy.logdebug("Waiting for /gazebo/model_states to be READY...")
while self.ball_position is None and not rospy.is_shutdown():
try:
self.ball_position = rospy.wait_for_message("/gazebo/model_states", ModelStates, timeout=5.0)
rospy.logdebug("Current /gazebo/model_states READY=>")
except:
rospy.logerr("Current /gazebo/model_states not ready yet, retrying for getting ball_position")
return self.ball_position
def _odom_callback(self, data):
self.odom = data
def _camera_depth_image_raw_callback(self, data):
self.camera_depth_image_raw = data
def _camera_depth_points_callback(self, data):
self.camera_depth_points = data
def _camera_rgb_image_raw_callback(self, data):
self.camera_rgb_image_raw = self.bridge.imgmsg_to_cv2(data,"rgb8")
def _laser_scan_callback(self, data):
self.laser_scan = data
def _check_publishers_connection(self):
"""
Checks that all the publishers are working
:return:
"""
rate = rospy.Rate(10) # 10hz
while self._cmd_vel_pub.get_num_connections() == 0 and not rospy.is_shutdown():
rospy.logdebug("No susbribers to _cmd_vel_pub yet so we wait and try again")
try:
rate.sleep()
except rospy.ROSInterruptException:
                # This is to avoid an error when the world is reset and time goes backwards.
pass
rospy.logdebug("_cmd_vel_pub Publisher Connected")
rospy.logdebug("All Publishers READY")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def move_base(self, linear_speed, angular_speed,sleep_time = 0.2, epsilon=0.05, update_rate=10, min_laser_distance=-1):
"""
It will move the base based on the linear and angular speeds given.
        It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(sleep_time)
#time.sleep(0.02)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate,
min_laser_distance)
"""
def wait_until_twist_achieved(self, cmd_vel_value, epsilon, update_rate, min_laser_distance=-1):
"""
We wait for the cmd_vel twist given to be reached by the robot reading
from the odometry.
:param cmd_vel_value: Twist we want to wait to reach.
:param epsilon: Error acceptable in odometry readings.
:param update_rate: Rate at which we check the odometry.
:return:
"""
rospy.logwarn("START wait_until_twist_achieved...")
rate = rospy.Rate(update_rate)
start_wait_time = rospy.get_rostime().to_sec()
end_wait_time = 0.0
epsilon = 0.05
rospy.logdebug("Desired Twist Cmd>>" + str(cmd_vel_value))
rospy.logdebug("epsilon>>" + str(epsilon))
linear_speed = cmd_vel_value.linear.x
angular_speed = cmd_vel_value.angular.z
linear_speed_plus = linear_speed + epsilon
linear_speed_minus = linear_speed - epsilon
angular_speed_plus = angular_speed + epsilon
angular_speed_minus = angular_speed - epsilon
while not rospy.is_shutdown():
crashed_into_something = self.has_crashed(min_laser_distance)
current_odometry = self._check_odom_ready()
odom_linear_vel = current_odometry.twist.twist.linear.x
odom_angular_vel = current_odometry.twist.twist.angular.z
rospy.logdebug("Linear VEL=" + str(odom_linear_vel) + ", ?RANGE=[" + str(linear_speed_minus) + ","+str(linear_speed_plus)+"]")
rospy.logdebug("Angular VEL=" + str(odom_angular_vel) + ", ?RANGE=[" + str(angular_speed_minus) + ","+str(angular_speed_plus)+"]")
linear_vel_are_close = (odom_linear_vel <= linear_speed_plus) and (odom_linear_vel > linear_speed_minus)
angular_vel_are_close = (odom_angular_vel <= angular_speed_plus) and (odom_angular_vel > angular_speed_minus)
if linear_vel_are_close and angular_vel_are_close:
rospy.logwarn("Reached Velocity!")
end_wait_time = rospy.get_rostime().to_sec()
break
if crashed_into_something:
rospy.logerr("TurtleBot has crashed, stopping movement!")
break
rospy.logwarn("Not there yet, keep waiting...")
rate.sleep()
delta_time = end_wait_time- start_wait_time
rospy.logdebug("[Wait Time=" + str(delta_time)+"]")
rospy.logwarn("END wait_until_twist_achieved...")
return delta_time
def has_crashed(self, min_laser_distance):
"""
It states based on the laser scan if the robot has crashed or not.
Crashed means that the minimum laser reading is lower than the
min_laser_distance value given.
If min_laser_distance == -1, it returns always false, because its the way
to deactivate this check.
"""
robot_has_crashed = False
if min_laser_distance != -1:
laser_data = self.get_laser_scan()
for i, item in enumerate(laser_data.ranges):
if item == float ('Inf') or numpy.isinf(item):
pass
elif numpy.isnan(item):
pass
else:
# Has a Non Infinite or Nan Value
if (item < min_laser_distance):
rospy.logerr("TurtleBot HAS CRASHED >>> item=" + str(item)+"< "+str(min_laser_distance))
robot_has_crashed = True
break
return robot_has_crashed
def get_odom(self):
return self.odom
def get_camera_depth_image_raw(self):
return self.camera_depth_image_raw
def get_camera_depth_points(self):
return self.camera_depth_points
def get_camera_rgb_image_raw(self):
return self.camera_rgb_image_raw
def get_laser_scan(self):
return self.laser_scan
def get_ball_position(self):
return self.ball_position
def get_robot_position(self):
return self.robot_position
def reinit_sensors(self):
"""
        This method is for the tasks so that, when resetting the episode,
        the sensor values are forced to be updated with the real data.
"""
|
the-stack_106_18972
|
#completely rewritten by Rolarga, original from mr
# Shadow Weapon Coupons contributed by BiTi for the Official L2J Datapack Project
# Visit http://www.l2jdp.com/forum/ for more details
import sys
from com.l2jserver.gameserver.model.quest import State
from com.l2jserver.gameserver.model.quest import QuestState
from com.l2jserver.gameserver.model.quest.jython import QuestJython as JQuest
qn = "232_TestOfLord"
MARK_OF_LORD = 3390
ORDEAL_NECKLACE = 3391
VARKEES_CHARM = 3392
TANTUS_CHARM = 3393
HATOS_CHARM = 3394
TAKUNA_CHARM = 3395
CHIANTA_CHARM = 3396
MANAKIAS_ORDERS = 3397
BREKA_ORC_FANG = 3398
MANAKIAS_AMULET = 3399
HUGE_ORC_FANG = 3400
SUMARIS_LETTER = 3401
URUTU_BLADE = 3402
TIMAK_ORC_SKULL = 3403
SWORD_INTO_SKULL = 3404
NERUGA_AXE_BLADE = 3405
AXE_OF_CEREMONY = 3406
MARSH_SPIDER_FEELER = 3407
MARSH_SPIDER_FEET = 3408
HANDIWORK_SPIDER_BROOCH = 3409
CORNEA_OF_EN_MONSTEREYE = 3410
MONSTEREYE_WOODCARVING = 3411
BEAR_FANG_NECKLACE = 3412
MARTANKUS_CHARM = 3413
RAGNA_ORC_HEAD = 3414
RAGNA_CHIEF_NOTICE = 3415
IMMORTAL_FLAME = 3416
BONE_ARROW = 1341
ADENA = 57
SHADOW_WEAPON_COUPON_CGRADE = 8870
NPC=[30510,30515,30558,30564,30565,30566,30567,30568,30641,30642,30643,30649]
MOBS=[20233,20269,20270,20564,20583,20584,20585,20586,20587,20588,20778,20779]
STATS=[["atubaStat","nerugaStat","urutuStat","urutuDrop","dudaStat","gandiStat","markantusStat"],["cond","phase"]]
# This handles all the drop data for the mobs in this quest. npcId: [var, value, newValue, chance, maxcount, item]
DROPLIST={
20269:["atubaStat", 2,3, 40,20,BREKA_ORC_FANG ],
20270:["atubaStat", 2,3, 50,20,BREKA_ORC_FANG ],
20583:["urutuDrop", 0,1, 50,10,TIMAK_ORC_SKULL],
20584:["urutuDrop", 0,1, 55,10,TIMAK_ORC_SKULL],
20585:["urutuDrop", 0,1, 60,10,TIMAK_ORC_SKULL],
20586:["urutuDrop", 0,1, 65,10,TIMAK_ORC_SKULL],
20587:["urutuDrop", 0,1, 70,10,TIMAK_ORC_SKULL],
20588:["urutuDrop", 0,1, 75,10,TIMAK_ORC_SKULL],
20233:["dudaStat", 1,2,100,10,MARSH_SPIDER_FEELER],
20564:["gandiStat", 1,2, 90,20,CORNEA_OF_EN_MONSTEREYE],
20778:["markantusStat",1,1,100, 1,RAGNA_ORC_HEAD],
20779:["markantusStat",1,1,100, 1,RAGNA_CHIEF_NOTICE]
}
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = range(3391,3417)
def onAdvEvent (self,event,npc, player) :
htmltext = event
st = player.getQuestState(qn)
if not st : return
if st.getInt("phase")==0:
if event=="1":
st.setState(State.STARTED)
st.giveItems(ORDEAL_NECKLACE,1)
st.playSound("ItemSound.quest_accept")
htmltext="30565-05.htm"
for var in STATS[0]:
st.set(var,"0")
st.set("cond","1")
st.set("phase","1")
elif st.getInt("phase")==1:
if event == "30565_1" :
htmltext = "30565-08.htm"
st.takeItems(SWORD_INTO_SKULL,1)
st.takeItems(AXE_OF_CEREMONY,1)
st.takeItems(MONSTEREYE_WOODCARVING,1)
st.takeItems(HANDIWORK_SPIDER_BROOCH,1)
st.takeItems(ORDEAL_NECKLACE,1)
st.giveItems(BEAR_FANG_NECKLACE,1)
st.takeItems(HUGE_ORC_FANG,1)
st.set("phase","2")
elif event == "30566_1" :
st.set("atubaStat","1")
st.giveItems(VARKEES_CHARM,1)
htmltext = "30566-02.htm"
elif event == "30567_1" :
st.set("nerugaStat","1")
htmltext = "30567-02.htm"
st.giveItems(TANTUS_CHARM,1)
elif event == "30558_1" :
st.set("nerugaStat","2")
htmltext = "30558-02.htm"
st.giveItems(NERUGA_AXE_BLADE,1)
st.takeItems(ADENA,1000)
elif event == "30568_1" :
st.set("urutuStat","1")
st.set("urutuDrop","0")
htmltext = "30568-02.htm"
st.giveItems(HATOS_CHARM,1)
elif event == "30641_1" :
st.set("dudaStat","1")
htmltext = "30641-02.htm"
st.giveItems(TAKUNA_CHARM,1)
elif event == "30642_1" :
st.set("gandiStat","1")
htmltext = "30642-02.htm"
st.giveItems(CHIANTA_CHARM,1)
elif st.getInt("phase")==2:
if event == "30565_2":
htmltext = "30565-12.htm"
st.addExpAndSp(92955,16250)
st.giveItems(MARK_OF_LORD,1)
st.giveItems(SHADOW_WEAPON_COUPON_CGRADE,15)
st.takeItems(IMMORTAL_FLAME,1)
st.playSound("ItemSound.quest_finish")
for var in STATS[0]:
st.unset(var)
for var in STATS[1]:
st.unset(var)
st.exitQuest(False)
elif event == "30649_1" :
htmltext = "30649-02.htm"
elif event == "30649_2" :
htmltext = "30649-03.htm"
elif event == "30649_3" :
st.set("markantusStat","1")
htmltext = "30649-04.htm"
st.giveItems(MARTANKUS_CHARM,1)
st.takeItems(BEAR_FANG_NECKLACE,1)
elif event == "30649_4" :
htmltext = "30649-07.htm"
st.addSpawn(30643,21036,-107690,-3038)
st.set("markantusStat","4")
elif event == "30643_1" :
htmltext = "30643-02.htm"
elif event == "30643_2" :
htmltext = "30643-03.htm"
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if npcId != NPC[4] and id != State.STARTED : return htmltext
if id == State.CREATED:
for var in STATS[1]:
st.set(var,"0")
if npcId == NPC[4]:
if st.getInt("cond")==0:
if player.getRace().ordinal() != 3 :
htmltext = "30565-01.htm"
st.exitQuest(1)
else:
if player.getClassId().getId() != 0x32 :
htmltext = "30565-02.htm"
st.exitQuest(1)
else:
if player.getLevel() < 39 :
htmltext = "30565-03.htm"
st.exitQuest(1)
else:
htmltext = "30565-04.htm"
elif id == State.COMPLETED:
htmltext = "<html><body>This quest has already been completed.</body></html>"
else:
if st.getInt("phase") == 1:
atuba=st.getInt("atubaStat")
neruga=st.getInt("nerugaStat")
urutu=st.getInt("urutuStat")
duda=st.getInt("dudaStat")
gandi=st.getInt("gandiStat")
# Atuba Part
if npcId == NPC[5]:
if atuba==0:
htmltext = "30566-01.htm"
elif atuba>0 and atuba<4:
htmltext = "30566-03.htm"
elif atuba==4:
st.set("atubaStat","5")
htmltext = "30566-04.htm"
st.takeItems(VARKEES_CHARM,1)
st.giveItems(HUGE_ORC_FANG,1)
st.takeItems(MANAKIAS_AMULET,1)
elif atuba>4:
htmltext = "30566-05.htm"
elif npcId == NPC[1]:
if atuba==1:
htmltext = "30515-01.htm"
st.giveItems(MANAKIAS_ORDERS,1)
st.set("atubaStat","2")
elif atuba==2:
htmltext = "30515-02.htm"
elif atuba==3:
st.set("atubaStat","4")
htmltext = "30515-03.htm"
st.giveItems(MANAKIAS_AMULET,1)
st.takeItems(MANAKIAS_ORDERS,1)
st.takeItems(DROPLIST[20269][5],DROPLIST[20269][4])
elif atuba==4:
htmltext = "30515-04.htm"
elif atuba==5:
htmltext = "30515-05.htm"
# Neruga Part
elif npcId == NPC[6]:
if neruga==0:
htmltext = "30567-01.htm"
elif neruga==1:
htmltext = "30567-03.htm"
elif neruga==2:
if st.getQuestItemsCount(BONE_ARROW)>999:
st.set("nerugaStat","3")
st.takeItems(BONE_ARROW,1000)
st.takeItems(NERUGA_AXE_BLADE,1)
st.takeItems(TANTUS_CHARM,1)
st.giveItems(AXE_OF_CEREMONY,1)
htmltext = "30567-04.htm"
else:
htmltext = "30567-03.htm"
elif neruga==3:
htmltext = "30567-05.htm"
elif npcId == NPC[2]:
if neruga==1:
if st.getQuestItemsCount(ADENA)>999:
htmltext = "30558-01.htm"
else:
htmltext = "30558-03.htm"
elif neruga==2:
htmltext = "30558-04.htm"
# Urutu Part
elif npcId == NPC[7]:
if urutu==0:
htmltext = "30568-01.htm"
elif urutu==3 and st.getInt("urutuDrop")==1:
st.set("urutuStat","4")
htmltext = "30568-04.htm"
st.takeItems(HATOS_CHARM,1)
st.takeItems(URUTU_BLADE,1)
st.takeItems(DROPLIST[20587][5],DROPLIST[20587][4])
st.giveItems(SWORD_INTO_SKULL,1)
elif urutu>0 and urutu<4:
htmltext = "30568-03.htm"
elif urutu==4:
htmltext = "30568-05.htm"
elif npcId == NPC[3]:
if urutu == 1:
st.set("urutuStat","2")
htmltext = "30564-01.htm"
st.giveItems(SUMARIS_LETTER,1)
elif npcId == NPC[0]:
if urutu==2:
st.set("urutuStat","3")
st.giveItems(URUTU_BLADE,1)
st.takeItems(SUMARIS_LETTER,1)
htmltext = "30510-01.htm"
elif urutu==3:
htmltext = "30510-02.htm"
elif urutu==4:
htmltext = "30510-03.htm"
# Duda Part
elif npcId == NPC[8]:
if duda==0:
htmltext = "30641-01.htm"
elif duda in [1,2]:
htmltext = "30641-03.htm"
elif duda==3:
st.set("dudaStat","4")
htmltext = "30641-04.htm"
st.takeItems(DROPLIST[20233][5],DROPLIST[20233][4])
st.takeItems(MARSH_SPIDER_FEET,st.getQuestItemsCount(MARSH_SPIDER_FEET))
st.giveItems(HANDIWORK_SPIDER_BROOCH,1)
st.takeItems(TAKUNA_CHARM,1)
elif duda==4:
htmltext = "30641-05.htm"
# Gandi Part
elif npcId == NPC[9]:
if gandi==0:
htmltext = "30642-01.htm"
elif gandi==1:
htmltext = "30642-03.htm"
elif gandi==2:
st.set("gandiStat","3")
htmltext = "30642-04.htm"
st.takeItems(DROPLIST[20564][5],DROPLIST[20564][4])
st.giveItems(MONSTEREYE_WOODCARVING,1)
st.takeItems(CHIANTA_CHARM,1)
elif gandi==3:
htmltext = "30642-05.htm"
# end of phase 1
elif npcId == NPC[4]:
if gandi==3 and duda==4 and urutu==4 and neruga==3 and atuba==5:
htmltext = "30565-07.htm"
else:
htmltext = "30565-06.htm"
elif st.getInt("phase")==2:
markantus=st.getInt("markantusStat")
if npcId == NPC[11]:
if markantus==0:
htmltext = "30649-01.htm"
elif markantus==1:
htmltext = "30649-05.htm"
elif markantus==2:
st.set("markantusStat","3")
htmltext = "30649-06.htm"
st.takeItems(MARTANKUS_CHARM,1)
st.takeItems(RAGNA_ORC_HEAD,1)
st.giveItems(IMMORTAL_FLAME,1)
st.takeItems(RAGNA_CHIEF_NOTICE,1)
elif markantus==3:
htmltext = "30649-07.htm"
st.addSpawn(30643,21036,-107690,-3038)
st.set("markantusStat","4")
elif markantus>3:
htmltext = "30649-08.htm"
elif npcId == NPC[10]:
if markantus>2:
htmltext = "30643-01.htm"
elif npcId == NPC[4]:
if markantus==0:
htmltext = "30565-09.htm"
elif markantus==1 or markantus==2:
htmltext = "30565-10.htm"
elif markantus>2:
htmltext = "30565-11.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != State.STARTED : return
npcId = npc.getNpcId()
var,value,newValue,chance,maxcount,item=DROPLIST[npcId]
random=st.getRandom(100)
count=st.getQuestItemsCount(item)
spiderCount=st.getQuestItemsCount(MARSH_SPIDER_FEET)
if item == MARSH_SPIDER_FEELER and int(st.get(var)) == value:
if spiderCount<10:
st.giveItems(MARSH_SPIDER_FEET,1)
st.playSound("ItemSound.quest_itemget")
elif st.getQuestItemsCount(MARSH_SPIDER_FEELER)<9:
st.giveItems(MARSH_SPIDER_FEELER,1)
st.playSound("ItemSound.quest_itemget")
elif st.getQuestItemsCount(MARSH_SPIDER_FEELER)==9:
st.giveItems(MARSH_SPIDER_FEELER,1)
st.playSound("ItemSound.quest_middle")
st.set("dudaStat","3")
elif int(st.get(var)) == value and random < chance and count < maxcount:
st.giveItems(item,1)
if count == maxcount-1:
st.playSound("ItemSound.quest_middle")
if newValue == 1 and st.getQuestItemsCount(RAGNA_ORC_HEAD) and st.getQuestItemsCount(RAGNA_CHIEF_NOTICE):
st.set(var,"2")
else:
st.set(var,str(newValue))
else:
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(232,qn,"Test Of Lord")
QUEST.addStartNpc(NPC[4])
for npcId in NPC:
QUEST.addTalkId(npcId)
for mobId in MOBS:
QUEST.addKillId(mobId)
|
the-stack_106_18973
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .contractual_rules_attribution import ContractualRulesAttribution
class ContractualRulesTextAttribution(ContractualRulesAttribution):
"""Defines a contractual rule for text attribution.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar target_property_name: The name of the field that the rule applies
to.
:vartype target_property_name: str
:param _type: Constant filled by server.
:type _type: str
:ivar must_be_close_to_content: A Boolean value that determines whether
the contents of the rule must be placed in close proximity to the field
that the rule applies to. If true, the contents must be placed in close
proximity. If false, or this field does not exist, the contents may be
placed at the caller's discretion.
:vartype must_be_close_to_content: bool
:param text: The attribution text. Text attribution applies to the entity
as a whole and should be displayed immediately following the entity
presentation. If there are multiple text or link attribution rules that do
not specify a target, you should concatenate them and display them using a
"Data from:" label.
:type text: str
:ivar optional_for_list_display: Indicates whether this provider's
attribution is optional.
:vartype optional_for_list_display: bool
"""
_validation = {
'target_property_name': {'readonly': True},
'_type': {'required': True},
'must_be_close_to_content': {'readonly': True},
'text': {'required': True},
'optional_for_list_display': {'readonly': True},
}
_attribute_map = {
'target_property_name': {'key': 'targetPropertyName', 'type': 'str'},
'_type': {'key': '_type', 'type': 'str'},
'must_be_close_to_content': {'key': 'mustBeCloseToContent', 'type': 'bool'},
'text': {'key': 'text', 'type': 'str'},
'optional_for_list_display': {'key': 'optionalForListDisplay', 'type': 'bool'},
}
def __init__(self, text):
super(ContractualRulesTextAttribution, self).__init__()
self.text = text
self.optional_for_list_display = None
self._type = 'ContractualRules/TextAttribution'
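# --- usage sketch (illustrative, not part of the generated SDK code) ---
# The only required constructor argument is `text`; read-only fields such as
# `target_property_name` are populated by the service.  The attribution string
# below is a made-up placeholder:
#
#   rule = ContractualRulesTextAttribution(text='Data from: Example Provider')
#   assert rule._type == 'ContractualRules/TextAttribution'
#   assert rule.optional_for_list_display is None   # server-populated field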
|
the-stack_106_18974
|
from RIRData import *
from torch import nn, optim
from torch.nn import functional as F
import torchaudio
import torchaudio.functional as audioF
from torchsummary import summary
import librosa
import librosa.display
from torch.utils.tensorboard.writer import SummaryWriter
from models.my_vae import MyVAE
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import random_split
import warnings
import random
import argparse
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
# Backend settings
torch.cuda.set_device(2)
torch.manual_seed(2)
device = torch.device("cuda")
def train(epoch, model, optimizer, writer, train_loader, args):
model.train()
train_loss = 0
for batch_idx, (mag, phase, min, max, label) in enumerate(train_loader):
mag = mag.to(device)
label = label.to(device)
optimizer.zero_grad()
kwargs = {'input':mag, 'label': label}
[recon_batch, input, mu, logvar] = model(**kwargs)
loss_dict = model.loss_function(recon_batch, input, mu, logvar)
loss_dict['loss'].backward()
train_loss += loss_dict['loss']
optimizer.step()
writer.add_scalar('loss', loss_dict['loss'], global_step=epoch * len(train_loader) + batch_idx)
writer.add_scalar('recon_Loss', loss_dict['Reconstruction_Loss'],
global_step=epoch * len(train_loader) + batch_idx)
writer.add_scalar('kl_Loss', loss_dict['KLD'], global_step=epoch * len(train_loader) + batch_idx)
if batch_idx % args.log_interval == 0:
rnd_generate(model=model, writer=writer, train_loader=train_loader, epoch=epoch, batch_idx=batch_idx, num=4, args=args)
# spec_to_wav(epoch, batch_idx, mag, max=max, min=min)
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss * args.batch_size / len(train_loader.dataset)))
def test(epoch, model, optimizer, writer, test_loader, train_loader, args):
model.eval()
test_loss = 0
with torch.no_grad():
for i, (mag, phase, min, max, label) in enumerate(test_loader):
mag = mag.to(device)
label = label.to(device)
phase = phase.to(device)
kwargs = {'input': mag, 'label': label}
[recon_batch, input, mu, logvar] = model(**kwargs)
test_loss += model.loss_function(recon_batch, input, mu, logvar)['loss']
if i == 0:
n = 4
phase = torch.exp(phase * torch.complex(real=torch.zeros(phase.shape[1], phase.shape[2]).to(device),
imag=torch.ones(phase.shape[1], phase.shape[2]).to(device)))
origin_pad = F.pad(input=mag, pad=(0, 1, 0, 1), mode='replicate')
recon_pad = F.pad(input=recon_batch, pad=(0, 1, 0, 1), mode='replicate')
min_pad = torch.cat((min, torch.Tensor([-100.0])), dim=0)
max_pad = torch.cat((max, torch.Tensor([10.0])), dim=0)
# min_pad = F.pad(input=min, pad=(0,1), mode='replicate')
# max_pad = F.pad(input=max, pad=(0, 1), mode='replicate')
fig, ax = plt.subplots(nrows=2, ncols=n // 2, sharex=True)
for i in range(n // 2):
rnd = random.randint(0, args.batch_size-1)
origin_m_inverse = inverse_normalize(origin_pad[rnd], min_pad[rnd], max_pad[rnd])
recon_m_inverse = inverse_normalize(recon_pad[rnd], min_pad[rnd], max_pad[rnd])
img1 = librosa.display.specshow(origin_m_inverse.squeeze(0).cpu().numpy(), y_axis='log',
sr=16000,
hop_length=128, x_axis='time', ax=ax[0, i])
img2 = librosa.display.specshow(recon_m_inverse.squeeze(0).cpu().numpy(), y_axis='log',
sr=16000,
hop_length=128, x_axis='time', ax=ax[1, i])
fig.colorbar(img1, ax=ax[0, n // 2 - 1], format="%+2.f dB")
fig.colorbar(img2, ax=ax[1, n // 2 - 1], format="%+2.f dB")
writer.add_figure('compare', figure=fig, global_step=epoch * len(train_loader) + i)
    print('====> Test set loss: {:.4f}'.format(test_loss * args.batch_size / len(test_loader.dataset)))
def inverse_normalize(t, min, max):
return ((t.cpu() + 1.0) * (max.cpu() - min.cpu())) / 2.0 + min.cpu()
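# --- round-trip sanity check (illustrative, not part of the original script) ---
# inverse_normalize() assumes the spectrograms were scaled to [-1, 1] with
# t = 2 * (x - min) / (max - min) - 1; that forward scaling is inferred here,
# not taken from RIRData.  The helper below verifies the inverse on fake data.
def _check_inverse_normalize():
    x = torch.rand(4, 1, 8, 8) * 110.0 - 100.0        # fake dB-scaled magnitudes
    mn, mx = x.min(), x.max()
    t = 2.0 * (x - mn) / (mx - mn) - 1.0              # assumed forward normalization
    assert torch.allclose(inverse_normalize(t, mn, mx), x, atol=1e-5)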
def rnd_generate(model, writer, train_loader, epoch, batch_idx, num, args):
sample = torch.randn(num, args.latent_dim).to(device)
sample = model.decode(sample).cpu()
min = -100.0 * torch.ones(num)
max = torch.FloatTensor(num).normal_(mean=13.4350, std=6.9768)
spec_to_wav(epoch, writer, train_loader, batch_idx, sample, max=max, min=min)
def spec_to_wav(epoch, writer, train_loader, batch_idx, sample, max, min):
with torch.no_grad():
data = F.pad(input=sample, pad=(0, 1, 0, 1), mode='replicate') # (129,129)
max_pad = max
min_pad = min
for i in range(list(max.shape)[0]):
data_inverse = inverse_normalize(data[i].cpu(), min_pad[i], max_pad[i])
data_inverse_power = audioF.DB_to_amplitude(data_inverse, power=1.0, ref=1.0)
griffin = torchaudio.transforms.GriffinLim(n_fft=256, hop_length=128, win_length=256, power=2.0)
griffin.train()
wave = griffin(data_inverse_power)
fig = plt.figure()
plt.plot(wave.t().numpy())
#writer.add_figure('./train_output/fig/sample_spec_'+str(i), figure=fig, global_step=epoch * len(train_loader) + batch_idx)
#writer.add_audio('./train_output/audio/sample_wav_'+str(i), snd_tensor=wave, global_step=epoch * len(train_loader) + batch_idx)
# torchaudio.save(filepath='RIR_result/sampleWAV_' + str(epoch) + '.wav', src=wave, sample_rate=16000)
def main():
    parser = argparse.ArgumentParser(description='speech training arguments')
parser.add_argument('--epoch', type=int, default=60)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--gpu', type=int, default=1)
parser.add_argument('--latent_dim', type=int, default=32)
parser.add_argument('--log_interval', type=int, default=10)
parser.add_argument('--log_path', type=str, default='./train_output/log/')
parser.add_argument('--conditional', type=bool, default=True)
parser.add_argument('--num_classes', type=int, default=4)
args = parser.parse_args()
writer = SummaryWriter(args.log_path)
# load dataset
dataset = BUTRIR(transform=None)
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_data, test_data = random_split(dataset, [train_size, test_size])
train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=0)
test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=True, num_workers=0)
model = MyVAE(in_channels=1, latent_dim=args.latent_dim, num_classes=args.num_classes, conditional=args.conditional).to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-4)
# optimizer = optim.Adadelta(model.parameters())
# summary(model, input_size=(1, 128, 128))
for epoch in range(1, args.epoch + 1):
train(epoch=epoch, model=model, optimizer=optimizer, train_loader=train_loader,writer=writer,args=args)
test(epoch=epoch, model=model, optimizer=optimizer, train_loader=train_loader, test_loader=test_loader, writer=writer, args=args)
if __name__ == "__main__":
main()
|
the-stack_106_18975
|
# -*- coding: utf-8 -*-
"""Implementation of the BoxE model."""
from typing import Any, ClassVar, Mapping, Optional
from torch.nn.init import uniform_
from ...constants import DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE
from ...losses import NSSALoss
from ...models import ERModel
from ...nn.emb import EmbeddingSpecification
from ...nn.init import uniform_norm_
from ...nn.modules import BoxEInteraction
from ...typing import Hint, Initializer
__all__ = [
"BoxE",
]
class BoxE(ERModel):
r"""An implementation of BoxE from [abboud2020]_.
.. note::
This implementation only currently supports unimodal knowledge graphs consisting only of binary facts,
whereas the original BoxE applies to arbitrary facts of any arity, i.e., unary facts, binary facts,
ternary facts, etc. For use on higher-arity knowledge bases, please refer to the original implementation at
https://www.github.com/ralphabb/BoxE.
---
citation:
author: Abboud
year: 2020
link: https://arxiv.org/abs/2007.06267
github: ralphabb/BoxE
"""
#: The default strategy for optimizing the model's hyper-parameters
hpo_default: ClassVar[Mapping[str, Any]] = dict(
embedding_dim=DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE,
p=dict(type=int, low=1, high=2),
)
loss_default = NSSALoss
loss_default_kwargs = dict(margin=3, adversarial_temperature=2.0, reduction="sum")
def __init__(
self,
*,
embedding_dim: int = 256,
tanh_map: bool = True,
p: int = 2,
power_norm: bool = False,
entity_initializer: Hint[Initializer] = uniform_norm_,
entity_initializer_kwargs: Optional[Mapping[str, Any]] = None,
relation_initializer: Hint[Initializer] = uniform_norm_, # Has to be scaled as well
relation_initializer_kwargs: Optional[Mapping[str, Any]] = None,
relation_size_initializer: Hint[Initializer] = uniform_, # Has to be scaled as well
relation_size_initializer_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs,
) -> None:
r"""Initialize BoxE.
:param embedding_dim:
The entity embedding dimension $d$. Defaults to 200. Is usually $d \in [50, 300]$.
:param tanh_map:
Whether to use tanh mapping after BoxE computation (defaults to true). The hyperbolic tangent mapping
restricts the embedding space to the range [-1, 1], and thus this map implicitly
regularizes the space to prevent loss reduction by growing boxes arbitrarily large.
:param p:
order of norm in score computation
:param power_norm:
whether to use the p-th power of the norm instead
:param entity_initializer:
Entity initializer function. Defaults to :func:`pykeen.nn.init.uniform_norm_`
:param entity_initializer_kwargs:
Keyword arguments to be used when calling the entity initializer
:param relation_initializer:
Relation initializer function. Defaults to :func:`pykeen.nn.init.uniform_norm_`
:param relation_initializer_kwargs:
Keyword arguments to be used when calling the relation initializer
:param relation_size_initializer:
            Relation size initializer function. Defaults to :func:`torch.nn.init.uniform_`
:param relation_size_initializer_kwargs: Keyword arguments to be used when calling the
relation matrix initializer
:param kwargs:
Remaining keyword arguments passed through to :class:`pykeen.models.ERModel`.
This interaction relies on Abboud's point-to-box distance
:func:`pykeen.utils.point_to_box_distance`.
"""
super().__init__(
interaction=BoxEInteraction,
interaction_kwargs=dict(
p=p,
power_norm=power_norm,
tanh_map=tanh_map,
),
entity_representations=[ # Base position
EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=entity_initializer,
initializer_kwargs=entity_initializer_kwargs,
), # Bump
# entity bias for head
EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=entity_initializer,
initializer_kwargs=entity_initializer_kwargs,
),
],
relation_representations=[
# relation position head
EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=relation_initializer,
initializer_kwargs=relation_initializer_kwargs,
),
# relation shape head
EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=relation_initializer,
initializer_kwargs=relation_initializer_kwargs,
),
EmbeddingSpecification(
embedding_dim=1, # Size
initializer=relation_size_initializer,
initializer_kwargs=relation_size_initializer_kwargs,
),
EmbeddingSpecification( # Tail position
embedding_dim=embedding_dim,
initializer=relation_initializer,
initializer_kwargs=relation_initializer_kwargs,
),
# relation shape tail
EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=relation_initializer,
initializer_kwargs=relation_initializer_kwargs,
),
EmbeddingSpecification(
embedding_dim=1, # Tail Size
initializer=relation_size_initializer,
initializer_kwargs=relation_size_initializer_kwargs,
),
],
**kwargs,
)
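# --- usage sketch (illustrative, not part of the original module) ---
# BoxE can be trained like any other pykeen ERModel, for example through the
# pipeline; the dataset and hyper-parameters below are placeholders:
#
#   from pykeen.pipeline import pipeline
#   result = pipeline(model='BoxE', dataset='Nations',
#                     model_kwargs=dict(embedding_dim=64, p=2))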
|
the-stack_106_18977
|
import os
import torch
import numpy as np
# import scipy.misc as m
import imageio as m
from PIL import Image
import re
import glob
from torch.utils import data
class CELEBA(data.Dataset):
def __init__(self, root, split="train", is_transform=False, img_size=(32, 32), augmentations=None):
"""__init__
:param root:
:param split:
:param is_transform:
:param img_size:
:param augmentations
"""
self.root = root
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.n_classes = 40
self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
self.mean = np.array([73.15835921, 82.90891754, 72.39239876]) # TODO(compute this mean)
self.files = {}
self.labels = {}
self.label_file = self.root+"/list_attr_celeba.csv"
label_map = {}
with open(self.label_file, 'r') as l_file:
labels = l_file.read().split('\n')[1:]
for label_line in labels:
f_name = re.sub('jpg', 'png', label_line.split(',')[0])
label_txt = list(map(lambda x:int(x), re.sub('-1','0',label_line).split(',')[1:]))
label_map[f_name]=label_txt
self.all_files = glob.glob(self.root+'/img_align_celeba_png/*.png')
with open(root+'/list_eval_partition.csv', 'r') as f:
fl = f.read().split('\n')
fl.pop()
if 'train' in self.split:
selected_files = list(filter(lambda x:x.split(',')[1]=='0', fl))
elif 'val' in self.split:
selected_files = list(filter(lambda x:x.split(',')[1]=='1', fl))
elif 'test' in self.split:
selected_files = list(filter(lambda x:x.split(',')[1]=='2', fl))
selected_file_names = list(map(lambda x:re.sub('jpg', 'png', x.split(',')[0]), selected_files))
base_path = '/'.join(self.all_files[0].split('/')[:-1])
self.files[self.split] = list(map(lambda x: '/'.join([base_path, x]), set(map(lambda x:x.split('/')[-1], self.all_files)).intersection(set(selected_file_names))))
self.labels[self.split] = list(map(lambda x: label_map[x], set(map(lambda x:x.split('/')[-1], self.all_files)).intersection(set(selected_file_names))))
self.class_names = ['5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',
'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',
'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',
'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',
'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',
'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young']
if len(self.files[self.split]) < 2:
raise Exception("No files for split=[%s] found in %s" % (self.split, self.root))
print("Found %d %s images" % (len(self.files[self.split]), self.split))
def __len__(self):
"""__len__"""
return len(self.files[self.split])
def __getitem__(self, index):
"""__getitem__
:param index:
"""
img_path = self.files[self.split][index].rstrip()
label = self.labels[self.split][index]
img = np.asarray(m.imread(img_path))
if self.augmentations is not None:
img = self.augmentations(np.array(img, dtype=np.uint8))
if self.is_transform:
img = self.transform_img(img)
return [img] + label
def transform_img(self, img):
"""transform
Mean substraction, remap to [0,1], channel order transpose to make Torch happy
"""
img = np.array(Image.fromarray(img).resize( (self.img_size[0], self.img_size[1])))
img = img[:, :, ::-1]
img = img.astype(np.float64)
img -= self.mean
# img = m.imresize(img, (self.img_size[0], self.img_size[1]))
# Resize scales images from 0 to 255, thus we need
# to divide by 255.0
img = img.astype(float) / 255.0
# NHWC -> NCWH
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).float()
return img
if __name__ == '__main__':
import torchvision
import matplotlib.pyplot as plt
local_path = 'CELEB_A_PATH'
dst = CELEBA(local_path, is_transform=True, augmentations=None)
bs = 4
trainloader = data.DataLoader(dst, batch_size=bs, num_workers=0)
    for i, batch in enumerate(trainloader):
        # the dataset returns [img] + 40 attribute labels; only the images are shown here
        imgs = batch[0].numpy()[:, ::-1, :, :]
        imgs = np.transpose(imgs, [0, 2, 3, 1])
        f, axarr = plt.subplots(bs, 1)
        for j in range(bs):
            axarr[j].imshow(imgs[j])
        plt.show()
        a = input()
        if a == 'ex':
            break
        else:
            plt.close()
|
the-stack_106_18980
|
import sys
import json
# Information https://hacs.xyz/docs/publish/remove
if len(sys.argv) < 3:
print(
' Usage: python3 scripts/remove_repo.py [repository] [removal_type] "[reason]" [link]'
)
exit(1)
try:
repo = sys.argv[1]
except Exception:
repo = None
try:
removal_type = sys.argv[2]
except Exception:
removal_type = None
try:
reason = sys.argv[3]
except Exception:
reason = None
try:
link = sys.argv[4]
except Exception:
link = None
remove = {
"link": link,
"reason": reason,
"removal_type": removal_type,
"repository": repo,
}
orgs = ["custom-cards", "custom-components"]
foundcategory = None
categorycontent = None
blacklistcontent = None
removedcontent = None
for category in [
"appdaemon",
"integration",
"netdaemon",
"plugin",
"python_script",
"theme",
]:
with open(category, "r") as cat_file:
content = json.loads(cat_file.read())
if remove["repository"] in content:
print(f"Found in {category}")
foundcategory = category
categorycontent = content
content.remove(remove["repository"])
with open(category, "w") as outfile:
outfile.write(json.dumps(sorted(content, key=str.casefold), indent=2))
break
if remove["repository"].split("/")[0] not in orgs:
    if foundcategory is None or categorycontent is None:
print(f"Could not find repository {remove['repository']}")
exit(1)
with open("blacklist", "r") as blacklist_file:
blacklistcontent = json.loads(blacklist_file.read())
with open("removed", "r") as removed_file:
removedcontent = json.loads(removed_file.read())
blacklistcontent.append(remove["repository"])
if remove["repository"].split("/")[0] not in orgs:
if remove["repository"] in categorycontent:
categorycontent.remove(remove["repository"])
data = {"repository": remove["repository"]}
if remove["reason"] is not None:
data["reason"] = remove["reason"]
if remove["removal_type"] is not None:
data["removal_type"] = remove["removal_type"]
if remove["link"] is not None:
data["link"] = remove["link"]
removedcontent.append(data)
with open("blacklist", "w") as blacklist_file:
blacklist_file.write(
json.dumps(sorted(blacklistcontent, key=str.casefold), indent=2)
)
with open("removed", "w") as removed_file:
removed_file.write(json.dumps(removedcontent, indent=2))
if remove["repository"].split("/")[0] not in orgs:
with open(foundcategory, "w") as cat_file:
cat_file.write(json.dumps(sorted(categorycontent, key=str.casefold), indent=2))
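# --- example invocation (illustrative) ---
# The repository name, removal type, reason and link below are made-up
# placeholders, not values from the HACS default repository:
#
#   python3 scripts/remove_repo.py author/integration-name remove \
#       "Repository no longer maintained" https://github.com/hacs/default/pull/1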
|
the-stack_106_18981
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from driver.items import InputItem, FormItem
from selenium import webdriver
class FormSpider(CrawlSpider):
name = "form"
allowed_domains = ["127.0.0.1"]
def __init__(self, *args, **kwargs):
super(FormSpider, self).__init__(*args, **kwargs)
self.start_urls = [kwargs.get('start_url')]
follow = True if kwargs.get('follow') == 'true' else False
self.rules = (
Rule (SgmlLinkExtractor(allow=('')), callback='parse_form', follow=follow),
)
super(FormSpider, self)._compile_rules()
try:
proxy = kwargs.get('proxy')
service_args = [
'--proxy=' + proxy,
'--proxy-type=http',
]
except:
service_args = None
self.browser = webdriver.PhantomJS(service_args=service_args)
def closed(self, reason):
self.browser.quit()
def parse_form(self, response):
register_patterns = ['register', 'signup', 'sign-up', 'sign_up']
if any(pattern in response.url for pattern in register_patterns):
use_browser = True
else:
use_browser = False
for sel in response.xpath('//form'):
if use_browser:
self.browser.get(response.url)
formItem = FormItem()
formItem['action'] = ''
try:
formItem['action'] = sel.xpath('@action').extract()[0]
except:
pass
formItem['url'] = response.url
formItem['method'] = ''
try:
formItem['method'] = sel.xpath('@method').extract()[0].lower()
except:
pass
formItem['inputs'] = []
for ip in sel.xpath('.//input|.//textarea'):
try:
_id = ip.xpath('@id').extract()[0]
except:
_id = ''
if _id != '':
if use_browser:
input_element = self.browser.find_element_by_id(_id)
if not input_element.is_displayed():
continue
try:
name = ip.xpath('@name').extract()[0]
except:
name = ''
try:
_type = ip.xpath('@type').extract()[0]
except:
_type = 'textarea'
try:
value = ip.xpath('@value').extract()[0]
except:
value = ''
inputItem = InputItem()
inputItem['id'] = _id
inputItem['name'] = name
inputItem['type'] = _type
inputItem['value'] = value
formItem['inputs'].append(inputItem)
try:
_id = sel.xpath('@id').extract()[0]
except:
_id = ''
try:
_class = sel.xpath('@class').extract()[0]
except:
_class = ''
try:
enctype = sel.xpath('@enctype').extract()[0]
except:
enctype = ''
formItem['id'] = _id
formItem['clazz'] = _class
formItem['enctype'] = enctype
yield formItem
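# --- example invocation (illustrative, not part of the original spider) ---
# The spider reads its start URL, follow flag and optional proxy from -a
# arguments; the values below are placeholders:
#
#   scrapy crawl form -a start_url=http://127.0.0.1:8000/ -a follow=true \
#       -a proxy=127.0.0.1:8118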
|
the-stack_106_18983
|
import pytest
from dateutil.parser import parse as dateutil_parse
from django.urls import reverse
from django.utils.timezone import now
from rest_framework import status
from datahub.company.test.factories import AdviserFactory
from datahub.core.test_utils import AdminTestMixin
from datahub.omis.order.test.factories import OrderPaidFactory, OrderWithOpenQuoteFactory
from datahub.omis.payment.constants import PaymentMethod, RefundStatus
from datahub.omis.payment.models import Refund
from datahub.omis.payment.test.factories import (
ApprovedRefundFactory,
RejectedRefundFactory,
RequestedRefundFactory,
)
class TestRefundAdmin(AdminTestMixin):
"""Tests for the Refund Admin."""
def test_add(self):
"""
Test adding a refund with status 'Approved'.
This is the only status allowed when creating a record at the moment.
"""
order = OrderPaidFactory()
now_datetime = now()
now_date_str = now_datetime.date().isoformat()
now_time_str = now_datetime.time().isoformat()
assert Refund.objects.count() == 0
url = reverse('admin:omis_payment_refund_add')
data = {
'order': order.pk,
'status': RefundStatus.APPROVED,
'requested_on_0': now_date_str,
'requested_on_1': now_time_str,
'requested_by': AdviserFactory().pk,
'requested_amount': order.total_cost,
'refund_reason': 'lorem ipsum refund reason',
'level1_approved_on_0': now_date_str,
'level1_approved_on_1': now_time_str,
'level1_approved_by': AdviserFactory().pk,
'level1_approval_notes': 'lorem ipsum level 1',
'level2_approved_on_0': now_date_str,
'level2_approved_on_1': now_time_str,
'level2_approved_by': AdviserFactory().pk,
'level2_approval_notes': 'lorem ipsum level 2',
'method': PaymentMethod.BACS,
'net_amount': order.total_cost - 1,
'vat_amount': 1,
'additional_reference': 'additional reference',
'rejection_reason': 'lorem ipsum rejection reason',
}
response = self.client.post(url, data, follow=True)
assert response.status_code == status.HTTP_200_OK
assert Refund.objects.count() == 1
refund = Refund.objects.first()
assert refund.order.pk == data['order']
assert refund.status == data['status']
assert refund.requested_on == now_datetime
assert refund.requested_by.pk == data['requested_by']
assert refund.requested_amount == data['requested_amount']
assert refund.refund_reason == data['refund_reason']
assert refund.level1_approved_on == now_datetime
assert refund.level1_approved_by.pk == data['level1_approved_by']
assert refund.level1_approval_notes == data['level1_approval_notes']
assert refund.level2_approved_on == now_datetime
assert refund.level2_approved_by.pk == data['level2_approved_by']
assert refund.level2_approval_notes == data['level2_approval_notes']
assert refund.method == data['method']
assert refund.net_amount == data['net_amount']
assert refund.vat_amount == data['vat_amount']
assert refund.additional_reference == data['additional_reference']
assert refund.rejection_reason == data['rejection_reason']
assert refund.total_amount == order.total_cost
assert refund.created_by == self.user
assert refund.modified_by == self.user
assert not refund.payment
@pytest.mark.parametrize(
'refund_factory',
(
RequestedRefundFactory,
ApprovedRefundFactory,
RejectedRefundFactory,
),
)
def test_change(self, refund_factory):
"""Test changing a refund record, its status cannot change at this point."""
refund = refund_factory()
order = OrderPaidFactory()
now_datetime = now()
now_date_str = now_datetime.date().isoformat()
now_time_str = now_datetime.time().isoformat()
url = reverse('admin:omis_payment_refund_change', args=(refund.id,))
data = {
'order': order.pk,
'status': refund.status,
'requested_on_0': now_date_str,
'requested_on_1': now_time_str,
'requested_by': AdviserFactory().pk,
'requested_amount': order.total_cost,
'refund_reason': 'lorem ipsum refund reason',
'level1_approved_on_0': now_date_str,
'level1_approved_on_1': now_time_str,
'level1_approved_by': AdviserFactory().pk,
'level1_approval_notes': 'lorem ipsum level 1',
'level2_approved_on_0': now_date_str,
'level2_approved_on_1': now_time_str,
'level2_approved_by': AdviserFactory().pk,
'level2_approval_notes': 'lorem ipsum level 2',
'method': PaymentMethod.BACS,
'net_amount': order.total_cost - 1,
'vat_amount': 1,
'additional_reference': 'additional reference',
'rejection_reason': 'lorem ipsum rejection reason',
}
response = self.client.post(url, data, follow=True)
assert response.status_code == status.HTTP_200_OK
refund.refresh_from_db()
assert refund.order.pk == data['order']
assert refund.status == data['status']
assert refund.requested_on == now_datetime
assert refund.requested_by.pk == data['requested_by']
assert refund.requested_amount == data['requested_amount']
assert refund.refund_reason == data['refund_reason']
assert refund.level1_approved_on == now_datetime
assert refund.level1_approved_by.pk == data['level1_approved_by']
assert refund.level1_approval_notes == data['level1_approval_notes']
assert refund.level2_approved_on == now_datetime
assert refund.level2_approved_by.pk == data['level2_approved_by']
assert refund.level2_approval_notes == data['level2_approval_notes']
assert refund.method == data['method']
assert refund.net_amount == data['net_amount']
assert refund.vat_amount == data['vat_amount']
assert refund.additional_reference == data['additional_reference']
assert refund.rejection_reason == data['rejection_reason']
assert refund.total_amount == order.total_cost
assert refund.created_by != self.user
assert refund.modified_by == self.user
assert not refund.payment
@pytest.mark.parametrize(
'data_delta,errors',
(
# invalid status
(
{'status': RefundStatus.REJECTED},
{
'status': [
'Select a valid choice. rejected is not one of the available choices.',
],
},
),
# invalid order status
(
{'order': lambda *_: OrderWithOpenQuoteFactory()},
{'order': ['This order has not been paid for.']},
),
# requested on < order.paid_on
(
{
'order': lambda *_: OrderPaidFactory(
paid_on=dateutil_parse('2018-01-01T13:00Z'),
),
'requested_on_0': '2018-01-01',
'requested_on_1': '12:59',
},
{
'requested_on': [
'Please specify a value greater than or equal to Jan. 1, 2018, 1 p.m..',
],
},
),
# level1 approved on < order.paid_on
(
{
'order': lambda *_: OrderPaidFactory(
paid_on=dateutil_parse('2018-01-01T13:00Z'),
),
'level1_approved_on_0': '2018-01-01',
'level1_approved_on_1': '12:59',
},
{
'level1_approved_on': [
'Please specify a value greater than or equal to Jan. 1, 2018, 1 p.m..',
],
},
),
# level2 approved on < order.paid_on
(
{
'order': lambda *_: OrderPaidFactory(
paid_on=dateutil_parse('2018-01-01T13:00Z'),
),
'level2_approved_on_0': '2018-01-01',
'level2_approved_on_1': '12:59',
},
{
'level2_approved_on': [
'Please specify a value greater than or equal to Jan. 1, 2018, 1 p.m..',
],
},
),
# same level1 and level2 approver
(
{
'level1_approved_by': lambda *_: AdviserFactory().pk,
'level2_approved_by': lambda _, d: d['level1_approved_by'],
},
{
'level1_approved_by': ['Approvers level1 and level2 have to be different.'],
},
),
# net_amount + vat_amount > order.total_cost
(
{
'net_amount': lambda o, _: o.total_cost,
'vat_amount': lambda *_: 1,
},
{
'net_amount': lambda o, _: [
f'Remaining amount that can be refunded: {o.total_cost}.',
],
},
),
),
)
def test_validation_error(self, data_delta, errors):
"""Test validation errors."""
def resolve(value, order, data):
if callable(value):
return value(order, data)
return value
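        # Descriptive note (added for clarity): values in `data_delta` and `errors` may be
        # callables taking (order, data) so that expected data and error messages can
        # reference objects created while the test runs.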
order = data_delta.pop('order', None) or OrderPaidFactory()
order = resolve(order, None, None)
now_datetime = now()
now_date_str = now_datetime.date().isoformat()
now_time_str = now_datetime.time().isoformat()
url = reverse('admin:omis_payment_refund_add')
data = {
'order': order.pk,
'status': RefundStatus.APPROVED,
'requested_on_0': now_date_str,
'requested_on_1': now_time_str,
'requested_by': AdviserFactory().pk,
'requested_amount': order.total_cost,
'refund_reason': 'lorem ipsum refund reason',
'level1_approved_on_0': now_date_str,
'level1_approved_on_1': now_time_str,
'level1_approved_by': AdviserFactory().pk,
'level1_approval_notes': 'lorem ipsum level 1',
'level2_approved_on_0': now_date_str,
'level2_approved_on_1': now_time_str,
'level2_approved_by': AdviserFactory().pk,
'level2_approval_notes': 'lorem ipsum level 2',
'method': PaymentMethod.BACS,
'net_amount': order.total_cost - 1,
'vat_amount': 1,
'additional_reference': 'additional reference',
}
for data_key, data_value in data_delta.items():
data[data_key] = resolve(data_value, order, data)
response = self.client.post(url, data, follow=True)
assert response.status_code == status.HTTP_200_OK
form = response.context['adminform'].form
assert not form.is_valid()
for error_key, error_value in errors.items():
errors[error_key] = resolve(error_value, order, errors)
assert form.errors == errors
@pytest.mark.parametrize(
'refund_factory,required_fields',
(
(
RequestedRefundFactory,
(
'order',
'status',
'requested_on',
'requested_amount',
),
),
(
ApprovedRefundFactory,
(
'order',
'status',
'requested_on',
'requested_amount',
'level1_approved_on',
'level1_approved_by',
'level2_approved_on',
'level2_approved_by',
'method',
'net_amount',
'vat_amount',
),
),
(
RejectedRefundFactory,
(
'order',
'status',
'requested_on',
'requested_amount',
),
),
),
)
def test_required_fields(self, refund_factory, required_fields):
"""Test required fields depending on the status of the refund."""
refund = refund_factory()
url = reverse('admin:omis_payment_refund_change', args=(refund.id,))
data = {
'order': '',
'status': '',
'requested_on_0': '',
'requested_on_1': '',
'requested_by': '',
'requested_amount': '',
'refund_reason': '',
'level1_approved_on_0': '',
'level1_approved_on_1': '',
'level1_approved_by': '',
'level1_approval_notes': '',
'level2_approved_on_0': '',
'level2_approved_on_1': '',
'level2_approved_by': '',
'level2_approval_notes': '',
'method': '',
'net_amount': '',
'vat_amount': '',
'additional_reference': '',
'rejection_reason': '',
}
response = self.client.post(url, data, follow=True)
form = response.context['adminform'].form
assert not form.is_valid()
assert form.errors == {
required_field: ['This field is required.']
for required_field in required_fields
}
@pytest.mark.parametrize(
'refund_factory',
(
RequestedRefundFactory,
ApprovedRefundFactory,
RejectedRefundFactory,
),
)
def test_cannot_change_status(self, refund_factory):
"""Test that the status field cannot be changed at any point."""
refund = refund_factory()
now_datetime = now()
date_str = now_datetime.date().isoformat()
time_str = now_datetime.time().isoformat()
url = reverse('admin:omis_payment_refund_change', args=(refund.id,))
default_data = {
'order': refund.order.pk,
'requested_on_0': date_str,
'requested_on_1': time_str,
'requested_amount': refund.requested_amount,
'refund_reason': refund.refund_reason,
'level1_approved_on_0': date_str,
'level1_approved_on_1': time_str,
'level1_approved_by': AdviserFactory().pk,
'level2_approved_on_0': date_str,
'level2_approved_on_1': time_str,
'level2_approved_by': AdviserFactory().pk,
'method': refund.method or '',
'net_amount': '' if refund.net_amount is None else refund.net_amount,
'vat_amount': '' if refund.vat_amount is None else refund.vat_amount,
}
for changed_status, _ in RefundStatus.choices:
if changed_status == refund.status:
continue
data = {
**default_data,
'status': changed_status,
}
response = self.client.post(url, data, follow=True)
assert response.status_code == status.HTTP_200_OK
form = response.context['adminform'].form
assert not form.is_valid()
assert form.errors == {
'status': [
f'Select a valid choice. {changed_status} is not one of the available '
f'choices.',
],
}
|
the-stack_106_18984
|
from __future__ import division
import numpy as np
import dolfin as df
import pytest
import os
import finmag
from finmag.field import Field
#from finmag.energies import Zeeman, TimeZeeman, DiscreteTimeZeeman, OscillatingZeeman
from finmag.energies import Zeeman
#from finmag.util.consts import mu0
from finmag.util.meshes import pair_of_disks
from finmag.util.helpers import vector_valued_function
#from math import sqrt, pi, cos, sin
class MultiDomainTest(object):
def __init__(self, mesh, get_domain_id, m_vals, Ms, unit_length=1e-9):
"""
`get_domain_id` is a function of the form (x, y, z) -> id which maps
some point coordinates in the mesh to an integer identifying the domain
which the point belongs to.
"""
self.mesh = mesh
self.get_domain_id = get_domain_id
        self.domain_ids = sorted({get_domain_id(pt) for pt in mesh.coordinates()})
self.Ms = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)
self.unit_length = unit_length
#self.rtol = rtol
domain_classes = {}
for k in self.domain_ids:
class DomainK(df.SubDomain):
def inside(self, pt, on_boundary):
return get_domain_id(pt) == k
domain_classes[k] = DomainK()
domains = df.CellFunction("size_t", mesh)
domains.set_all(0)
for k, d in domain_classes.items():
d.mark(domains, k)
self.submeshes = [df.SubMesh(mesh, domains, i)
for i in self.domain_ids]
self.dx = df.Measure("dx")[domains]
def m_init(pt):
return m_vals[self.get_domain_id(pt)]
self.V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
self.m = Field(self.V)
self.m.set(m_init, normalised=True)
def compute_energies_on_subdomains(self, interaction):
"""
Given some interaction (such as Zeeman, Demag, Exchange, etc.),
compute the associated energies on each subdomain as well as the
total energy.
*Returns*
A pair (E_subdmns, E_total), where E_subdmns is a dictionary of
energies indexed by the subdomain indices, and E_total is the total
energy of the interaction.
"""
interaction.setup(self.m, self.Ms, unit_length=self.unit_length)
return {k: interaction.compute_energy(dx=self.dx(k)) for k in self.domain_ids},\
interaction.compute_energy(df.dx)
def check_energy_consistency(self, interaction):
E_domains, E_total = self.compute_energies_on_subdomains(interaction)
finmag.logger.debug("Energies on subdomains: {}".format(E_domains))
finmag.logger.debug("Sum of energies on subdomains: {}; total energy: {}".format(
sum(E_domains.values()), E_total))
assert np.allclose(
sum(E_domains.values()), E_total, atol=0, rtol=1e-12)
@pytest.mark.slow
def test_energies_in_separated_subdomains(tmpdir):
"""
Create a mesh with two subdomains. For each energy class compute the energy
on each subdomain and compare with the total energy on the whole mesh. Also
compare with analytical expressions if feasible.
"""
os.chdir(str(tmpdir))
# Create a mesh consisting of two disks (with different heights)
d = 30.0
h1 = 5.0
h2 = 10.0
sep = 10.0
maxh = 2.5
Ms = 8.6e5
unit_length = 1e-9
RTOL = 5e-3 # achievable relative tolerance depends on maxh
zeeman = Zeeman(1e6 * np.array([1, 0, 0]))
mesh = pair_of_disks(d, d, h1, h2, sep, theta=0, maxh=maxh)
def get_domain_id(pt):
x, y, z = pt
return 1 if (np.linalg.norm([x, y]) < 0.5 * (d + sep)) else 2
m_vals = {1: [1, 0, 0],
2: [0.5, -0.8, 0]}
multi_domain_test = MultiDomainTest(
mesh, get_domain_id, m_vals, Ms, unit_length=unit_length)
multi_domain_test.check_energy_consistency(zeeman)
# The same test for a mesh with subdomains that touch will fail for some reason.
# XXX TODO: need to investigate this.
@pytest.mark.xfail
def test_energies_in_touching_subdomains():
# Max, I fixed some things in here (missing m_vals, Ms, Zeeman and unit_length.)
# Also changed 'get_domain_id' to 'get_domain_id2' in the MultiDomainTest
# call below. Could you check this is what you meant? (Maybe the
# test even passes now?) XXX TODO, 5 Oct 2013, Hans
zeeman = Zeeman(1e6 * np.array([1, 0, 0]))
m_vals = {1: [1, 0, 0],
2: [0.5, -0.8, 0]}
box_mesh = df.BoxMesh(df.Point(-50, -20, 0), df.Point(50, 20, 5), 30, 10, 2)
Ms = 8.6e5
unit_length = 1e-9
def get_domain_id2(pt):
return 1 if (pt[0] < 0) else 2
multi_domain_test = MultiDomainTest(
box_mesh, get_domain_id2, m_vals, Ms, unit_length=unit_length)
# The next line fails for touching subdomains. Need to investigate this.
multi_domain_test.check_energy_consistency(zeeman)
|
the-stack_106_18986
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 Abram Hindle, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
class HTTPClient(object):
def get_host_port(self,url):
host = urllib.parse.urlparse(url).hostname
port = urllib.parse.urlparse(url).port
path = urllib.parse.urlparse(url).path
        if port is None:
            port = 80
        if path == "":
            path = "/"  # fix: assignment, not comparison, so an empty path defaults to "/"
        return host, port, path
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return None
def get_code(self, data):
status = data.split("\r\n")[0]
code = status.split(" ")[1]
code = int(code)
return code
def get_headers(self,data):
headers = data.split("\r\n\r\n")[0]
return headers
def get_body(self, data):
body = data.split("\r\n\r\n")[1]
return body
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
return buffer.decode('utf-8')
def GET(self, url, args=None):
code = 500
host, port, path = self.get_host_port(url)
body = f"GET {path} HTTP/1.1\r\nHost: {host}\r\n\r\n"
self.connect(host, port)
self.sendall(body)
self.socket.shutdown(socket.SHUT_WR)
data = self.recvall(self.socket)
code = self.get_code(data)
body = self.get_body(data)
self.close()
return HTTPResponse(code, body)
def POST(self, url, args=None):
code = 500
host, port, path = self.get_host_port(url)
body = f"POST {path} HTTP/1.1\r\nHost: {host}\r\nContent-type: application/x-www-form-urlencoded\r\nContent-length: "
if args != None:
content = ""
for key in args:
content = content + key + "=" + args[key] + "&"
content = content[:-1]
content_len = len(content)
body = body + str(content_len) + "\r\n\r\n" + content
else:
body = body + "0\r\n\r\n"
self.connect(host, port)
self.sendall(body)
self.socket.shutdown(socket.SHUT_WR)
data = self.recvall(self.socket)
code = self.get_code(data)
body = self.get_body(data)
self.close()
return HTTPResponse(code, body)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] ))
|
the-stack_106_18987
|
import karkkainen_sanders as tks
import sys
sys.stdin = open('input.txt')
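# Approach (descriptive comment added for clarity): all input lines are concatenated into
# one integer sequence, with every character shifted by +10000 and a unique per-line
# sentinel (i + 100) appended after line i so suffixes from different lines never share a
# spurious common prefix. A suffix array and LCP array are built over this sequence; a
# window is slid over the sorted suffixes until it covers suffixes from at least
# (N + 1) // 2 distinct lines, and the minimum LCP inside the window gives a candidate
# longest substring common to that many lines.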
while True:
N = int(input())
if N == 0:
break
sStr = []
for i in range(N):
        line = input().strip()
for c in line:
sStr.append(ord(c) + 10000)
# sStr.append(c)
sStr.append(i + 100)
# sStr.append(chr(i + ord('1')))
# for s in sStr: print ord(s)
L = len(sStr)
SA = tks.simple_kark_sort(sStr)
LCP = tks.LCP(sStr, SA)
# for v in SA[:L]: print ''.join(sStr[v:])
# print LCP
belongLine = [0] * L
for i, v in enumerate(SA[:L]):
j = v
while sStr[j] > 10000:
j += 1
belongLine[i] = sStr[j]
# print belongLine
# find the start and end
result = 0
maxStart = []
for start in range(0, L - 1):
usedLine = set()
usedLine.add(belongLine[start])
usedLine.add(belongLine[start + 1])
end = start
while len(usedLine) < (N + 1) // 2 and end < L:
usedLine.add(belongLine[end])
end += 1
lcp = min(LCP[start:end + 1])
if result < lcp:
maxStart, result = [start], lcp
elif result == lcp:
maxStart.append(start)
if result == 0:
print('?')
else:
for start in maxStart:
rStr = []
for i in range(SA[start], SA[start] + result):
                rStr.append(chr(sStr[i] - 10000))
print(''.join(rStr))
print('')
|
the-stack_106_18988
|
from setuptools import setup, find_packages
import os
try:
long_description = open(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'README.rst')).read()
except IOError:  # README.rst missing or unreadable
long_description = 'Please refer to https://pytenable.readthedocs.io'
print('! could not read README.rst file.')
setup(
name='pyTenable',
version='1.1.0',
description='Python library to interface into Tenable\'s products and applications',
author='Tenable, Inc.',
long_description=long_description,
author_email='[email protected]',
url='https://github.com/tenable/pytenable',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='tenable tenable_io securitycenter containersecurity',
packages=find_packages(exclude=['docs', 'tests']),
install_requires=[
'requests>=2.19',
'python-dateutil>=2.6',
'semver>=2.8.1',
'ipaddress>=1.0.22'
],
extras_require={
'NessusReportv2': ['defusedxml>=0.5.0'],
'PWCertAuth': ['requests-pkcs12>=1.3'],
'docker': ['docker>=3.7.2'],
'complete': [
'defusedxml>=0.5.0',
'requests-pkcs12>=1.3',
'docker>=3.7.2',
]
}
)
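# Hedged usage note (not part of the original setup.py): the optional extras declared in
# `extras_require` above can be pulled in at install time, e.g.
#   pip install pyTenable[complete]        # all optional dependencies
#   pip install pyTenable[NessusReportv2]  # only defusedxml for Nessus v2 reports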
|
the-stack_106_18991
|
"""A helper function for parsing and executing Recast.AI skills."""
import logging
import json
import aiohttp
from voluptuous import Required
from opsdroid.const import DEFAULT_LANGUAGE
from opsdroid.const import SAPCAI_API_ENDPOINT
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = {Required("token"): str, "min-score": float}
async def call_sapcai(message, config, lang=DEFAULT_LANGUAGE):
"""Call the SAP Conversational AI api and return the response."""
async with aiohttp.ClientSession(trust_env=True) as session:
payload = {"language": lang, "text": message.text}
headers = {
"Authorization": "Token " + config["token"],
"Content-Type": "application/json",
}
resp = await session.post(
SAPCAI_API_ENDPOINT, data=json.dumps(payload), headers=headers
)
result = await resp.json()
_LOGGER.info(_("SAP Conversational AI response - %s."), json.dumps(result))
return result
async def parse_sapcai(opsdroid, skills, message, config):
"""Parse a message against all SAP Conversational AI intents."""
matched_skills = []
language = config.get("lang") or opsdroid.config.get("lang", DEFAULT_LANGUAGE)
if "token" in config:
try:
result = await call_sapcai(message, config, language)
except aiohttp.ClientOSError:
_LOGGER.error(
_("No response from SAP Conversational.AI, check your network.")
)
return matched_skills
if result["results"] is None:
_LOGGER.error(_("SAP Conversational AI error - %s."), result["message"])
return matched_skills
if not result["results"]["intents"]:
_LOGGER.error(
_(
"SAP Conversational AI error - "
"No intent found "
"for the message %s"
),
str(message.text),
)
return matched_skills
confidence = result["results"]["intents"][0]["confidence"]
if "min-score" in config and confidence < config["min-score"]:
_LOGGER.debug(_("SAP Conversational AI score lower than min-score."))
return matched_skills
if result:
for skill in skills:
for matcher in skill.matchers:
if "sapcai_intent" in matcher:
if (
matcher["sapcai_intent"]
in result["results"]["intents"][0]["slug"]
):
message.sapcai = result
for key, entity in (
result["results"].get("entities", {}).items()
):
await message.update_entity(
key, entity[0]["raw"], entity[0]["confidence"]
)
_LOGGER.debug(
_("Matched against skill %s."), skill.config["name"]
)
matched_skills.append(
{
"score": confidence,
"skill": skill,
"config": skill.config,
"message": message,
}
)
return matched_skills
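# Hedged illustration (not part of the original module): a skill matched by this parser is
# typically declared with opsdroid's `match_sapcai` decorator; the intent name "greetings"
# below is a hypothetical example.
#
#     from opsdroid.matchers import match_sapcai
#     from opsdroid.skill import Skill
#
#     class GreeterSkill(Skill):
#         @match_sapcai("greetings")
#         async def greet(self, message):
#             await message.respond("Hi there!")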
|
the-stack_106_18992
|
from tokenizers import Tokenizer, AddedToken, decoders, trainers
from tokenizers.models import WordPiece
from tokenizers.normalizers import BertNormalizer
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.processors import BertProcessing
from .base_tokenizer import BaseTokenizer
from typing import Optional, List, Union, Dict
class BertWordPieceTokenizer(BaseTokenizer):
""" Bert WordPiece Tokenizer """
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
unk_token: Union[str, AddedToken] = "[UNK]",
sep_token: Union[str, AddedToken] = "[SEP]",
cls_token: Union[str, AddedToken] = "[CLS]",
pad_token: Union[str, AddedToken] = "[PAD]",
mask_token: Union[str, AddedToken] = "[MASK]",
clean_text: bool = True,
handle_chinese_chars: bool = True,
strip_accents: Optional[bool] = None,
lowercase: bool = True,
wordpieces_prefix: str = "##",
):
if vocab is not None:
tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(unk_token)))
else:
tokenizer = Tokenizer(WordPiece(unk_token=str(unk_token)))
# Let the tokenizer know about special tokens if they are part of the vocab
if tokenizer.token_to_id(str(unk_token)) is not None:
tokenizer.add_special_tokens([str(unk_token)])
if tokenizer.token_to_id(str(sep_token)) is not None:
tokenizer.add_special_tokens([str(sep_token)])
if tokenizer.token_to_id(str(cls_token)) is not None:
tokenizer.add_special_tokens([str(cls_token)])
if tokenizer.token_to_id(str(pad_token)) is not None:
tokenizer.add_special_tokens([str(pad_token)])
if tokenizer.token_to_id(str(mask_token)) is not None:
tokenizer.add_special_tokens([str(mask_token)])
tokenizer.normalizer = BertNormalizer(
clean_text=clean_text,
handle_chinese_chars=handle_chinese_chars,
strip_accents=strip_accents,
lowercase=lowercase,
)
tokenizer.pre_tokenizer = BertPreTokenizer()
if vocab is not None:
sep_token_id = tokenizer.token_to_id(str(sep_token))
if sep_token_id is None:
raise TypeError("sep_token not found in the vocabulary")
cls_token_id = tokenizer.token_to_id(str(cls_token))
if cls_token_id is None:
raise TypeError("cls_token not found in the vocabulary")
tokenizer.post_processor = BertProcessing(
(str(sep_token), sep_token_id), (str(cls_token), cls_token_id)
)
tokenizer.decoder = decoders.WordPiece(prefix=wordpieces_prefix)
parameters = {
"model": "BertWordPiece",
"unk_token": unk_token,
"sep_token": sep_token,
"cls_token": cls_token,
"pad_token": pad_token,
"mask_token": mask_token,
"clean_text": clean_text,
"handle_chinese_chars": handle_chinese_chars,
"strip_accents": strip_accents,
"lowercase": lowercase,
"wordpieces_prefix": wordpieces_prefix,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab: str, **kwargs):
vocab = WordPiece.read_file(vocab)
return BertWordPieceTokenizer(vocab, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
limit_alphabet: int = 1000,
initial_alphabet: List[str] = [],
special_tokens: List[Union[str, AddedToken]] = [
"[PAD]",
"[UNK]",
"[CLS]",
"[SEP]",
"[MASK]",
],
show_progress: bool = True,
wordpieces_prefix: str = "##",
):
""" Train the model using the given files """
trainer = trainers.WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
initial_alphabet=initial_alphabet,
special_tokens=special_tokens,
show_progress=show_progress,
continuing_subword_prefix=wordpieces_prefix,
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
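# Hedged usage sketch (added for illustration, not part of the original module; this file
# uses relative imports, so the example is shown as a comment). Assumes a BERT-compatible
# WordPiece vocabulary exists at the hypothetical path "vocab.txt":
#
#     tokenizer = BertWordPieceTokenizer.from_file("vocab.txt", lowercase=True)
#     encoding = tokenizer.encode("Hello, how are you?", "I'm fine, thanks.")
#     print(encoding.tokens)    # WordPiece tokens wrapped as [CLS] ... [SEP] ... [SEP]
#     print(encoding.type_ids)  # 0 for the first sequence, 1 for the pair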
|
the-stack_106_18995
|
"""Initiator transport"""
import asyncio
from datetime import tzinfo, time
import logging
from ssl import SSLContext
from typing import Optional, Callable, Type, Tuple
from jetblack_fixparser.meta_data import ProtocolMetaData
from jetblack_fixparser.fix_message import SOH
from ..types import Handler, Store
from ..utils.cancellation import register_cancellation_event
from .fix_transport import fix_stream_processor
from .fix_read_buffer import FixReadBuffer
from .fix_reader_async import fix_read_async
from .initiator_handler import InitiatorHandler
LOGGER = logging.getLogger(__name__)
InitiatorFactory = Callable[
[ProtocolMetaData, str, str, Store, int, asyncio.Event],
Handler
]
async def initiate(
host: str,
port: int,
handler: Handler,
cancellation_event: asyncio.Event,
*,
ssl: Optional[SSLContext] = None,
shutdown_timeout: float = 10.0,
sep: bytes = SOH,
convert_sep_to_soh_for_checksum: bool = False,
validate: bool = True
) -> None:
LOGGER.info(
'connecting to %s:%s%s',
host,
port,
" over ssl" if ssl else ""
)
reader, writer = await asyncio.open_connection(host, port, ssl=ssl)
read_buffer = FixReadBuffer(sep, convert_sep_to_soh_for_checksum, validate)
buffered_reader = fix_read_async(read_buffer, reader, 1024)
await fix_stream_processor(
handler,
shutdown_timeout,
buffered_reader,
writer,
cancellation_event
)
LOGGER.info(
'disconnected from %s:%s%s',
host,
port,
" over ssl" if ssl else ""
)
def create_initiator(
klass: Type[InitiatorHandler],
protocol: ProtocolMetaData,
sender_comp_id: str,
target_comp_id: str,
store: Store,
heartbeat_timeout: int,
cancellation_event: asyncio.Event,
*,
heartbeat_threshold: int = 1,
logon_time_range: Optional[Tuple[time, time]] = None,
tz: Optional[tzinfo] = None
) -> InitiatorHandler:
handler = klass(
protocol,
sender_comp_id,
target_comp_id,
store,
heartbeat_timeout,
cancellation_event,
heartbeat_threshold=heartbeat_threshold,
logon_time_range=logon_time_range,
tz=tz
)
return handler
def start_initiator(
klass: Type[InitiatorHandler],
host: str,
port: int,
protocol: ProtocolMetaData,
sender_comp_id: str,
target_comp_id: str,
store: Store,
heartbeat_timeout: int,
*,
ssl: Optional[SSLContext] = None,
shutdown_timeout: float = 10.0,
heartbeat_threshold: int = 1,
logon_time_range: Optional[Tuple[time, time]] = None,
tz: Optional[tzinfo] = None
) -> None:
cancellation_event = asyncio.Event()
loop = asyncio.get_event_loop()
register_cancellation_event(cancellation_event, loop)
handler = create_initiator(
klass,
protocol,
sender_comp_id,
target_comp_id,
store,
heartbeat_timeout,
cancellation_event,
heartbeat_threshold=heartbeat_threshold,
logon_time_range=logon_time_range,
tz=tz
)
loop.run_until_complete(
initiate(
host,
port,
handler,
cancellation_event,
ssl=ssl,
shutdown_timeout=shutdown_timeout
)
)
|
the-stack_106_18996
|
from datetime import datetime
import pandas as pd
import pytest
from feast import Field
from feast.errors import SpecifiedFeaturesNotPresentError
from feast.infra.offline_stores.file_source import FileSource
from feast.types import Float64
from tests.integration.feature_repos.universal.entities import customer, driver, item
from tests.integration.feature_repos.universal.feature_views import (
conv_rate_plus_100_feature_view,
create_conv_rate_request_source,
create_driver_hourly_stats_batch_feature_view,
create_item_embeddings_batch_feature_view,
create_similarity_request_source,
similarity_feature_view,
)
@pytest.mark.integration
@pytest.mark.universal
@pytest.mark.parametrize("infer_features", [True, False], ids=lambda v: str(v))
def test_infer_odfv_features(environment, universal_data_sources, infer_features):
store = environment.feature_store
(entities, datasets, data_sources) = universal_data_sources
driver_hourly_stats = create_driver_hourly_stats_batch_feature_view(
data_sources.driver
)
request_source = create_conv_rate_request_source()
driver_odfv = conv_rate_plus_100_feature_view(
[driver_hourly_stats, request_source], infer_features=infer_features,
)
feast_objects = [driver_hourly_stats, driver_odfv, driver(), customer()]
store.apply(feast_objects)
odfv = store.get_on_demand_feature_view("conv_rate_plus_100")
assert len(odfv.features) == 3
@pytest.mark.integration
@pytest.mark.parametrize("infer_features", [True, False], ids=lambda v: str(v))
def test_infer_odfv_list_features(environment, infer_features, tmp_path):
fake_embedding = [1.0, 1.0]
items_df = pd.DataFrame(
data={
"item_id": [0],
"embedding_float": [fake_embedding],
"embedding_double": [fake_embedding],
"event_timestamp": [pd.Timestamp(datetime.utcnow())],
"created": [pd.Timestamp(datetime.utcnow())],
}
)
output_path = f"{tmp_path}/items.parquet"
items_df.to_parquet(output_path)
fake_items_src = FileSource(
path=output_path,
timestamp_field="event_timestamp",
created_timestamp_column="created",
)
item_feature_view = create_item_embeddings_batch_feature_view(fake_items_src)
sim_odfv = similarity_feature_view(
[item_feature_view, create_similarity_request_source()],
infer_features=infer_features,
)
store = environment.feature_store
store.apply([item(), item_feature_view, sim_odfv])
odfv = store.get_on_demand_feature_view("similarity")
assert len(odfv.features) == 2
@pytest.mark.integration
@pytest.mark.universal
def test_infer_odfv_features_with_error(environment, universal_data_sources):
store = environment.feature_store
(entities, datasets, data_sources) = universal_data_sources
features = [Field(name="conv_rate_plus_200", dtype=Float64)]
driver_hourly_stats = create_driver_hourly_stats_batch_feature_view(
data_sources.driver
)
request_source = create_conv_rate_request_source()
driver_odfv = conv_rate_plus_100_feature_view(
[driver_hourly_stats, request_source], features=features,
)
feast_objects = [driver_hourly_stats, driver_odfv, driver(), customer()]
with pytest.raises(SpecifiedFeaturesNotPresentError):
store.apply(feast_objects)
|