from .wso import WSO
|
# -*- coding: utf-8 -*-
class BackoffGenerator(object):
"""Generates a sequence of values for use in backoff timing"""
def __init__(self, initial_value, max_value=None, exponent=2):
"""Sets the initial value, max value, and exponent for the backoff"""
super(BackoffGenerator, self).__init__()
self.value = initial_value
self.initial_value = initial_value
self.max_value = max_value
self.exponent = exponent
def next(self):
"""Returns the next value for the backoff"""
next_value = self.value
# Update the value for the next call
if self.max_value is None or self.value != self.max_value:
self.value **= self.exponent
            if self.max_value is not None and self.value > self.max_value:
self.value = self.max_value
return next_value
def reset(self):
self.value = self.initial_value
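# A minimal usage sketch (illustrative): successive next() calls grow the delay
# exponentially until max_value is reached; reset() restarts the sequence.
if __name__ == "__main__":
    backoff = BackoffGenerator(2, max_value=60)
    print([backoff.next() for _ in range(5)])  # [2, 4, 16, 60, 60]
    backoff.reset()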
|
import calendar
import re
import datetime
class InvalidCard(Exception):
pass
class CardNotSupported(Exception):
pass
class CreditCard(object):
    # The regexp attribute should be overridden by the subclasses.
    # Attribute value should be a regexp instance
regexp = None
# Has to be set by the user after calling `validate_card`
# method on the gateway
card_type = None
    # Required mainly for PayPal. PayPal expects the card type
    # to be sent along with the requests.
card_name = None
def __init__(self, **kwargs):
if ("first_name" not in kwargs
or "last_name" not in kwargs) and "cardholders_name" not in kwargs:
raise TypeError("You must provide cardholders_name or first_name and last_name")
self.first_name = kwargs.get("first_name", None)
self.last_name = kwargs.get("last_name", None)
self.cardholders_name = kwargs.get("cardholders_name", None)
self.month = int(kwargs["month"])
self.year = int(kwargs["year"])
self.number = kwargs["number"]
self.verification_value = kwargs["verification_value"]
def is_luhn_valid(self):
"""Checks the validity of card number by using Luhn Algorithm.
Please see http://en.wikipedia.org/wiki/Luhn_algorithm for details."""
try:
num = [int(x) for x in str(self.number)]
except ValueError:
return False
return not sum(num[::-2] + [sum(divmod(d * 2, 10)) for d in num[-2::-2]]) % 10
def is_expired(self):
"""Check whether the credit card is expired or not"""
return datetime.date.today() > datetime.date(self.year, self.month, calendar.monthrange(self.year, self.month)[1])
def valid_essential_attributes(self):
"""Validate that all the required attributes of card are given"""
return (((self.first_name and
self.last_name) or
self.cardholders_name)
and self.month
and self.year
and self.number
and self.verification_value)
def is_valid(self):
"""Check the validity of the card"""
return self.is_luhn_valid() and \
not self.is_expired() and \
self.valid_essential_attributes()
@property
def expire_date(self):
"""Returns the expiry date of the card in MM-YYYY format"""
return '%02d-%04d' % (self.month, self.year)
@property
def name(self):
"""Concat first name and last name of the card holder"""
return '%s %s' % (self.first_name, self.last_name)
class Visa(CreditCard):
    card_name = "Visa"
    regexp = re.compile(r'^4\d{12}(\d{3})?$')
class MasterCard(CreditCard):
    card_name = "MasterCard"
    regexp = re.compile(r'^(5[1-5]\d{4}|677189)\d{10}$')
class Discover(CreditCard):
    card_name = "Discover"
    regexp = re.compile(r'^(6011|65\d{2})\d{12}$')
class AmericanExpress(CreditCard):
    card_name = "Amex"
    regexp = re.compile(r'^3[47]\d{13}$')
class DinersClub(CreditCard):
    card_name = "DinersClub"
    regexp = re.compile(r'^3(0[0-5]|[68]\d)\d{11}$')
class JCB(CreditCard):
    card_name = "JCB"
    regexp = re.compile(r'^35(28|29|[3-8]\d)\d{12}$')
class Switch(CreditCard):
    # Debit Card
    card_name = "Switch"
    regexp = re.compile(r'^6759\d{12}(\d{2,3})?$')
class Solo(CreditCard):
    # Debit Card
    card_name = "Solo"
    regexp = re.compile(r'^6767\d{12}(\d{2,3})?$')
class Dankort(CreditCard):
    # Debit cum Credit Card
    card_name = "Dankort"
    regexp = re.compile(r'^5019\d{12}$')
class Maestro(CreditCard):
    # Debit Card
    card_name = "Maestro"
    regexp = re.compile(r'^(5[06-8]|6\d)\d{10,17}$')
class Forbrugsforeningen(CreditCard):
    card_name = "Forbrugsforeningen"
    regexp = re.compile(r'^600722\d{10}$')
class Laser(CreditCard):
    # Debit Card
    card_name = "Laser"
    regexp = re.compile(r'^(6304|6706|6771|6709)\d{8}(\d{4}|\d{6,7})?$')
# A few helpful (probably) attributes
all_credit_cards = [Visa, MasterCard, Discover, AmericanExpress,
DinersClub, JCB]
all_debit_cards = [Switch, Solo, Dankort, Maestro,
Forbrugsforeningen, Laser]
all_cards = all_credit_cards + all_debit_cards
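# A minimal usage sketch (illustrative): detect the card type by matching the
# number against the regexps above, then validate it. The number used is the
# standard Visa test number.
if __name__ == '__main__':
    card = CreditCard(first_name='John', last_name='Doe',
                      month=12, year=2030, number='4111111111111111',
                      verification_value='123')
    for card_class in all_cards:
        if card_class.regexp.match(card.number):
            card.card_type = card_class
            card.card_name = card_class.card_name
            break
    print(card.card_name, card.is_valid())  # expected: Visa True (while not expired)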
|
import os
from pathlib import Path, PurePath
DEFAULT_CONFIG = """
- files_location:
path: REPO_LOCATION
- files:
"""
CONFIG_PATH = str(PurePath(os.environ.get("MOJETO_CONFIG_LOCATION", str(PurePath(Path.home(), ".config", "mojeto"))),
".mojeto"))
CONFIG_OVERRIDE_QUESTION = "Mojeto config already exists. Do you want to overwrite it?"
|
from pathlib import Path
from oneibl.one import ONE
import alf.io
from brainbox.core import Bunch
import qt
import numpy as np
import pyqtgraph as pg
import atlaselectrophysiology.ephys_atlas_gui as alignment_window
import data_exploration_gui.gui_main as trial_window
# some extra controls
class AlignmentWindow(alignment_window.MainWindow):
def __init__(self, offline=False, probe_id=None, one=None):
super(AlignmentWindow, self).__init__(probe_id=probe_id, one=one)
self.trial_gui = None
def cluster_clicked(self, item, point):
clust = super().cluster_clicked(item, point)
print(clust)
self.trial_gui.on_cluster_chosen(clust)
def add_trials_to_raster(self, trial_key='feedback_times'):
self.selected_trials = self.trial_gui.data.trials[trial_key]
x, y = self.vertical_lines(self.selected_trials, 0, 3840)
trial_curve = pg.PlotCurveItem()
trial_curve.setData(x=x, y=y, pen=self.rpen_dot, connect='finite')
trial_curve.setClickable(True)
self.fig_img.addItem(trial_curve)
self.fig_img.scene().sigMouseClicked.connect(self.on_mouse_clicked)
trial_curve.sigClicked.connect(self.trial_line_clicked)
def vertical_lines(self, x, ymin, ymax):
x = np.tile(x, (3, 1))
x[2, :] = np.nan
y = np.zeros_like(x)
y[0, :] = ymin
y[1, :] = ymax
y[2, :] = np.nan
return x.T.flatten(), y.T.flatten()
def trial_line_clicked(self, ev):
self.clicked = ev
def on_mouse_clicked(self, event):
if not event.double() and type(self.clicked) == pg.PlotCurveItem:
self.pos = self.data_plot.mapFromScene(event.scenePos())
x = self.pos.x() * self.x_scale
trial_id = np.argmin(np.abs(self.selected_trials - x))
# highlight the trial in the trial gui
self.clicked = None
class TrialWindow(trial_window.MainWindow):
def __init__(self):
super(TrialWindow, self).__init__()
self.alignment_gui = None
self.scat = None
def on_scatter_plot_clicked(self, scatter, point):
super().on_scatter_plot_clicked(scatter, point)
self.add_clust_scatter()
def on_cluster_list_clicked(self):
super().on_cluster_list_clicked()
self.add_clust_scatter()
def on_next_cluster_clicked(self):
super().on_next_cluster_clicked()
self.add_clust_scatter()
def on_previous_cluster_clicked(self):
super().on_previous_cluster_clicked()
self.add_clust_scatter()
def add_clust_scatter(self):
if not self.scat:
self.scat = pg.ScatterPlotItem()
self.alignment_gui.fig_img.addItem(self.scat)
self.scat.setData(self.data.spikes.times[self.data.clus_idx],
self.data.spikes.depths[self.data.clus_idx], brush='r')
# need to get out spikes.times and spikes.depths
def load_data(eid, probe, one=None):
one = one or ONE()
session_path = one.path_from_eid(eid).joinpath('alf')
probe_path = session_path.joinpath(probe)
data = Bunch()
data['trials'] = alf.io.load_object(session_path, 'trials', namespace='ibl')
data['spikes'] = alf.io.load_object(probe_path, 'spikes')
data['clusters'] = alf.io.load_object(probe_path, 'clusters')
return data
def viewer(probe_id=None, one=None):
"""
"""
probe = one.alyx.rest('insertions', 'list', id=probe_id)[0]
data = load_data(probe['session'], probe['name'], one=one)
qt.create_app()
av = AlignmentWindow(probe_id=probe_id, one=one)
bv = TrialWindow()
bv.on_data_given(data)
av.trial_gui = bv
bv.alignment_gui = av
av.show()
bv.show()
return av, bv
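# A minimal usage sketch (illustrative; the insertion id below is a placeholder UUID):
#
#   from oneibl.one import ONE
#   one = ONE()
#   av, bv = viewer(probe_id='xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx', one=one)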
|
# -*- coding: utf-8 -*-
#******************************************************************************************
# Copyright (c) 2019
# School of Electronics and Computer Science, University of Southampton and Hitachi, Ltd.
# All rights reserved. This program and the accompanying materials are made available under
# the terms of the MIT License which accompanies this distribution, and is available at
# https://opensource.org/licenses/mit-license.php
#
# March 1st, 2019 : First version.
#******************************************************************************************
"""
# DataSet loading script for DNN Coverage used with DeepSaucer
## Requirement
Same as DNN Coverage project
## Directory Structure
Any Directory
`-- DeepSaucer
`-- mnist
`-- data
|-- dataset_neuron_coverage_tensorflow_native.py @
|-- dataset.py
`-- dataset_test.py (call data_create)
"""
from pathlib import Path
from tensorflow.examples.tutorials.mnist import input_data
# input image dimensions
img_rows, img_cols = 28, 28
def data_create(downloaded_data):
data_path = Path(downloaded_data).absolute().joinpath(
'mnist_tensorflow_neuron_coverage')
dataset = input_data.read_data_sets(str(data_path), one_hot=True)
return [dataset.test.images, dataset.test.labels, 1.0]
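# A minimal usage sketch (illustrative; the download/cache directory is a placeholder):
if __name__ == '__main__':
    images, labels, keep_prob = data_create('/tmp/deep_saucer_data')
    print(images.shape, labels.shape, keep_prob)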
|
import argparse
from sdsparser.parser import SDSParser
from sdsparser.configs import Configs
from tqdm import tqdm
import csv
import os
import pprint
import sys
from tabulate import tabulate
def tabulate_sds_data(request_keys, sds_data):
print('='*100)
headers = ['file name'] + request_keys
out = list()
for file_name, data in sds_data.items():
row = [os.path.basename(file_name)]
for request_key in request_keys:
row.append(data[request_key])
out.append(row)
# out.sort(key=lambda x: int(x[0].split('_')[-2]))
print(tabulate(out, headers=headers, tablefmt='orgtbl'))
print('='*100)
print()
def main(passed_args=None):
args = get_args(passed_args)
request_keys = get_request_keys(args)
if args.txt_dir is not None:
sds_txt_dir = args.txt_dir
else:
sds_txt_dir = ''
if args.sds_dir is not None:
sds_dir = args.sds_dir
else:
sds_dir = os.getcwd()
sds_parser = SDSParser(request_keys=request_keys,
file_info=args.file_info)
sds_data = dict()
if args.file_name:
file_path = os.path.join(sds_dir, args.file_name)
sds_data[args.file_name] = sds_parser.get_sds_data(file_path)
else:
list_dir = os.listdir(sds_dir)
pbar = tqdm(list_dir, position=0)
for file in pbar:
pbar.set_description(f'Processing {file}')
if file.endswith('.pdf'):
file_path = os.path.join(sds_dir, file)
sds_data[file] = sds_parser.get_sds_data(file_path)
if args.as_dict:
return sds_data
if args.csv:
        with open('sds_data.csv', 'w', newline='') as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(list(list(sds_data.values())[0].keys()))
            for dict_row in sds_data.values():
                row = list(dict_row.values())
                writer.writerow(row)
else:
if len(request_keys) < 4:
tabulate_sds_data(request_keys, sds_data)
else:
pprint.pprint(sds_data)
def get_request_keys(args_list):
request_keys = []
for arg in vars(args_list):
if arg in Configs.REQUEST_KEYS and getattr(args_list, arg):
request_keys.append(arg)
if not request_keys:
request_keys = Configs.REQUEST_KEYS
return request_keys
def get_args(passed_args):
arg_parser = argparse.ArgumentParser(description='select request keys to extract data')
subparsers = arg_parser.add_subparsers()
parse_parser = subparsers.add_parser('parse', help='extract sds data from sds documents')
request_key_flags = ['--' + r for r in Configs.REQUEST_KEYS]
for flag in request_key_flags:
request_key = flag[2:]
parse_parser.add_argument(flag, action='store_true', help=f'extract {request_key}')
parse_parser.add_argument('-f', '--file_name', type=str, help='extract chemical data from a specific file')
parse_parser.add_argument('--txt_dir', type=str, help='path to pre-extracted sds text')
parse_parser.add_argument('--sds_dir', type=str, help='path to sds directory')
parse_parser.add_argument('--file_info', action='store_true')
parse_parser.add_argument('--csv', action='store_true', help='output data to csv file')
parse_parser.add_argument('--as_dict', action='store_true', help='return values as a python dictionary')
if len(sys.argv) == 1 and passed_args is None:
arg_parser.print_help(sys.stderr)
sys.exit(1)
if passed_args is not None:
args = arg_parser.parse_args(passed_args)
else:
args = arg_parser.parse_args()
return args
if __name__ == '__main__':
main()
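# Example invocations (illustrative; the entry-point name and directory paths are
# placeholders, and the request-key flags are generated from Configs.REQUEST_KEYS):
#
#   python -m sdsparser parse --sds_dir /path/to/sds_pdfs --csv
#
# or, from Python, returning the parsed values as a dictionary:
#
#   sds_data = main(['parse', '--as_dict', '--sds_dir', '/path/to/sds_pdfs'])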
|
# coding: utf-8
# ML parameters.
import os
import torch
# Train params
BATCH_SIZE = int(os.environ.get('BATCH_SIZE', '32'))
EPOCHS = int(os.environ.get('EPOCHS', '50'))
EARLY_STOPPING_TEST_SIZE = float(os.environ.get('EARLY_STOPPING_TEST_SIZE', '0.2'))
LEARNING_RATE = float(os.environ.get('LEARNING_RATE', '0.01'))
MOMENTUM = float(os.environ.get('MOMENTUM','0.9'))
WEIGHT_DECAY = float(os.environ.get('WEIGHT_DECAY','1e-4'))
USE_CACHE = bool(os.environ.get('USE_CACHE', 'True').lower() == 'true')
USE_ON_MEMORY = bool(os.environ.get('USE_ON_MEMORY', 'True').lower() == 'true')
if USE_ON_MEMORY:
USE_CACHE = True
NUM_DATA_LOAD_THREAD = int(os.environ.get('NUM_DATA_LOAD_THREAD', '1'))
if NUM_DATA_LOAD_THREAD > BATCH_SIZE:
NUM_DATA_LOAD_THREAD = BATCH_SIZE
RANDOM_SEED = int(os.environ.get('RANDOM_SEED', '42'))
# fcn_resnet101, deeplabv3_resnet101
SEGMENTATION_MODEL = os.environ.get('SEGMENTATION_MODEL', 'deeplabv3_resnet101')
DEVICE = os.environ.get('DEVICE','cuda')
FINE_TUNING = bool(os.environ.get('FINE_TUNING', 'True').lower() == 'true')
PRINT_FREQ = int(os.environ.get('PRINT_FREQ', '10'))
RESUME = os.environ.get('RESUME', '')
AUX_LOSS = bool(os.environ.get('AUX_LOSS','False').lower() == 'true')
PRETRAINED = bool(os.environ.get('PRETRAINED', 'True').lower() == 'true')
# distributed training parameters
DISTRIBUTED = True
WORLD_SIZE = 1
RANK = 1
DIST_URL=os.environ.get("DIST_URL", "env://")
GPU = 0
DIST_BACKEND = ''
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
RANK = int(os.environ["RANK"])
WORLD_SIZE = int(os.environ['WORLD_SIZE'])
GPU = int(os.environ['LOCAL_RANK'])
DIST_BACKEND = 'nccl'
elif 'SLURM_PROCID' in os.environ:
RANK = int(os.environ['SLURM_PROCID'])
GPU = RANK % torch.cuda.device_count()
DIST_BACKEND = 'nccl'
else:
DISTRIBUTED = False
# inference
RESIZE_TO_ORIGINAL = bool(os.environ.get('RESIZE_TO_ORIGINAL','False').lower() == 'true')
# For print
parameters = {
'BATCH_SIZE': BATCH_SIZE,
'EPOCHS': EPOCHS,
'EARLY_STOPPING_TEST_SIZE': EARLY_STOPPING_TEST_SIZE,
'LEARNING_RATE': LEARNING_RATE,
'MOMENTUM': MOMENTUM,
'SEGMENTATION_MODEL': SEGMENTATION_MODEL,
'DEVICE': DEVICE,
'FINE_TUNING': FINE_TUNING,
'PRETRAINED': PRETRAINED,
'WEIGHT_DECAY': WEIGHT_DECAY,
'USE_CACHE': USE_CACHE,
'USE_ON_MEMORY': USE_ON_MEMORY,
'NUM_DATA_LOAD_THREAD': NUM_DATA_LOAD_THREAD,
'RANDOM_SEED': RANDOM_SEED,
'PRINT_FREQ': PRINT_FREQ,
'AUX_LOSS': AUX_LOSS,
'DISTRIBUTED': DISTRIBUTED,
'WORLD_SIZE': WORLD_SIZE,
'DIST_URL': DIST_URL,
'RANK': RANK,
'GPU': GPU,
'DIST_BACKEND': DIST_BACKEND
}
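# Example (illustrative): every value above is read from os.environ, so a run can
# be configured without editing this file, e.g. from a shell (script name assumed):
#
#   BATCH_SIZE=64 DEVICE=cpu SEGMENTATION_MODEL=fcn_resnet101 python train.py
#
# or programmatically, as long as the variables are set before this module is imported:
#
#   os.environ['EPOCHS'] = '10'
#   import params  # hypothetical module name for this file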
|
import paramiko
import shlex
import subprocess
def ssh_command(ip, port, user, passwd, command):
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(ip, port=port, username=user, password=passwd)
ssh_session = client.get_transport().open_session()
if ssh_session.active:
ssh_session.send(command)
response = ssh_session.recv(1024)
print(response.decode())
while True:
            command = ssh_session.recv(1024)
try:
cmd = command.decode()
if cmd == 'exit':
client.close()
break
                cmd_output = subprocess.check_output(shlex.split(cmd))
ssh_session.send(cmd_output or 'Okay')
except Exception as e:
ssh_session.send(str(e))
client.close()
return
if __name__ == '__main__':
import getpass
user = input('Enter username: ') or 'tim'
password = getpass.getpass()
ip = input('Enter server IP: ') or '127.0.0.1'
    port = int(input('Enter port: ') or 2222)
ssh_command(ip, port, user, password, 'ClientConnected')
|
import numpy as np
class GridWorld:
def __init__(self, states, initial_state, transition_table, reward_table, terminal_table):
self.states = states
self.initial_state = initial_state
self.transition_table = transition_table
self.reward_table = reward_table
self.terminal_table = terminal_table
self.state = None
def step(self, a):
s = self.state
assert self.terminal_table[self.states.index(s)] != 1, 'Trying to step from a terminal state.'
        s_prime_pmf = self.transition_table[self.states.index(s), a]
        s_prime_idx = np.random.choice(len(self.states), p=s_prime_pmf)
s_prime = self.states[s_prime_idx]
r = self.reward_table[self.states.index(s), a]
t = self.terminal_table[self.states.index(s_prime)]
self.state = s_prime
return s_prime, r, t
def reset(self):
self.state = self.initial_state
return self.state[:]
a_index_to_symbol = {0: '\u2191', 1: '\u2192', 2: '\u2193', 3: '\u2190'}
def get_next_s(s, a, states):
# Up
if a == 0:
next_s = [s[0], s[1] + 1]
# Right
elif a == 1:
next_s = [s[0] + 1, s[1]]
# Down
elif a == 2:
next_s = [s[0], s[1] - 1]
# Left
elif a == 3:
next_s = [s[0] - 1, s[1]]
else:
raise ValueError(f'Action {a} not supported. Allowed values are [0, 1, 2, 3].')
return next_s if next_s in states else s
def get_next_s_idx(s, a, states):
return states.index(get_next_s(s, a, states))
def get_pmf_possible_s_primes(env, s, a):
"""
Returns a pmf over the states which it's possible to transition to from state s when performing action a.
Returns:
possible_s_prime_idxs: List of indexes of the possible next states (to be used to index env.states).
s_pmf: List of numbers ]0, 1] corresponding to the probability of transitioning to the states in
possible_s_prime_idxs.
"""
s_pmf = env.transition_table[env.states.index(s), a]
possible_s_prime_idxs = np.argwhere(s_pmf > 0.001).flatten()
return possible_s_prime_idxs, s_pmf[possible_s_prime_idxs]
def create_default_transition_table(states):
transition_table = np.zeros((len(states), 4, len(states)))
for i_s, s in enumerate(states):
for a in range(4):
s_prime = get_next_s(s, a, states)
transition_table[i_s, a, states.index(s_prime)] = 1
return transition_table
def create_stochastic_transition_table(states):
transition_table = np.zeros((len(states), 4, len(states)))
for i_s, s in enumerate(states):
for a in range(4):
for outcome_a in range(4):
if a == outcome_a:
transition_table[i_s, a, get_next_s_idx(s, outcome_a, states)] += 0.7
else:
transition_table[i_s, a, get_next_s_idx(s, outcome_a, states)] += 0.1
return transition_table
# Deterministic 3x3 gridworld
states = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
transition_table = create_default_transition_table(states)
reward_table = -np.ones((len(states), 4))
terminal_table = np.zeros(len(states))
terminal_table[-1] = 1
grid_world_3x3 = GridWorld(states=states,
initial_state=[0, 0],
transition_table=transition_table,
reward_table=reward_table,
terminal_table=terminal_table)
# Deterministic 2x2 gridworld
states = [[0, 0], [0, 1], [1, 0], [1, 1]]
transition_table = create_default_transition_table(states)
reward_table = -np.ones((len(states), 4))
terminal_table = np.zeros(len(states))
terminal_table[-1] = 1
grid_world_2x2 = GridWorld(states=states,
initial_state=[0, 0],
transition_table=transition_table,
reward_table=reward_table,
terminal_table=terminal_table)
# Random 3x3 gridworld
states = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
transition_table = create_stochastic_transition_table(states)
reward_table = -np.ones((len(states), 4))
terminal_table = np.zeros(len(states))
terminal_table[-1] = 1
grid_world_3x3_stoch = GridWorld(states=states,
initial_state=[0, 0],
transition_table=transition_table,
reward_table=reward_table,
terminal_table=terminal_table)
# Random line-gridworld
states = [[0, 0], [1, 0], [2, 0]]
transition_table = create_stochastic_transition_table(states)
reward_table = -np.ones((len(states), 4))
terminal_table = np.zeros(len(states))
terminal_table[-1] = 1
grid_world_line_stoch = GridWorld(states=states,
initial_state=[0, 0],
transition_table=transition_table,
reward_table=reward_table,
terminal_table=terminal_table)
def create_3x3_env_randomly_permuted_actions(stochastic=False):
"""
Creates a 3x3 grid-world where actions work differently for each state.
"""
states = [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2], [2, 0], [2, 1], [2, 2]]
if stochastic:
transition_table = create_stochastic_transition_table(states)
else:
transition_table = create_default_transition_table(states)
# Randomly permute what actions do in each state
for s_i in range(len(states)):
transition_table[s_i] = np.random.permutation(transition_table[s_i])
reward_table = -np.ones((len(states), 4))
terminal_table = np.zeros(len(states))
terminal_table[-1] = 1
grid_world = GridWorld(states=states,
initial_state=[0, 0],
transition_table=transition_table,
reward_table=reward_table,
terminal_table=terminal_table)
return grid_world
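# A minimal usage sketch (illustrative): run a single random-policy episode on the
# deterministic 3x3 grid-world defined above.
if __name__ == '__main__':
    state = grid_world_3x3.reset()
    episode_return = 0.0
    terminal = 0
    while not terminal:
        action = np.random.randint(4)
        state, reward, terminal = grid_world_3x3.step(action)
        episode_return += reward
    print('Episode finished in state', state, 'with return', episode_return)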
|
from setuptools import setup, find_packages
with open("requirements.txt") as f:
install_requires = f.read().strip().split("\n")
# get version from __version__ variable in order_packing/__init__.py
from order_packing import __version__ as version
setup(
name="order_packing",
version=version,
description="This module is to ease kitchen packing management",
author="Mubeen Ali",
author_email="[email protected]",
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
|
from django.contrib import admin
from gc_apps.gis_basic_file.models import GISDataFile
from shared_dataverse_information.dataverse_info.admin import DataverseInfoAdmin
class GISDataFileAdmin(DataverseInfoAdmin):
"""
Use the ModelAdmin from DataverseInfoAdmin and extend it to include GISDataFile specific fields
"""
search_fields = DataverseInfoAdmin.search_fields + ['dv_file']
readonly_fields = DataverseInfoAdmin.readonly_fields + ['md5']#, 'dv_session_token']
list_display = DataverseInfoAdmin.list_display + ['md5', 'dv_file']
# fieldsets: Use DataverseInfoAdmin fieldsets and add a GeoConnect specific row
#
fieldsets = [fs for fs in DataverseInfoAdmin.fieldsets]
geoconnect_fieldset = ('GeoConnect specific', {'fields': ['registered_dataverse', 'dv_session_token', 'dv_file', 'gis_scratch_work_directory']})
    fieldsets.insert(0, geoconnect_fieldset)  # show the GeoConnect fieldset first in the admin
# register the model
admin.site.register(GISDataFile, GISDataFileAdmin)
|
"""Constants for the securitas direct integration."""
# domain
DOMAIN = "securitas_direct"
# configuration properties
CONF_COUNTRY = "country"
CONF_LANG = "lang"
CONF_INSTALLATION = "installation"
# config flow
STEP_USER = "user"
STEP_REAUTH = "reauth_confirm"
MULTI_SEC_CONFIGS = "multiple_securitas_configs"
UNABLE_TO_CONNECT = "unable_to_connect"
SECURITAS_DIRECT_PLATFORMS = [
"alarm_control_panel",
]
|
"""merge bd438447496a and fd0f86cc5705
Revision ID: bcdf1134f0df
Revises: bd438447496a, fd0f86cc5705
Create Date: 2017-09-27 11:14:01.763062
"""
# revision identifiers, used by Alembic.
revision = 'bcdf1134f0df'
down_revision = ('bd438447496a', 'fd0f86cc5705')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
pass
def downgrade_data_broker():
pass
|
from Themes.Check import Check
from Themes.AbsolutePin import AbsolutePin
themes = [Check(), AbsolutePin()]
|
# occiput - Tomographic Inference
# Stefano Pedemonte
# Harvard University, Martinos Center for Biomedical Imaging
# Dec. 2013, Boston, MA
from __future__ import absolute_import, print_function
from . import Shapes
|
from . import providers
def socialaccount(request):
ctx = { 'providers': providers.registry.get_list() }
return dict(socialaccount=ctx)
|
# flake8: noqa
from catalyst.contrib.runners import *
|
# -*- coding: UTF-8 -*-
from django import forms
from apps.postitulos.models import ValidezNacional
from apps.registro.models import Establecimiento, Anexo, Jurisdiccion
from apps.postitulos.models import CohortePostitulo
ANIOS_COHORTE_CHOICES = [('', '-------')] + [(i, i) for i in range(CohortePostitulo.PRIMER_ANIO, CohortePostitulo.ULTIMO_ANIO)]
class ValidezNacionalFormFilters(forms.Form):
jurisdiccion = forms.ModelChoiceField(queryset=Jurisdiccion.objects.all().order_by('nombre'), required=False)
cue = forms.CharField(max_length=11, label='CUE', required=False)
carrera_postitulo = forms.CharField(max_length=40, label='Carrera', required=False)
postitulo_nacional = forms.CharField(max_length=40, label='Postítulo', required=False)
primera_cohorte = forms.ChoiceField(label='Primera Cohorte Autorizada', choices=ANIOS_COHORTE_CHOICES, required=False)
nro_infd = forms.CharField(max_length=40, label='Número INFD', required=False)
def __init__(self, *args, **kwargs):
self.jur = kwargs.pop('jurisdiccion')
super(ValidezNacionalFormFilters, self).__init__(*args, **kwargs)
if self.jur:
choices = [(j.id, j.nombre) for j in Jurisdiccion.objects.filter(pk=self.jur.id)]
self.fields['jurisdiccion'] = forms.ChoiceField(choices=choices, required=False)
def buildQuery(self, q=None):
"""
Crea o refina un query de búsqueda.
"""
if q is None:
q = ValidezNacional.objects.all()
if self.is_valid():
            def filter_by(field):
                return field in self.cleaned_data and self.cleaned_data[field] != '' and self.cleaned_data[field] is not None
if self.jur:
from django.db.models import Q
q = q.filter(
(Q(tipo_unidad_educativa='Sede') & Q(unidad_educativa_id__in=[e.pk for e in Establecimiento.objects.filter(dependencia_funcional__jurisdiccion__id=self.jur.id)])) |
(Q(tipo_unidad_educativa='Anexo') & Q(unidad_educativa_id__in=[a.pk for a in Anexo.objects.filter(establecimiento__dependencia_funcional__jurisdiccion__id=self.jur.id)]))
)
else:
if filter_by('jurisdiccion'):
                    # It may be a Sede, an Anexo, or a Jurisdicción; determine which
from django.db.models import Q
q = q.filter(
(Q(tipo_unidad_educativa='Sede') & Q(unidad_educativa_id__in=[e.pk for e in Establecimiento.objects.filter(dependencia_funcional__jurisdiccion__id=self.cleaned_data['jurisdiccion'].id)])) |
(Q(tipo_unidad_educativa='Anexo') & Q(unidad_educativa_id__in=[a.pk for a in Anexo.objects.filter(establecimiento__dependencia_funcional__jurisdiccion__id=self.cleaned_data['jurisdiccion'].id)]))
)
if filter_by('cue'):
q = q.filter(cue__icontains=self.cleaned_data['cue'])
if filter_by('carrera_postitulo'):
q = q.filter(carrera_postitulo__icontains=self.cleaned_data['carrera_postitulo'])
if filter_by('postitulo_nacional'):
q = q.filter(postitulo_nacional__icontains=self.cleaned_data['postitulo_nacional'])
if filter_by('primera_cohorte'):
q = q.filter(primera_cohorte=self.cleaned_data['primera_cohorte'])
if filter_by('nro_infd'):
q = q.filter(nro_infd__icontains=self.cleaned_data['nro_infd'])
return q
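# A minimal usage sketch (illustrative; the view name, request attribute, and
# ordering field are assumptions, not part of this module):
#
#   def listar_validez(request):
#       filters = ValidezNacionalFormFilters(request.GET, jurisdiccion=request.jurisdiccion)
#       validez_list = filters.buildQuery().order_by('cue')
#       ...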
|
"""
Test Factory to make fake objects for testing
"""
import factory
from factory.fuzzy import FuzzyChoice, FuzzyFloat
from service.models import Supplier
class SupplierFactory(factory.Factory):
"""Creates fake suppliers"""
class Meta:
"""Meta class
"""
model = Supplier
id = factory.Sequence(lambda n: n)
name = factory.Faker("company")
phone = factory.Faker("phone_number")
address = factory.Faker("address")
available = FuzzyChoice(choices=[True, False])
product_list = FuzzyChoice(choices=[[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6]])
rating = FuzzyFloat(0, 5, 2)
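# A minimal usage sketch (illustrative; assumes the Supplier model accepts these
# fields as constructor keyword arguments, which factory.Factory.build() requires):
if __name__ == "__main__":
    fakes = SupplierFactory.build_batch(3)
    for supplier in fakes:
        print(supplier.name, supplier.available, supplier.rating)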
|
from helper import greeting
def main():
greeting("Doing something really simply by using helper.py to print this message.")
if __name__=='__main__':
main()
|
import sys
import itertools
#print "This is the name of the script: ", sys.argv[0]
#print "Number of arguments: ", len(sys.argv)
#print "The arguments are: " , str(sys.argv)
use_circle_of_fifths = False
includeArpeggios = False
includeBackupArpeggiosOnly = True
UNIQUE_NOTES_MODE = True
IncludeAllOutput = False
colours = ("-fill \"rgb(255,155,200)\"",
"-fill \"rgb(255,200,155)\"",
"-fill \"rgb(200,255,200)\"",
"-fill \"rgb(200,200,255)\"",
"-fill \"rgb(155,200,255)\"",
"-fill \"rgb(200,155,255)\"",
"-fill \"rgb(155,255,200)\"")
##### Semitone Math:
semitones = [
"A",
"A#",
"B",
"C",
"C#",
"D",
"D#",
"E",
"F",
"F#",
"G",
"G#"
]
# Notes that are the same as one another:
equivilents = {
'F#': 'Gb',
'Gb': 'F#',
'G#': 'Ab',
'Ab': 'G#',
'A#': 'Bb',
'Bb': 'A#',
'C#': 'Db',
'Db': 'C#',
'D#': 'Eb',
'Eb': 'D#'
}
semitone_lookup = {}
i = 0
for tone in semitones:
semitone_lookup[tone] = i
if tone in equivilents:
semitone_lookup[equivilents[tone]] = i
i += 1
def create_circle_of_fifths(start_point):
o = []
end = start_point
first = 1
while (end != start_point or first):
first = 0
low = len(o) < 6
add_note = semitones[start_point]
if low and add_note in equivilents:
add_note = equivilents[add_note]
o.append(add_note)
start_point = (start_point + 7) % 12
return o
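# Illustrative example (not part of the original script): starting from C
# (semitone index 3) the helper above walks the twelve fifths once around,
# using flat spellings for accidentals among the first six entries:
#   create_circle_of_fifths(semitone_lookup["C"])
#   -> ['C', 'G', 'D', 'A', 'E', 'B', 'F#', 'C#', 'G#', 'D#', 'A#', 'F']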
class Note:
def __init__(self, musical_note, press, row_index, button_index):
self.note = musical_note
spl = self.NoteSplit(musical_note)
# we need to resolve any note into a basic version of itself that will share language with chords:
        # Probably we should just use the semitone number
self.semitone = semitone_lookup[spl[0]]
#self.basic = semitones[self.semitone]
self.basic = spl[0]
self.octave = int(spl[1])
self.isPress = press
self.all_basics = [self.basic]
if self.basic in equivilents:
equiv_note = equivilents[self.basic]
self.all_basics.append(equiv_note)
        # Generate an absolute value that corresponds to this musical tone:
semitone_score = semitone_lookup[spl[0]]
if semitone_score < semitone_lookup["C"]:
semitone_score += 12
semitone_score += int(spl[1]) * 12
self.score = semitone_score
self.row_index = int(row_index)
self.button_index = int(button_index) / 2
self.unique = row_index * 100 + button_index
def Key(self):
return self.score
def __str__(self):
return self.note + " " + str(self.unique)
def __repr__(self):
if self.isPress:
return self.note + "+"
else:
return self.note + "-"
def NoteMatch(self,other):
return self.score == other.score and self.isPress == other.isPress
def __eq__(self, other):
# use a semitone score comparison:
return self.unique == other.unique
def __lt__(self, other):
# Use the semitone score when sorting, too
return self.score < other.score
def __hash__(self):
return self.unique
def NoteSplit(self, note):
# This splits A5 into (A, 5)
note_name = note.rstrip("0123456789")
octave = note[len(note_name):]
return (note_name, octave)
class Layout:
def __init__(self):
self.rows = []
self.raw_rows = []
self.notes = []
self.chords = []
self.row_count = 0
def __str__(self):
return str("Diatonic layout with %d rows and %d buttons" % (len(self.raw_rows), len(self.notes) / 2) )
def __repr__(self):
return str(self.notes)
def CompareNote(note1, note2):
if note1[0] == note2[0]:
return True
return False
def CompareNoteSameDirection(note1, note2):
if CompareNote(note1,note2):
if note1[4] == note2[4]:
return True
return False
def AddRow(self, r):
# Row list is expected to be a stream of PUSH PULL notes from low to high
self.raw_rows.append(r)
# Generate a list of notes.
# This is a list of tuples in the format (N#, N, #, isPush)
# Start on a push:
push = True
for n in range(len(r)):
self.notes.append( Note(r[n], push, self.row_count, n) )
# Maybe I should use a dictionary?
#self.notes.append( (r[n], t[0], int(t[1]), semitone_score, push, (self.row_count, n) ) )
push = not push
self.row_count += 1
self.notes.sort()
def BuildChords(self, chord_list):
        # This method builds a list of all the chords for which the instrument
        # has the required notes, and records each chord's note structure in a list.
        # Notes that appear on multiple buttons at once need to be handled.
dddebug = False
for name, makeup, tone_makeup in chord_list:
            # For each chord name, examine the CSV note makeup. The preferred inversion is assumed from the order of the notes
            # in the layout.
l = tone_makeup
            # We will generate a dictionary of all the notes in the chord, keyed by the basic note that each one is.
            # We will also generate a list of all the notes that are involved just in a pile, as this is helpful for
            # keeping track of buttons that aren't part of a full preferred inversion (e.g. you have 3 Cs but 2 Gs)
notes_in_chord = {}
loose_notes = {"PUSH": [], "PULL": [], "ARP": []}
for n in self.notes:
tone = str(n.semitone)
if tone in tone_makeup:
# if the basic note is in the chord
# Add this note to the list of this note:
if tone in notes_in_chord:
notes_in_chord[tone].append(n)
else:
o = []
o.append(n)
notes_in_chord[tone] = o
loose_notes["ARP"].append(n)
if n.isPress:
loose_notes["PUSH"].append(n)
else:
loose_notes["PULL"].append(n)
# notes_in_chord now contains all notes that we have, push or pull, that are in this chord
            # Let's build every single combination of this chord and score them:
fixed_structure = []
for a in l:
if a in notes_in_chord:
fixed_structure.append(notes_in_chord[a])
else:
print fixed_structure
print notes_in_chord
print a
exit()
all_permutes = list(itertools.product(*fixed_structure))
scored_permutes = []
for perm in all_permutes:
scored_permutes.append( [0, "unknown", perm] )
#print scored_permutes
# All permutes now contains every permutation of the chord. Now, let's score all of these results and pick the best one
# After that we shall remove these notes as options, and then find the next best chord
def score_chords(chords):
for schord in chords:
score = 0.0
chord = schord[2]
root_note = chord[0]
# Step one: the lower the root note, the higher the score:
base = root_note.score
score += 100 - base
z = "PULL"
average_location = [0.0, 0.0]
for n in chord:
nsc = n.score
if nsc < base:
score -= 50
dist = abs(base - nsc)
# the closer the note is to the root note, the better:
score -= dist
# if the direction is mixed, remove a lot of points
if z == "PULL" and n.isPress:
z = "PUSH"
if n.isPress != root_note.isPress and z != "ARP":
score -= 1000
z = "ARP"
average_location[0] += n.button_index
average_location[1] += n.row_index
# Use a very small amount of distance calculation to prefer certain chords over others:
distance = pow(pow((root_note.button_index - average_location[0]),2) + pow((root_note.row_index - average_location[1]),2),0.5) / 100
score -= distance
schord[1] = z
schord[0] = score
#print schord
score_chords(scored_permutes)
scored_permutes.sort()#(key = lambda x: x[0])
if not includeArpeggios:
replacement = []
for x in range(len(scored_permutes)):
if not scored_permutes[x][1] == "ARP":
replacement.append(scored_permutes[x])
scored_permutes = replacement
#print name
#for test in scored_permutes:
# print test
#print "Beginning best chord selection for %s." % (name)
dddebug = False
chord_results = []
loop_protection = 0
while len(scored_permutes) > 0 and loop_protection < 50:
best_chord = scored_permutes[-1]
chord_results.append(best_chord)
if dddebug:
print "Best chord: " + str(best_chord)
print "There are %d records" % (len(scored_permutes))
next_list = []
best_score = best_chord[0]
best_typ = best_chord[1]
best_notes = best_chord[2]
# we know the best chord
# We do have duplicate notes, maybe we shouldn't include them?
for note in best_notes:
#for key in loose_notes.keys():
if note in loose_notes[best_typ]:
loose_notes[best_typ].remove(note)
for a, t, prev_result in scored_permutes:
exclude = 0
                    # We scan through all our permutations and remove any that include this button;
                    # this specifically represents a unique button, rather than a given pitch, e.g. C#5.
                    # However, if this isn't an arpeggio chord, we should not remove arpeggio chords:
for note in prev_result:
if UNIQUE_NOTES_MODE:
for other_note in best_notes:
if note.NoteMatch(other_note):
exclude += 1
else:
if note in best_notes:
exclude += 1
if best_typ != "ARP" and t == "ARP":
exclude = 0
if exclude == 0:
next_list.append([a,t,prev_result])
if dddebug:
print "There are now %d records" % (len(next_list))
#score_chords(next_list)
next_list.sort()
scored_permutes = next_list
loop_protection += 1
#for test in chord_results:
#print test
#print loose_notes
self.chords.append( ( (name, makeup), chord_results, loose_notes) )
return False
def FindChordNotes(self, chord):
return False
def DrawLayout(self):
# Calculates the height of our output:
maxlen = 0
for row in self.raw_rows:
maxlen = max(maxlen, len(row) / 2)
self.width = maxlen * (distance_x) + 2 * x_border
        # Height should be the borders on top, the total distance between the top of the top row and the top of the bottom row, and then
        # a single width of a circle
self.height = y_border * 2 + (self.row_count - 1) * distance_y + circle_size
half_circle = circle_size * 0.5
# The starting height will be the border plus half of a circle
row_base_height = y_border + half_circle
# Calculate center position for text:
center_x = self.width / 2
center_y = self.height / 2
# Set up the layout drawing:
layout_output = "convert -size %sx%s xc:transparent -fill transparent -strokewidth 2 -stroke black" % (self.width, self.height)
text_extra = " -fill black -font Arial -stroke transparent -pointsize 36 -gravity center"
text_small = " -pointsize 24"
self.arc_lookup = {}
# for each row, we will draw all the buttons and lines:
buttons_in_row = {}
for r in range(len(self.raw_rows)):
row = self.raw_rows[r]
buttons = len(row) / 2
buttons_in_row[r] = buttons
for j in range(buttons):
row_height = r * distance_y + row_base_height
circle_other = row_height - half_circle
line_low = row_height + half_circle
q = float(j) - float( buttons - 1) / 2
x = self.width/2 + q * distance_x
layout_output += " -draw \"circle %d,%d %d,%d\"" % (x ,row_height, x,circle_other)
layout_output += " -draw \"line %d,%d %d,%d\"" % (x, circle_other, x, line_low)
# Then draw the notes afterwards:
for note in self.notes:
basic = note.basic
octave = note.octave
push = note.isPress
location_row = note.row_index
location_button_index = note.button_index
row_height = location_row * distance_y + row_base_height
q = float(location_button_index) - float( buttons_in_row[location_row] - 1) / 2
x = self.width/2 + q * distance_x
text_x = x - center_x
arc_start = 90
arc_end = 270
if push:
basic_text_x = text_x - circle_size/4 + font_shift
octave_text_x = text_x - octave_x + font_shift
else:
basic_text_x = text_x + circle_size/4 + font_shift
octave_text_x = text_x + octave_x + font_shift
arc_start = 270
arc_end = 90
text_extra += " -draw \"text %d,%d '%s'\"" % (basic_text_x,row_height - center_y, basic)
text_small += " -draw \"text %d,%d '%s'\"" % (octave_text_x,row_height - center_y+octave_shift,octave)
tup = (x - half_circle + font_label_width,row_height - half_circle, x + half_circle + font_label_width, row_height + half_circle, arc_start, arc_end)
self.arc_lookup[note] = tup
layout_output += text_extra
layout_output += text_small
layout_output += " layout.png"
#print self.arc_lookup
#for a,b in self.arc_lookup.items():
# print a, b
return [self.width, self.height, layout_output]
def DrawChords(self):
if not self.chords:
return
output_base = "convert -size %dx%d xc:transparent " % (self.width + font_label_width,self.height)
output_base += " -stroke black -strokewidth 3 -draw \"line %d,%d %d,%d\" -draw \"line %d,%d %d,%d\" -draw \"line %d,%d %d,%d\"" % ( font_label_width, 0, font_label_width, height, 0, height, font_label_width + width, height, 0, 0, font_label_width + width, 0)
chord_draw = []
chord_files = []
for chord in self.chords:
name = chord[0][0]
makeup = chord[0][1]
# We may need to create up to 3 files depending on how we want this information to be displayed
# We have Push, Pull and Arpeggio
drawings = {"PUSH": "", "PULL": "", "ARP": ""}
colour_indexes = {"PUSH": 0, "PULL": 0, "ARP": 0}
has_non_arpeggio_solutions = False
solutions = chord[1]
for solution in solutions:
snew = ""
score = solution[0]
typ = solution[1]
notes = solution[2]
if typ != "ARP":
has_non_arpeggio_solutions = True
if typ != "ARP" or includeArpeggios or (not has_non_arpeggio_solutions and not includeBackupArpeggiosOnly):
c_index = colour_indexes[typ]
col = colours[c_index]
if (score < 0 and typ != "ARP") or (typ == "ARP" and has_non_arpeggio_solutions) or (typ == "ARP" and not has_non_arpeggio_solutions and score < -1000):
col = "-fill \"rgb(200,200,200)\""
else:
colour_indexes[typ] = colour_indexes[typ] + 1
#if name == "F +7":
#print typ, score, has_non_arpeggio_solutions, col, notes
snew = " %s" % col
for note in notes:
arc = self.arc_lookup[note]
snew += " -draw \"arc %d,%d %d,%d %d,%d\"" % arc
drawings[typ] += snew
for key, data in drawings.items():
snew = ""
# Do we have any output for this chord:
#print key, data
#print chord[2]
if data != "" or IncludeAllOutput:
                    # Grey boxes for loose notes:
snew = " -fill \"rgb(200,200,200)\""
for loose in chord[2][key]:
arc = self.arc_lookup[loose]
snew += " -draw \"arc %d,%d %d,%d %d,%d\"" % arc
drawings[key] += snew
for key, data in drawings.items():
if data != "":
chord_output = output_base
# Write down what chord this is
chord_output += " -stroke transparent -fill black -font Arial -pointsize 48 -gravity center -draw \"text %d,%d '%s'\" -draw \"text %d,%d '%s'\"" % ( - self.width / 2, -50, name + " " + key, -self.width / 2, 50, ",".join(makeup))
chord_output += " -gravity NorthWest"
chord_output += data
chord_output += " layout.png -geometry +%d+0 -composite -flatten \"%s.png\"" % (font_label_width, name + " " + key)
chord_files.append(name + " " + key)
drawings[key] = (chord_output)
for data in drawings.values():
if data != "":
chord_draw.append(data)
return (chord_draw, chord_files)
# Define the layout: We should be able to take this as a command line maybe?
row1 = ["F5","Eb5","D4","F#4","G4","A4","B4","C5","D5","E5",
"G5","F#5","B5","A5","D6","C6","G6","E6","B6","F#6"]
row2 = ["G#4","Bb4","A3","C#4","D4","E4","F#4","G4","A4","B4",
"D5","C#5","F#5","E5","A5","G5","D6","B5","F#6","C#6","A6","E6"]
row3 = []
### BC layout:
#row1 = ["E3","A3", "G3","B3", "C4","D4", "E4","F4", "G4","A4",
# "C5","B4", "E5","D5", "G5","F5", "C6","A5", "E6","B5"]
#row2 = ["D#3","G#3", "F#3","A#3", "B3","C#4", "D#4","E4", "F#4","G#4", "B4","A#4",
# "D#5","C#5", "F#5","E5", "B5","G#5", "D#6","A#5", "F#6", "C#6"]
lay = Layout()
lay.AddRow(row1)
lay.AddRow(row2)
if use_circle_of_fifths:
anchor_note = "G#"
circle_start_point = (semitones.index(anchor_note.upper())) % 12
output_order = (create_circle_of_fifths(circle_start_point))
else:
output_order = []
for j in range(12):
output_order.append(semitones[j])
output_order = ["G","A","B","C","D","E","F#"]
chords = []
def create_chords(root_notes, chord_patterns):
o = []
for root_note in root_notes:
for pattern in chord_patterns:
name = pattern[0]
offsets = pattern[1].split(",")
chord = []
chord_tones = []
semitone_number = semitone_lookup[root_note]
for offset in offsets:
num = (semitone_number + int(offset)) % 12
chord_tones.append(str(num))
chord.append(semitones[num])
root_name = root_note;
if root_note in equivilents:
root_name += "~" + equivilents[root_name]
o.append(( "%s %s" % (root_name, name) , tuple(chord), tuple(chord_tones) ) )
return o
all_chords = create_chords(output_order,(
# ("Major", "0,4,7"),
# ("+7", "0,4,7,10"),
# ("+Maj7", "0,4,7,11"),
# ("Minor", "0,3,7"),
# ("-7", "0,3,7,10"),
# ("-Maj7", "0,3,7,11"),
("I V", "0,7"),
# ("I +III", "0,4"),
# ("I -III", "0,3"),
# ("-III V", "4,7"),
# ("+III V", "3,7"),
))
# Now let's generate information for the layout
lay.BuildChords(all_chords)
circle_size = 120
distance_x = 150
distance_y = 120
x_border = 50
y_border = 50
font_shift = 2
octave_shift = 40
octave_x = 15
font_label_width = 500
chords_per_page = 9
layout_result = lay.DrawLayout()
width = layout_result[0]
height = layout_result[1]
layout_output = layout_result[2]
print layout_output
chord_draw_data = lay.DrawChords()
chord_draws = chord_draw_data[0]
chord_files = chord_draw_data[1]
for draw_string in chord_draws:
print draw_string
# Generate pages:
succeeded = 0
remaining = chords_per_page
page_number = 1
page = ""
while succeeded == 0:
remaining -= 1
if chord_files:
extra = chord_files[0]
del chord_files[0]
if remaining == chords_per_page - 1 and extra:
page = "convert \"%s.png\"" % (extra)
else:
page += " \"%s.png\" -append" % (extra)
else:
extra = "";
if remaining == 0:
if chord_files:
page += " page%d.png" % (page_number)
page_number += 1
print page
page = ""
remaining = chords_per_page
else:
succeeded = 1
if page:
page += " page%d.png" % (page_number)
print page
#print layout_output
#print push_arc_lookup |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import default, format
config = Script.get_config()
pid_dir = config['configurations']['storm-env']['storm_pid_dir']
pid_nimbus = format("{pid_dir}/nimbus.pid")
pid_supervisor = format("{pid_dir}/supervisor.pid")
pid_drpc = format("{pid_dir}/drpc.pid")
pid_ui = format("{pid_dir}/ui.pid")
pid_logviewer = format("{pid_dir}/logviewer.pid")
pid_rest_api = format("{pid_dir}/restapi.pid")
pid_files = {"logviewer":pid_logviewer,
"ui": pid_ui,
"nimbus": pid_nimbus,
"supervisor": pid_supervisor,
"drpc": pid_drpc,
"rest_api": pid_rest_api}
# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path()
tmp_dir = Script.get_tmp_dir()
conf_dir = "/etc/storm/conf"
storm_user = config['configurations']['storm-env']['storm_user']
storm_ui_principal = default('/configurations/storm-env/storm_ui_principal_name', None)
storm_ui_keytab = default('/configurations/storm-env/storm_ui_keytab', None)
|
import sys
def print_help():
print('Syntax: py charcases.py <path/to/input/file> <encoding> <path/to/output/file>')
def gen_case_permutations_matrix(n):
matrix_str = [bin(x)[2:].rjust(n, '0') for x in range(2 ** n)]
matrix = []
for line_str in matrix_str:
permutation = []
for digit in line_str:
if digit == '0':
permutation.append(False)
else:
permutation.append(True)
matrix.append(permutation)
return matrix
def gen_case_permutations(s):
result = []
matrix = gen_case_permutations_matrix(len(s))
for i in range(len(matrix)):
permutation = ''
for char_pos in range(len(s)):
if matrix[i][char_pos]:
permutation += s[char_pos].upper()
else:
permutation += s[char_pos].lower()
result.append(permutation)
return result
def apply_case(s, case_array):
if len(s) != len(case_array):
raise ValueError('apply_case(s, case_array): Length of s must equal length of case_array')
else:
result = ''
for i in range(len(s)):
if case_array[i]:
result += s[i].upper()
else:
result += s[i].lower()
return result
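# Illustrative examples for the helpers above (directly verifiable):
#   gen_case_permutations('ab')      -> ['ab', 'aB', 'Ab', 'AB']
#   apply_case('ab', [True, False])  -> 'Ab'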
if __name__ == '__main__':
if len(sys.argv) == 1:
print_help()
else:
encoding = 'utf-8'
if len(sys.argv) > 2:
encoding = sys.argv[2]
lines = []
file = open(sys.argv[1], 'r', encoding=encoding)
for line in file:
line = line.strip()
lines.append(line)
file.close()
outs = []
for line in lines:
for case in gen_case_permutations(line):
outs.append(case)
        if len(sys.argv) <= 3:
            # no separate output path given: overwrite the input file
            file = open(sys.argv[1], 'w', encoding=encoding)
        else:
            file = open(sys.argv[3], 'w', encoding=encoding)
for line in outs:
file.write(line)
file.write('\n')
file.close()
|
import gi
gi.require_version('Gst', '1.0')
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from lxml import etree
class XML(object):
@classmethod
def Text(cls, tagName, text):
node = etree.Element(tagName)
node.text = unicode(text)
return node
@classmethod
def CDATA(cls, tagName, text):
CDATA = getattr(etree, 'CDATA', None)
if CDATA is None:
return cls.Text(tagName, text)
node = etree.Element(tagName)
node.text = CDATA(unicode(text))
return node
@classmethod
def Element(cls, tagName, *children, **attributes):
node = etree.Element(tagName,
dict((k, unicode(v)) for k, v in attributes.items()))
node.extend(children)
return node
@classmethod
def toString(cls, elt):
return etree.tostring(elt, encoding='UTF-8')
@classmethod
def fromString(cls, strng):
return etree.fromstring(strng)
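# A minimal usage sketch (illustrative; the tag and attribute names below are made up):
if __name__ == '__main__':
    doc = XML.Element('package',
                      XML.Text('name', 'example'),
                      XML.CDATA('notes', 'contains <raw> & text'),
                      arch='x86_64')
    print XML.toString(doc)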
|
import csv
import psycopg2
import psycopg2.extras
def seed_table(conn):
# evidence: id, link
with open('dbsetup/training_data.csv', 'r') as f:
reader = csv.reader(f)
next(f) # skipping the header row
# order: id 6, link 13
data = []
for row in reader:
            # row[13] stores the links as a stringified Python list, e.g. "['a', 'b', 'c']";
            # strip the outer brackets/quotes and split on the separator to recover each link
            links = row[13][2:-2].split("', '")
for link in links:
data.append([row[6], link])
sql = """
INSERT INTO evidences
(incident_id, link)
VALUES %s
"""
psycopg2.extras.execute_values(
        conn, sql, data, template=None, page_size=10000)
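# A minimal usage sketch (illustrative; the connection parameters are placeholders):
if __name__ == '__main__':
    connection = psycopg2.connect(dbname='mydb', user='postgres',
                                  password='secret', host='localhost')
    seed_table(connection)
    connection.commit()
    connection.close()
|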
input_id = input('id : ')
id = 'joshua'
if input_id == id:
print('Welcome')
# if input_id != id:
# print('Who?')
else:
print('Who?') |
# -*- coding: utf-8 -*-
from flask import Markup
from flask.ext.wtf import Form
from wtforms import (ValidationError, HiddenField, BooleanField, TextField,
PasswordField, SubmitField)
from wtforms.validators import Required, Length, EqualTo, Email
from flask.ext.wtf.html5 import EmailField
from ..utils import (PASSWORD_LEN_MIN, PASSWORD_LEN_MAX,
USERNAME_LEN_MIN, USERNAME_LEN_MAX)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Negamax import Negamax
class HumanPlayer:
def __init__(self, sign):
self.sign = sign
def get_move(self, board, opponent):
while True:
move = raw_input()
if self.__is_valid(move, board):
break
print 'Please give a valid column number'
return int(move)
@staticmethod
def __is_valid(move, board):
try:
move = int(move)
except ValueError:
return False
return 0 < move <= board.width
class AIPlayer:
def __init__(self, sign, depth):
self.sign = sign
self.depth = depth
def get_move(self, board, opponent):
n = Negamax(board, self.depth)
move = n.calculate_move(board, self.sign, opponent.sign)
return move
|
import copy
from collections import OrderedDict
from .run import Run
from .tables import Tables
class ParameterSet:
def __init__(self, ps_id, simulator, params):
"""
Note
---
        Do not call the constructor directly.
        Instead, use the `simulator.find_or_create_parameter_set` method to create a ParameterSet.
"""
self._id = ps_id
self._sim_id = simulator.id()
assert isinstance(params, dict)
self._params = params
self._run_ids = []
def id(self):
"""
Returns
---
ps_id : int
"""
return self._id
def v(self):
"""
        Returns the parameter dictionary.
        The returned value is a deep copy, so it can be modified without affecting the ParameterSet.
Returns
---
v : dictionary
"""
return copy.deepcopy(self._params)
def simulator(self):
"""
Returns
---
sim : Simulator
"""
return Tables.get().sim_table[self._sim_id]
def create_runs_upto(self, target_num):
"""
creates Runs up to the specified number.
If the PS has no Run yet and target_num=5, 5 Runs are newly created.
If the PS has 5 or more Runs already, no runs are created.
List of Runs of size `target_num` is returned.
Parameters
---
target_num : int
Returns
---
runs : list of runs
"""
current = len(self._run_ids)
while target_num > current:
t = Tables.get()
next_id = len(t.tasks_table)
run = Run(next_id, self, current)
self._run_ids.append(run.id())
t.tasks_table.append(run)
current += 1
return self.runs()[:target_num]
def runs(self):
"""
Returns
---
runs : list of runs
"""
return [Tables.get().tasks_table[rid] for rid in self._run_ids]
def finished_runs(self):
"""
returns list of runs that are completed (irrespective of the return code).
Returns
---
runs : list of runs
"""
return [r for r in self.runs() if r.is_finished()]
def is_finished(self):
"""
returns True if all its Runs are completed.
Returns
---
flag : boolean
"""
return all([r.is_finished() for r in self.runs()])
def outputs(self):
"""
returns the list of outputs of Runs that are completed with return code zero.
Returns
---
out : list of json-like object
"""
return [r.output() for r in self.finished_runs() if r.rc() == 0]
def to_dict(self):
"""
serialize to a dictionary
Returns
---
serialized : dictionary
"""
o = OrderedDict()
o["id"] = self._id
o["sim_id"] = self._sim_id
o["params"] = self._params
o["run_ids"] = self._run_ids
return o
@classmethod
def all(cls):
"""
returns all ParameterSets
Returns
---
ps_list : list of ParameterSet
"""
return copy.copy(Tables.get().ps_table) # shallow copy
@classmethod
def find(cls, ps_id):
"""
find a ParameterSet
Parameters
---
ps_id : int
Returns
---
ps : ParameterSet or None
"""
return Tables.get().ps_table[ps_id]
def dumps(self):
"""
serialize to a JSON string
Returns
---
str : string
"""
runs_str = ",\n".join([" " + r.dumps() for r in self.runs()])
return "{\"id\": %d, \"params\": %s, \"runs\": [\n%s\n]}" % (self._id, str(self._params), runs_str)
|
# ----------------------------------------------------
# Test Bench for Chess AI v1.0.2
# Created By: Jonathan Zia
# Last Edited: Thursday, May 10 2018
# Georgia Institute of Technology
# ----------------------------------------------------
import tensorflow as tf
import numpy as np
import pieces as p
import random as r
import state as s
import time as t
import copy as c
import math
import os
# This program compares the performance of the specified trained model versus
# a second model for validation purposes. The program calculates wins/losses
# of the trained model versus a model that follows a random policy.
# ----------------------------------------------------
# User-Defined Constants
# ----------------------------------------------------
# Value Function Approximator Training
NUM_TESTING = 100 # Number of testing steps
HIDDEN_UNITS = 100 # Number of hidden units
BATCH_SIZE = 5 # Batch size
# Simulation Parameters
MAX_MOVES = 100 # Maximum number of moves
EPSILON = 0.0 # Defining epsilon for e-greedy policy (0 for testing -> greedy policy)
# Load File
LOAD_FILE = True # Load trained model from saved checkpoint (True for testing)
VISUALIZE = True # Select True to visualize games and False to suppress game output
PRINT = True # Select True to print moves as text and False to suppress printing
ALGEBRAIC = True # Specify long algebraic notation (True) or descriptive text (False)
# ----------------------------------------------------
# Data Paths
# ----------------------------------------------------
# Specify filenames
# Root directory:
dir_name = "D:\\"
with tf.name_scope("Model_Data"): # Model save/load paths
load_path = os.path.join(dir_name, "checkpoints/model") # Model load path
with tf.name_scope("Filewriter_Data"): # Filewriter save path
filewriter_path = os.path.join(dir_name, "output")
with tf.name_scope("Output_Data"): # Output data filenames (.txt)
# These .txt files will contain loss data for Matlab analysis
outcome_file = os.path.join(dir_name, "outcomes.txt")
# ----------------------------------------------------
# User-Defined Methods
# ----------------------------------------------------
def initialize_board(random=False, keep_prob=1.0):
"""
Initialize Game Board
Returns: Game board state parameters
"""
# Initialize board pieces
pieces = s.initialize_pieces(random=random,keep_prob=keep_prob)
# Initialize state space
board_state = s.board_state(pieces)
# Initialize current player:
if random:
if r.randint(0,1) == 1:
player = 'white'
else:
player = 'black'
else:
player = 'white'
# Initialize move counter:
move = 0
# Return values
return pieces, board_state, player, move
def visualize_board(pieces, player, move):
"""
Visualize Game Board
Returns: Void
"""
print("\nCurrent Board at Move " + str(move) + " for Player " + player)
print(s.visualize_state(pieces))
def move_piece(piece,move_index,player,pieces,switch_player=False,print_move=False,algebraic=True):
"""
Perform specified move
Returns: Void
"""
if player == 'white':
pieces[piece].move(move_index,pieces,print_move=print_move,algebraic=algebraic)
else:
pieces[piece+16].move(move_index,pieces,print_move=print_move,algebraic=algebraic)
if switch_player:
if player == 'white':
player = 'black'
else:
player = 'white'
return player
def generate_outcome(batch_size,max_moves,epsilon,visualize,print_move,algebraic):
"""
	Generating game outcome batches
	Returns: Outcome batch (array with one entry per game: +1 white win, 0 draw, -1 black win)
	"""
	# Generates outcome data from batches of full-depth Monte-Carlo simulations
	# performing epsilon-greedy policy evaluation (epsilon = 0 gives a purely greedy policy).
# Initialize placeholder for outcome batch
outcome_batch = []
# Loop through batch steps
for batch_step in range(0,batch_size):
# Print beginning of game notification for visualization
if visualize or print_move:
print("\n----------BEGIN GAME----------")
# ----------------------------------------------------
# Initialize Board State
# ----------------------------------------------------
# Create placeholders for board states and return for each state
all_states = []
all_returns = []
# Generating board parameters
pieces, initial_state, player, move = initialize_board(random=False, keep_prob=1.0)
point_diff_0 = s.points(pieces)
# ----------------------------------------------------
# Monte Carlo Simulations
# ----------------------------------------------------
# Run Monte Carlo Simulation until terminal event(s):
# Terminal events: Kings.is_active == False or move_counter > MAX_MOVES
while pieces[4].is_active and pieces[28].is_active and move < max_moves:
# Obtain board state
if move == 0:
board_state = initial_state
else:
board_state = s.board_state(pieces)
# Visualize board state
if visualize:
visualize_board(pieces,player,move)
# Obtain current point differential
net_diff = s.points(pieces) - point_diff_0
point_diff_0 = s.points(pieces)
# Append initial board state to all_states
all_states.append(board_state)
# Add net_diff to all existing returns
for i in range(0,len(all_returns)):
all_returns[i] += net_diff
# Append 0 to end of all_returns representing return for current state
all_returns.append(0)
# Obtain action space
action_space = s.action_space(pieces,player)
# ----------------------------------------------------
# Value Function Approximation
# ----------------------------------------------------
# For each action in the action space, obtain subsequent board space
			# and calculate the estimated return with the trained approximator
# Create placeholder for expected return values
return_array = np.zeros((16,56))
# For each possible move...
for i in range(0,16):
for j in range(0,56):
# If the move is legal...
if action_space[i,j] == 1:
# Perform move and obtain temporary board state
temp_pieces = c.deepcopy(pieces) # Reset temporary pieces variable
move_piece(i,j,player,temp_pieces) # Perform temporary move
temp_board_state = s.board_state(temp_pieces) # Obtain temporary state
# With temporary state, calculate expected return
expected_return = sess.run(predictions, feed_dict={inputs: np.reshape(temp_board_state,(1,768))})
# Write estimated return to return_array
return_array[i,j] = expected_return
# ----------------------------------------------------
# Policy
# ----------------------------------------------------
# Player white chooses greedy policy, player black chooses random policy
# For player black, choose a random action
if player == 'black':
while True:
# If the action is valid...
piece_index = r.randint(0,15)
move_index = r.randint(0,55)
if return_array[piece_index,move_index] != 0:
# Perform move and update player
player = move_piece(piece_index,move_index,player,pieces,switch_player=True,print_move=print_move,algebraic=algebraic)
break
# Else, act greedy w.r.t. expected return
else:
				# Identify indices of the maximum expected return (greedy choice for the trained white player)
move_choice = np.nonzero(return_array.max() == return_array)
piece_index = move_choice[0][0]
move_index = move_choice[1][0]
# Perform move and update player
player = move_piece(piece_index,move_index,player,pieces,switch_player=True,print_move=print_move,algebraic=algebraic)
# Increment move counter
move += 1
# Print end of game notification for visualization
if visualize or print_move:
print("----------END OF GAME----------")
# Append outcome
# If player white won the game...
if all_returns[0] > 0:
outcome_batch.append(1) # Return 1
# Else, for a draw...
elif all_returns[0] == 0:
outcome_batch.append(0) # Return 0
# Else, if player black won the game...
else:
outcome_batch.append(-1) # Return -1
# Return outcome batch
outcome_batch = np.array(outcome_batch)
return outcome_batch
# ----------------------------------------------------
# Importing Session Parameters
# ----------------------------------------------------
# Create placeholders for inputs and target values
# Input dimensions: 8 x 8 x 12
# Target dimensions: 1 x 1
inputs = tf.placeholder(tf.float32,[None,768],name='Inputs')
targets = tf.placeholder(tf.float32,shape=(None,1),name='Targets')
# ----------------------------------------------------
# Implementing Feedforward NN
# ----------------------------------------------------
# First fully-connected layer
hidden1 = tf.contrib.layers.fully_connected(inputs,num_outputs=HIDDEN_UNITS)
# Second fully-connected layer
hidden2 = tf.contrib.layers.fully_connected(hidden1,num_outputs=HIDDEN_UNITS)
# Output layer
predictions = tf.contrib.layers.fully_connected(hidden2,num_outputs=1,activation_fn=None)
# ----------------------------------------------------
# Run Session
# ----------------------------------------------------
saver = tf.train.Saver() # Instantiate Saver class
outcomes = [] # Initialize placeholder for outcomes
with tf.Session() as sess:
# Create Tensorboard graph
#writer = tf.summary.FileWriter(filewriter_path, sess.graph)
#merged = tf.summary.merge_all()
# Restore saved session
saver.restore(sess, load_path)
# Obtain start time
start_time = t.time()
	# For each testing step, play a batch of games and record the outcomes
for step in range(0,NUM_TESTING):
# Run game and determine outcome
outcome = generate_outcome(batch_size=BATCH_SIZE,max_moves=MAX_MOVES,epsilon=EPSILON,visualize=VISUALIZE,print_move=PRINT,algebraic=ALGEBRAIC)
outcomes.append(outcome)
# Conditional statement for calculating time remaining and percent completion
if step % 1 == 0:
# Report percent completion
p_completion = 100*step/NUM_TESTING
print("\nPercent Completion: %.3f%%" % p_completion)
# Print time remaining
avg_elapsed_time = (t.time() - start_time)/(step+1)
sec_remaining = avg_elapsed_time*(NUM_TESTING-step)
min_remaining = round(sec_remaining/60)
print("Time Remaining: %d minutes" % min_remaining)
# Print mean outcome
print(outcome)
print("Mean Outcome: %.3f" % np.mean(outcomes))
# Write outcomes to file
outcomes = np.array(outcomes)
with open(outcome_file, 'a') as file_object:
np.savetxt(file_object, outcomes)
# Close the writer
	# writer.close()
|
from lxml import html,etree
from urllib.parse import urljoin
import requests
import os, pyodbc
import pandas as pd
import csv
url='https://www.yelp.com/search?find_desc=Restaurants&find_loc=Cleveland%2C+OH'
server='HASNAIN2020'
database='YELP_RESTAURANTS'
table='REVIEWS'
def main():
#load the yelp main page
page=requests.get(url)
tree=html.fromstring(page.content)
html_etree=etree.ElementTree(tree)
#getting the names and links of all restaurants
print("getting a list of all the restaurants\n")
links=html_etree.xpath(".//span[@class='lemon--span__09f24__3997G text__09f24__2tZKC text-color--black-regular__09f24__1QxyO text-align--left__09f24__3Drs0 text-weight--bold__09f24__WGVdT text-size--inherit__09f24__2rwpp']/a")
#putting them in a data frame and then later to a csv file
Rest_df=pd.DataFrame(columns=['Name','link'])
for link in links:
Rest_df=Rest_df.append({'Name':link.attrib['name'], 'link':'http://yelp.com/'+link.attrib['href']},ignore_index=True)
print(Rest_df)
Rest_df.to_csv('links.csv')
#creating a table and connecting to the server
cursor=connect_to_sql_server()
#fetching and putting the reviews for each restaurant
df=pd.DataFrame()
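    # NOTE: the loop below starts at index 1, so the first restaurant in Rest_df is never scraped;
    # if every row is wanted, range(len(Rest_df)) would include it.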
for i in range(1,len(Rest_df)):
print("Extracting Reviews for :"+Rest_df['Name'].values[i])
makeDirectory(Rest_df['Name'].values[i])
df=df.append(extract_info(cursor,Rest_df['Name'].values[i],Rest_df['link'].values[i]))
df.to_csv('Reviews.csv')
def connect_to_sql_server():
#connect to sql server
print("Connecting to the server")
odbc_conn=pyodbc.connect('DRIVER={SQL SERVER};SERVER='+server+';Trusted_Connection=yes;')
odbc_conn.autocommit=True
cursor=odbc_conn.cursor()
    #create the database if it does not exist
    transaction="IF DB_ID('{0}') IS NULL CREATE DATABASE {0};".format(database)
    cursor.execute(transaction)
    print("created database (if it did not already exist)")
    transaction="USE {0}".format(database)
    cursor.execute(transaction)
    print("using database "+database)
    #drop the table if it already exists
    transaction="IF OBJECT_ID('dbo.{0}') IS NOT NULL DROP TABLE dbo.{0};".format(table)
    cursor.execute(transaction)
    #create the table
    transaction=" CREATE TABLE dbo.{0} (RESTAURANT_NAME VARCHAR(30),LOCATION VARCHAR(50), REVIEWER_NAME VARCHAR(45),RATINGS VARCHAR(15), REVIEW_TEXT NVARCHAR(MAX));".format(table)
    cursor.execute(transaction)
    print("table created")
    return cursor
def insert_row_into_table(cursor,name,location,reviewer,ratings,text):
text=text.replace("'","''")
reviewer=reviewer.replace("'","''")
name=name.replace("'","")
    #insert the review row into the table
transaction="INSERT INTO {0} VALUES( '{1}','{2}','{3}','{4}','{5}');".format(table,name,location,reviewer,ratings,text)
cursor.execute(transaction)
write_to_file(name,reviewer,text)
def extract_info(cursor, restName,rest_link):
Reviews_df=pd.DataFrame(columns=['Restaurant','Location','Reviewer','Ratings','Review'])
print("\nExtracting the reviews for:")
print("Restaurant Name is:"+restName)
print("link :"+rest_link)
page=requests.get(rest_link)
tree=html.fromstring(page.content)
html_etree=etree.ElementTree(tree)
location=html_etree.xpath(".//address//text()")
location=''.join(location)
print("Location:"+location)
#Gets a list of all the reviews
listing = html_etree.xpath(".//li[@class='lemon--li__373c0__1r9wz margin-b3__373c0__q1DuY padding-b3__373c0__342DA border--bottom__373c0__3qNtD border-color--default__373c0__3-ifU']")
for results in listing:
names=results.xpath(".//span/a[@class='lemon--a__373c0__IEZFH link__373c0__1G70M link-color--inherit__373c0__3dzpk link-size--inherit__373c0__1VFlE']/text()")
ratings=results.xpath(".//span[@class='lemon--span__373c0__3997G display--inline__373c0__3JqBP border-color--default__373c0__3-ifU']/div/@aria-label")
text=results.xpath(".//span[@class='lemon--span__373c0__3997G raw__373c0__3rKqk']/text()")
insert_row_into_table(cursor,restName, location, names[0], ratings[0], text[0])
dict={'Restaurant':restName,'Location':location,'Reviewer':names[0],'Ratings':ratings[0],'Review':text[0]}
Reviews_df=Reviews_df.append(dict,ignore_index=True)
print(Reviews_df)
return(Reviews_df)
def write_to_file(resname,file_name,text):
#print("inside write to file function")
file_path=os.path.dirname(os.path.realpath(__file__))
file_path=os.path.join(file_path, 'Output')
file_path=os.path.join(file_path, resname)
file_name=file_name.replace("'", "").replace(".", "")
file_path=os.path.join(file_path,file_name)
#print(file_path)
file=open(file_path,"a")
file.write(text)
file.close()
return file_path
def makeDirectory(resName):
path=os.path.dirname(os.path.realpath(__file__))
path=os.path.join(path,'Output')
path=os.path.join(path, resName)
#print(path)
    os.makedirs(path, exist_ok=True)
main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import subprocess
import unittest
import unittest.mock
from antlir.common import run_stdout_to_err
from antlir.fs_utils import Path, temp_dir
from antlir.loopback import (
BtrfsLoopbackVolume,
LoopbackVolume,
MIN_CREATE_BYTES,
MIN_SHRINK_BYTES,
MiB,
)
from antlir.unshare import Unshare, Namespace
class LoopbackTestCases(unittest.TestCase):
# All these tests make mounts, so to avoid leaking them we run in a mount
# namespace. Moreover, we don't want to leak `sudo`ed commands on crash,
# so use a PID namespace to ensure they get garbage-collected.
@contextlib.contextmanager
def _test_workspace(self):
with Unshare([Namespace.MOUNT, Namespace.PID]) as ns, temp_dir() as td:
yield (ns, td)
def test_loopback(self) -> None:
with self._test_workspace() as (ns, td):
image_path = td / "image.btrfs"
test_message = "I am a beautiful loopback"
# Make a btrfs loopback
with BtrfsLoopbackVolume(
unshare=ns,
image_path=image_path,
size_bytes=128 * MiB,
compression_level=1,
) as vol:
self.assertEqual(128 * MiB, vol.get_size())
cmd = f"echo '{test_message}' > {vol.dir()}/msg"
run_stdout_to_err(
ns.nsenter_as_root(
"/bin/bash",
"-uec",
cmd,
),
)
# Mount it as a generic LoopbackVolume
# and confirm the contents
with LoopbackVolume(
unshare=ns,
image_path=image_path,
fs_type="btrfs",
) as vol:
msg_file = vol.dir() / "msg"
msg_text = subprocess.run(
ns.nsenter_as_root(
"cat",
msg_file,
),
text=True,
capture_output=True,
).stdout.strip("\n")
self.assertEqual(test_message, msg_text)
@unittest.mock.patch("antlir.loopback.kernel_version")
def test_btrfs_loopback_rounded_size(self, kernel_version) -> None:
# Mock a kernel version that requires the size to be
# rounded up.
kernel_version.return_value = (4, 6)
with self._test_workspace() as (ns, td):
image_path = td / "image.btrfs"
with BtrfsLoopbackVolume(
unshare=ns,
image_path=image_path,
# We want to make this a non-multiple of 4096
size_bytes=128 * MiB - 3,
compression_level=1,
) as vol:
# Confirm it has been rounded up
self.assertEqual(128 * MiB, vol.get_size())
def test_btrfs_loopback_min_create_size(self) -> None:
with self._test_workspace() as (ns, td):
image_path = td / "image.btrfs"
# Make a btrfs loopback that is smaller than the min
with self.assertRaisesRegex(
AttributeError,
f"A btrfs loopback must be at least {MIN_CREATE_BYTES}",
):
with BtrfsLoopbackVolume(
unshare=ns,
image_path=image_path,
size_bytes=32768, # Pretty small
compression_level=1,
):
pass
def test_btrfs_loopback_minimize(self) -> None:
# Make a btrfs loopback that is smaller than the min
# shrink size to confirm that we don't shrink
size = MIN_SHRINK_BYTES - (1 * MiB)
with self._test_workspace() as (ns, td):
image_path = td / "image.btrfs"
with BtrfsLoopbackVolume(
unshare=ns,
image_path=image_path,
size_bytes=size,
compression_level=1,
) as vol:
self.assertEqual(size, vol.get_size())
self.assertEqual(size, vol.minimize_size())
        # Make a btrfs loopback that is slightly larger
# than the min shrink size to confirm that we shrink
size = MIN_SHRINK_BYTES + (1 * MiB)
with self._test_workspace() as (ns, td):
image_path = td / "image.btrfs"
with BtrfsLoopbackVolume(
unshare=ns,
image_path=image_path,
size_bytes=size,
compression_level=1,
) as vol:
self.assertEqual(size, vol.get_size())
self.assertEqual(MIN_SHRINK_BYTES, vol.minimize_size())
def test_btrfs_loopback_receive(self) -> None:
with Unshare([Namespace.MOUNT, Namespace.PID]) as ns, temp_dir() as td:
image_path = td / "image.btrfs"
with BtrfsLoopbackVolume(
unshare=ns,
image_path=image_path,
size_bytes=MIN_CREATE_BYTES,
compression_level=1,
) as vol, open(
Path(__file__).dirname() / "create_ops.sendstream"
) as f:
# pyre-fixme[6]: For 1st param expected `int` but got `TextIOWrapper`.
ret = vol.receive(f)
self.assertEqual(0, ret.returncode)
self.assertIn(b"At subvol create_ops", ret.stderr)
|
import re
class Day13Solution:
def __init__(self):
file = open("day13.txt", "r")
self.start = int(file.readline())
line = file.readline()
self.buses = [int(i) for i in re.findall("(?P<num>\d+)", line)]
self.items = line.split(",")
def partOne(self) -> int:
wait = self.start
result = 0
for bus in self.buses:
next = bus - (self.start % bus)
if next < wait:
wait = next
result = wait * bus
return result
def partTwo(self) -> int:
items = {}
for index in range(0,len(self.items)):
if self.items[index] == "x":
continue
items[index] = int(self.items[index])
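        # The starting value and step below appear to be precomputed for this specific puzzle
        # input (a sieve / Chinese-Remainder-style shortcut); a fully general solution would
        # start at 0 and grow the step by multiplying in each bus ID as it is matched.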
x = 118889450154
result = 0
while not result:
x += 161028941683
for index in items:
if (x + index) % items[index] != 0:
break
if index == len(self.items)-1:
result = x
return result
solution = Day13Solution()
print(solution.partOne())
print(solution.partTwo())
|
import logging
import os
import json
import azure.functions as func
from azure.data.tables import TableClient
from azure.core.exceptions import HttpResponseError
from furl import furl
import requests
from azure.data.tables import UpdateMode
BASE_URL = "http://dev.virtualearth.net/REST/V1/Routes/Driving"
LOCATIONS_BASE_URL = 'http://dev.virtualearth.net/REST/v1/Locations'
def get_location(street, zipCode):
f = furl(LOCATIONS_BASE_URL)
f.args["o"] = "json"
f.args["key"] = os.getenv("BING_MAPS_KEY")
f.args["countryRegion"] = "CH"
f.args["postalCode"] = zipCode
f.args["addressLine"] = street
logging.info(f.url)
r = requests.get(f.url)
j = r.json()
first_resource = j['resourceSets'][0]['resources'][0]
coordinates = first_resource['point']['coordinates']
return str(coordinates[0]) + ',' + str(coordinates[1])
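# get_location returns "latitude,longitude" as a single string, e.g. "47.3769,8.5417"
# (illustrative coordinates); this is the comma-separated waypoint format passed to the
# route requests below (wp.0, vwp.1, ...).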
def get_duration_with_passenger(driver_start, passenger_start, passenger_end, driver_end):
f = furl(BASE_URL)
f.args["o"] = "json"
f.args["key"] = os.getenv("BING_MAPS_KEY")
f.args["optimize"] = "time"
f.args["wp.0"] = driver_start
f.args["vwp.1"] = passenger_start
f.args["vwp.2"] = passenger_end
f.args["wp.3"] = driver_end
r = requests.get(f.url)
j = r.json()
first_resource = j['resourceSets'][0]["resources"][0]
result = {}
result['travelDuration'] = first_resource["travelDuration"]
return result
def get_duration_without_passenger(driver_start, driver_end):
f = furl(BASE_URL)
f.args["o"] = "json"
f.args["key"] = os.getenv("BING_MAPS_KEY")
f.args["optimize"] = "time"
f.args["wp.0"] = driver_start
f.args["wp.1"] = driver_end
r = requests.get(f.url)
j = r.json()
first_resource = j['resourceSets'][0]["resources"][0]
result = {}
result['travelDuration'] = first_resource["travelDuration"]
return result
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
driver_name = req.params.get('driver')
passenger_name = req.params.get('passenger')
access_key = os.getenv("TABLES_PRIMARY_STORAGE_ACCOUNT_KEY")
endpoint_suffix = os.getenv("TABLES_STORAGE_ENDPOINT_SUFFIX")
account_name = os.getenv("TABLES_STORAGE_ACCOUNT_NAME")
bing_maps_key = os.getenv("BING_MAPS_KEY")
endpoint = "{}.table.{}".format(account_name, endpoint_suffix)
connection_string = "DefaultEndpointsProtocol=https;AccountName={};AccountKey={};EndpointSuffix={}".format(
account_name, access_key, endpoint_suffix)
table_name = "Route"
with TableClient.from_connection_string(connection_string, table_name) as table_client:
try:
driver = table_client.get_entity('testing', driver_name)
passenger = table_client.get_entity('testing', passenger_name)
driver_start_address = (driver['StartAddressStreet'] + ', ' + driver['StartAddressZipCode'] +
' ' + driver['StartAddressCity'])
passenger_start_address = (passenger['StartAddressStreet'] + ', ' + passenger['StartAddressZipCode'] +
' ' + passenger['StartAddressCity'])
passenger_destination_address = (passenger['DestinationAddressStreet'] + ', ' + passenger['DestinationAddressZipCode'] +
' ' + passenger['DestinationAddressCity'])
driver_destination_address = (driver['DestinationAddressStreet'] + ', ' + driver['DestinationAddressZipCode'] +
' ' + driver['DestinationAddressCity'])
duration_without_passenger = get_duration_without_passenger(driver_start_address, driver_destination_address)
duration_with_passenger = get_duration_with_passenger(driver_start_address,
passenger_start_address, passenger_destination_address, driver_destination_address)
driver['DurationWithoutPassenger'] = duration_without_passenger['travelDuration']
table_client.upsert_entity(mode=UpdateMode.REPLACE, entity = driver)
logging.info(str(duration_without_passenger['travelDuration']) + ' vs ' + str(duration_with_passenger['travelDuration']))
result = {
'PartitionKey': 'testing',
'RowKey': driver_name + '_' + passenger_name,
'Driver': driver_name,
'Passenger': passenger_name,
'Duration': duration_with_passenger['travelDuration'],
'DriverStartCoordinates': get_location(driver['StartAddressStreet'], driver['StartAddressZipCode']),
'PassengerStartCoordinates': get_location(passenger['StartAddressStreet'], passenger['StartAddressZipCode']),
'PassengerDestinationCoordinates': get_location(passenger['DestinationAddressStreet'], passenger['DestinationAddressZipCode']),
'DriverDestinationCoordinates': get_location(driver['DestinationAddressStreet'], driver['DestinationAddressZipCode'])
}
with TableClient.from_connection_string(connection_string, 'TravelDuration') as duration_client:
duration_client.upsert_entity(mode=UpdateMode.REPLACE, entity=result)
return func.HttpResponse(
json.dumps(result),
mimetype='application/json',
status_code=200
)
except HttpResponseError as e:
logging.error(e)
return func.HttpResponse(json.dumps([]), mimetype = 'application/json')
|
import unittest
from isobutane import *
class TestMethods(unittest.TestCase):
def test_get_oxygen_volume_of_air(self):
self.assertAlmostEqual(
get_oxygen_volume_of_air(100),
21
)
def test_get_air_volume_of_oxygen(self):
self.assertAlmostEqual(
get_air_volume_of_oxygen(0.21),
1.0
)
def test_calc_volume_to_mol(self):
self.assertAlmostEqual(
calc_volume_to_mol(0.02271095464),
1
)
def test_calc_mol_to_volume(self):
self.assertAlmostEqual(
calc_mol_to_volume(1),
0.02271095464
)
def test_oxygen_needed_for_isobutan(self):
self.assertAlmostEqual(
oxygen_needed_for_isobutane(1),
6.5
)
def test_isobutane_needed_for_oxygen(self):
self.assertAlmostEqual(
isobutane_needed_for_oxygen(6.5),
1
)
def test_isobutan_vol_to_air_vol(self):
self.assertAlmostEqual(
isobutane_vol_to_air_vol(.1),
3.095238095
)
def test_air_vol_to_isobutan_vol(self):
self.assertAlmostEqual(
air_vol_to_isobutane_vol(3.095238095),
.1
)
def test_air_volume_for_isobutan_mols(self):
self.assertAlmostEqual(
air_volume_for_isobutane_mols(1.422559853501),
1.0
)
def test_print_needed_air(self):
print_needed_air(0.1)
def test_print_needed_isobutane(self):
print_needed_isobutane(0.1)
def test_main(self):
main()
if __name__ == '__main__':
unittest.main()
|
def is_valid_password(candidate: int) -> bool:
has_two_adjacent_digits = False
has_increasing_digits = True
digits = [int(digit) for digit in str(candidate)]
previous_digit = digits[0]
repeat_count = 0
for digit in digits[1:]:
if digit < previous_digit:
has_increasing_digits = False
break
elif digit == previous_digit:
repeat_count += 1
else:
if repeat_count == 1:
has_two_adjacent_digits = True
repeat_count = 0
previous_digit = digit
if repeat_count == 1:
has_two_adjacent_digits = True
return has_two_adjacent_digits and has_increasing_digits
assert is_valid_password(112233)
assert not is_valid_password(123444)
assert is_valid_password(111122)
valid_passwords = 0
for candidate in range(356261, 846303):
if is_valid_password(candidate):
valid_passwords += 1
print(f'task 2: {valid_passwords}')
|
from releash import *
myself = add_package(".", "releash")
myself.version_source = VersionSource(myself, 'releash.py')
myself.version_targets.append(VersionTarget(myself, 'releash.py'))
myself.release_targets.append(ReleaseTargetSourceDist(myself))
myself.release_targets.append(ReleaseTargetGitTagVersion(myself.version_source))
myself.release_targets.append(ReleaseTargetGitPush())
#myself.release_targets.append(ReleaseTargetCondaForge(myself, 'releash-fake-feedstock'))
|
# Created on Mar 9, 2012
#
# @author: Richard Plevin
# @author: Sam Fendell
# @author: Ryan Jones
#
# Copyright (c) 2012-2015. The Regents of the University of California (Regents)
# and Richard Plevin. See the file COPYRIGHT.txt for details.
'''
This module includes contributions by Sam Fendell and Ryan Jones.
'''
from __future__ import print_function
from collections import Iterable
from contextlib import contextmanager
from datetime import datetime
from six import string_types, iteritems, MAXSIZE
from six.moves import xrange
import sys
from sqlalchemy import create_engine, Table, Column, String, Float, text, MetaData, event
from sqlalchemy.engine import Engine
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker, load_only
from sqlalchemy.orm.exc import NoResultFound
#from sqlalchemy.pool import QueuePool
from pygcam.config import getSection, getParam, getParamAsBoolean
from pygcam.log import getLogger
from . import util as U
from .constants import RegionMap
from .error import PygcamMcsUserError, PygcamMcsSystemError
from .schema import (ORMBase, Run, Sim, Input, Output, InValue, OutValue, Experiment,
Program, Code, Region, TimeSeries)
_logger = getLogger(__name__)
def usingSqlite():
'''
Return True if the DbURL indicates that we're using Sqlite, else return False
'''
url = getParam('MCS.DbURL')
return url.lower().startswith('sqlite')
def usingPostgres():
'''
Return True if the DbURL indicates that we're using Postgres, else return False
'''
url = getParam('MCS.DbURL')
return url.lower().startswith('postgres')
@event.listens_for(Engine, "connect")
def sqlite_FK_pragma(dbapi_connection, connection_record):
'''Turn on foreign key support in sqlite'''
if usingSqlite():
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
# TODO: might be useful:
# pd.read_sql_table('data', engine, columns=['Col_1', 'Col_2'])
# pd.read_sql_table('data', engine, index_col='id')
# pd.read_sql_table('data', engine, parse_dates=['Date'])
# pd.read_sql_table('data', engine, parse_dates={'Date': '%Y-%m-%d'})
RegionAliases = {
'all regions': 'global',
'rest of world': 'multiple',
'row': 'multiple'
}
# The name of the program as stored in the "program" table
GCAM_PROGRAM = 'gcam'
# Constants to avoid misspelling errors
DFLT_PROGRAM = 'default'
# Status strings for Run table
RUN_NEW = 'new'
RUN_QUEUED = 'queued'
RUN_RUNNING = 'running'
RUN_SUCCEEDED = 'succeeded'
RUN_FAILED = 'failed'
RUN_KILLED = 'killed'
RUN_ABORTED = 'aborted'
RUN_ALARMED = 'alarmed'
RUN_UNSOLVED = 'unsolved'
RUN_GCAMERROR = 'gcamerror' # any other GCAM runtime error
ENG_TERMINATE = 'terminate'
RUN_FAILURES = [RUN_FAILED, RUN_KILLED, RUN_ABORTED, RUN_ALARMED, RUN_UNSOLVED,
RUN_GCAMERROR]
RUN_STATUSES = [RUN_NEW, RUN_QUEUED, RUN_RUNNING, RUN_SUCCEEDED] + RUN_FAILURES
# TBD: maybe drop this and store it from Context instead
def beforeSavingRun(_mapper, _connection, run):
'''
Before inserting/updating a Run instance, set numerical status and
timestamps according to the status string.
'''
if run.status in (RUN_NEW, RUN_QUEUED):
run.queueTime = datetime.now()
run.startTime = None
run.endTime = None
run.duration = None
elif run.status == RUN_RUNNING:
run.startTime = datetime.now()
run.endTime = None
run.duration = None
elif run.startTime:
run.endTime = datetime.now()
delta = run.endTime - run.startTime
run.duration = delta.seconds // 60
# Associate a listener function with Run, to execute before inserts and updates
event.listen(Run, 'before_insert', beforeSavingRun)
event.listen(Run, 'before_update', beforeSavingRun)
def parseSqlScript(filename=None, text=None):
'''
Parse a SQL script into semi-colon-terminated statements.
:param filename: (str) the path of the file of SQL statements
:param text: (str) optionally pass the contents of the file
rather than the filename
:return: (list of str) the individual statements
'''
from six.moves import StringIO
if not (filename or text):
raise PygcamMcsSystemError('Called parseSqlScript with neither filename nor text')
if text:
lines = StringIO(text).readlines()
else:
with open(filename) as f:
lines = f.readlines()
statements = []
buffer = ''
for line in lines:
line = line.strip()
if not line or line.startswith('--'):
continue
buffer += line
if buffer[-1] == ';':
statements.append(buffer)
buffer = ''
else:
buffer += ' '
return statements
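# Illustrative example (not part of the original source):
#   parseSqlScript(text="CREATE TABLE t (id INT);\n-- comment\nINSERT INTO t\nVALUES (1);")
#   returns ['CREATE TABLE t (id INT);', 'INSERT INTO t VALUES (1);']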
class CoreDatabase(object):
def __init__(self):
from sqlalchemy.orm import scoped_session
factory = sessionmaker() # N.B. sessionmaker returns a class object
self.Session = scoped_session(factory)
self.url = None
self.engine = None
self.appId = None
def endSession(self, session):
'''
Helper method to handle thread-scoped session objects for use with ipyparallel
:param session: the open session
:return: none
'''
try:
self.Session.remove()
except Exception as e:
_logger.debug("Can't remove Session: %s", e)
session.close()
@contextmanager
def sessionScope(self, withRetry=True):
"""
Provide a transactional scope around a series of operations.
Usage:
with sessionScope() as session:
"""
session = self.Session()
session.expire_on_commit = False
try:
yield session
if withRetry:
self.commitWithRetry(session)
else:
session.commit()
except:
session.rollback()
raise
finally:
self.endSession(session)
# create_engine(*args, **kwargs)
# The string form of the URL is dialect+driver://user:password@host/dbname[?key=value..],
# where dialect is a database name such as mysql, oracle, postgresql, etc., and driver
# the name of a DBAPI, such as psycopg2, pyodbc, cx_oracle, etc.
# psycopg2: http://www.stickpeople.com/projects/python/win-psycopg/
#
# postgres|mysql: engine = create_engine('postgresql://scott:tiger@localhost/mydatabase')
#
# sqlite: engine = create_engine('sqlite:///foo.db') -- if relative pathname, or
# engine = create_engine('sqlite:////absolute/path/to/foo.db') if abs pathname.
#
# To add a user and database in postgres:
# createuser -P mcsuser # -P => prompt for password
# createdb -O mcsuser mcs # -O => make mcsuser owner of database mcs
#
def startDb(self, checkInit=True):
'''
Links session to the database file identified in the config file as 'dbfile'.
This needs to be called before any database operations can occur. It is called
in getDatabase() when a new database instance is created.
'''
url = getParam('MCS.DbURL')
echo = getParamAsBoolean('MCS.EchoSQL')
_logger.info('Starting DB: %s' % url)
self.createDatabase()
connect_args = {'connect_timeout': 15} if usingPostgres() else {}
self.engine = engine = create_engine(url, echo=echo, connect_args=connect_args,
# poolclass=QueuePool
#, pool_pre_ping=True
)
self.Session.configure(bind=engine)
self.url = url
if checkInit:
# Load metadata from the existing database, not from the ORMBase,
# to see if the "run" table exists. If not, initialize the DB.
# We don't do this if calling from Runner.py, which requires that
# the database be set up already.
meta = MetaData(bind=engine, reflect=True)
if 'run' not in meta.tables:
self.initDb()
def initDb(self, args=None):
'''
Initialize the database, including loading required inserts.
'''
_logger.info('Initializing DB: %s' % self.url)
meta = ORMBase.metadata # accesses declared tables
meta.bind = self.engine
meta.reflect()
meta.drop_all()
session = self.Session()
meta.create_all()
session.commit()
if args and args.empty:
return
# Deprecated?
_logger.debug('Adding standard codes')
# Add standard app status codes
session.add(Code(codeName=RUN_QUEUED, description='Trial queued'))
session.add(Code(codeName=RUN_RUNNING, description='Trial running'))
session.add(Code(codeName=RUN_SUCCEEDED, description='Trial succeeded'))
session.add(Code(codeName=RUN_FAILED, description='Trial failed'))
session.add(Code(codeName=RUN_ABORTED, description='Runtime error'))
session.add(Code(codeName=RUN_KILLED, description='System timeout'))
session.add(Code(codeName=RUN_ALARMED, description='Runner timeout.'))
#session.add(Program(name=DFLT_PROGRAM, description='Program name used when none is specified'))
_logger.debug('Committing standard codes')
session.commit()
initialData = [
['Program', [{'name': GCAM_PROGRAM,
'description' : 'The GCAM executable program'}]],
]
_logger.debug('Adding initial data')
for key, value in initialData:
# If no module is specified with the class name, it is
# assumed to be in this module, otherwise everything
# up to the last '.' is treated as the module name
items = key.rsplit('.', 1)
modName = items[0] if len(items) == 2 else __name__
className = items[1] if len(items) == 2 else items[0]
table = className.lower()
if table in ORMBase.metadata.tables:
module = sys.modules[modName]
dataClass = getattr(module, className)
if not dataClass:
raise PygcamMcsSystemError("Table class %s not found in module %s" % (className, modName))
for row in value:
session.add(dataClass(**row))
_logger.debug('committing row')
session.commit() # commit each row so each can refer to prior rows
else:
raise KeyError(table)
def createDatabase(self):
'''
Ensure that the database directory (in the case of sqlite3) or the database is available.
'''
if usingSqlite():
# Make sure required directory exists
dbDir = getParam('MCS.RunDbDir')
U.mkdirs(dbDir)
return
if usingPostgres() and getParam('MCS.Postgres.CreateDbExe'):
import subprocess, shlex, re
from .error import PygcamMcsSystemError
# Make sure required database exists
dbName = getSection()
createdb = getParam('MCS.Postgres.CreateDbExe')
argStr = getParam('MCS.Postgres.CreateDbArgs')
command = "%s %s" % (createdb, argStr)
_logger.debug("Trying command: %s" % command)
args = shlex.split(command)
proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
if not proc:
raise PygcamMcsSystemError("Could not run: %s" % command)
status = proc.wait() # wait for process to complete
if status == 0:
_logger.debug("Created postgres database '%s'", dbName)
else:
output = list(proc.stdout)
existsMsg = 'database "%s" already exists' % dbName
dbExists = any(map(lambda line: re.search(existsMsg, line), output))
if not dbExists:
raise PygcamMcsSystemError("%s failed: %s" % (command, dbExists)) # fail if unexpected error occurs
_logger.debug("Postgres database '%s' already exists", dbName)
def commitWithRetry(self, session, maxTries=10, maxSleep=2.0):
# N.B. With master/worker architecture, this should no longer be necessary, but
# there are still occasional failures due to inability to acquire file lock.
# import sqlite3
import random
import time
tries = 0
done = False
while not done:
try:
session.commit()
done = True
except Exception as e:
# except sqlite3.OperationalError as e:
_logger.debug('sqlite3 operational error: %s', e)
if tries >= maxTries:
raise PygcamMcsSystemError("Failed to acquire database lock")
delay = random.random() * maxSleep # sleep for a random number of seconds up to maxSleep
_logger.warn("Database locked (retry %d); sleeping %.1f sec" % (tries, delay))
time.sleep(delay)
tries += 1
# except Exception as e:
# raise PygcamMcsSystemError("commitWithRetry error: %s" % e)
def execute(self, sql):
'Execute the given SQL string'
_logger.debug('Executing SQL: %s' % sql)
with self.sessionScope() as session:
session.execute(sql)
def executeScript(self, filename=None, text=None):
if not (filename or text):
raise PygcamMcsSystemError('Called executeScript with neither filename nor text')
lines = parseSqlScript(filename=filename, text=text)
with self.sessionScope() as session:
for sql in lines:
_logger.debug('Executing script SQL: %s' % sql)
session.execute(sql)
def dropAll(self):
'''
Drop all objects in the database, even those not defined in SqlAlchemy.
'''
with self.sessionScope() as session:
engine = session.get_bind()
meta = MetaData(bind=engine)
meta.reflect()
if usingPostgres():
self.dropAllPostgres(meta)
else:
meta.drop_all()
def dropAllPostgres(self, meta):
"""
Drop all postgres tables and sequences from schema 'public'
"""
for type in ('table', 'sequence'):
sql = "select {type}_name from information_schema.{type}s where {type}_schema='public'".format(type=type)
names = [name for (name, ) in meta.execute(text(sql))]
for name in names:
try:
meta.execute(text("DROP %s %s CASCADE" % (type, name)))
except SQLAlchemyError as e:
print(e)
def getTable(self, tableClass, orderBy=None):
'''
Get all the contents of a table, optionally ordered by the given column.
:param tableClass: (SqlAlchemy table class) the table to query
:param orderBy: (SqlAlchemy table.col spec) optional column to order by
:return: (list of elements of tableClass) the rows of the table
'''
with self.sessionScope() as session:
query = session.query(tableClass)
if orderBy:
query = query.order_by(orderBy)
rows = query.all()
return rows
def setSqlEcho(self, value=True):
with self.sessionScope() as session:
engine = session.get_bind()
engine.echo = value
def addColumns(self, tableClass, columns):
'''
Adds a new column or columns to an existing table, emitting an
ALTER TABLE statement and updating the metadata.
:param tableClass: the class defining the table to alter
:param column: a Column instance describing the column to add
:return: none
'''
if not isinstance(columns, Iterable):
columns = [columns]
with self.sessionScope() as session:
engine = session.get_bind()
table = tableClass.__table__
tableName = table.description
for column in columns:
table.append_column(column) # add it to the metadata
setattr(tableClass, column.name, column) # add attribute to class, which maps it to the column
columnName = column.name # column.compile(dialect=engine.dialect)
columnType = column.type.compile(engine.dialect)
engine.execute('ALTER TABLE %s ADD COLUMN "%s" %s' % (tableName, columnName, columnType))
def createOutput(self, name, description=None, program=DFLT_PROGRAM, unit=None, session=None):
'''
Create an Output record with the given arguments unless the record
exists for this name and program. Uses caller's session if provided. If no
session is provided, the row is committed and the new outputId is returned.
If the caller passes a session, None is returned unless the object was found,
in which case the outputId is returned.
'''
sess = session or self.Session()
programId = self.getProgramId(program)
outputId = sess.query(Output.outputId).filter_by(programId=programId, name=name).scalar()
if outputId is None:
output = Output(name=name, programId=programId, description=description, units=unit)
sess.add(output)
if not session:
sess.commit()
self.endSession(sess)
outputId = output.outputId
return outputId
def getOutputIds(self, nameList):
# cache on first call
if not self.outputIds:
with self.sessionScope() as session:
rows = session.query(Output.name, Output.outputId).all()
self.outputIds = dict(rows)
# lookup ids in cache
ids = [self.outputIds[name] for name in nameList]
return ids
# return zip(*ids)[0] if ids else []
def getOutputs(self):
rows = self.getTable(Output)
return [obj.name for obj in rows]
def getOutputUnits(self, name):
with self.sessionScope() as session:
query = session.query(Output.units).filter_by(name=name)
row = query.one()
units = row[0] if row else ''
return units
def getOutputsWithValues(self, simId, scenario):
with self.sessionScope() as session:
query = session.query(Output.name).\
join(OutValue).join(Run).filter_by(simId=simId).\
join(Experiment).filter_by(expName=scenario). \
distinct(Output.name)
rows = query.all()
return [row[0] for row in rows]
#
# Very much like setAttrVal. So much so that perhaps the Result table can be
# eliminated in favor of using the generic attribute/value system?
#
def setOutValue(self, runId, paramName, value, program=GCAM_PROGRAM, session=None):
'''
Set the given named output parameter to the given numeric value. Overwrite a
previous value for this runId and attribute, if found, otherwise create a new value
record. If session is not provided, one is allocated, and the transaction is
committed. If a session is provided, the caller is responsible for calling commit.
'''
#_logger.debug('setOutValue(%s, %s, %s, session=%s', runId, paramName, value, session)
sess = session or self.Session()
outputId = sess.query(Output.outputId).filter_by(name=paramName).join(Program).filter_by(name=program).scalar()
if not outputId:
raise PygcamMcsSystemError("%s output %s was not found in the Output table" % (program, paramName))
results = sess.query(OutValue).filter_by(runId=runId, outputId=outputId).all()
# If previous value is found, overwrite it; otherwise create a new one
if results:
#_logger.debug("setOutValue: updating value for outputId=%d" % outputId)
#result = resultQuery.one()
result = results[0]
result.value = value
else:
#_logger.debug("setOutValue: adding value for outputId=%d" % outputId)
sess.add(OutValue(runId=runId, outputId=outputId, value=value))
if session is None:
self.commitWithRetry(sess)
self.endSession(sess)
def getOutValues(self, simId, expName, outputName, limit=None):
'''
Return a pandas DataFrame with columns trialNum and name outputName,
for the given sim, exp, and output variable.
'''
from pandas import DataFrame
session = self.Session()
limit = MAXSIZE if limit is None or limit <= 0 else limit
# This is essentially this query, but with "JOIN xx ON" syntax generated:
# select r.trialNum, v.value from run r, outvalue v, experiment e, output o
# where e.scenario='test exp' and r.expid=e.expid and r.simid=1 and
# o.name='p1' and o.outputid=v.outputid;
query = session.query(Run.trialNum).add_columns(OutValue.value).filter_by(simId=simId).\
join(Experiment).filter_by(expName=expName).join(OutValue).join(Output).filter_by(name=outputName).\
order_by(Run.trialNum).limit(limit)
#print "getOutValues query: %s" % str(query.statement.compile())
rslt = query.all()
self.endSession(session)
if not rslt:
return None
resultDF = DataFrame.from_records(rslt, columns=['trialNum', outputName], index='trialNum')
return resultDF
def deleteRunResults(self, runId, outputIds=None, session=None):
sess = session or self.Session()
query = sess.query(OutValue).filter_by(runId=runId)
if outputIds:
query = query.filter(OutValue.outputId.in_(outputIds))
#query.delete(synchronize_session='fetch')
query.delete(synchronize_session=False)
if session is None:
self.commitWithRetry(sess)
self.endSession(sess)
# def queryToDataFrame(self, query): # TBD: Not used anywhere yet...
# from pandas import DataFrame # lazy import
#
# session = self.Session()
# result = session.execute(query)
# columnNames = result.keys()
# values = result.fetchall()
# self.endSession(session)
# return DataFrame(values, columns=columnNames)
def getParameterValues(self, simId, program='gcam', asDataFrame=False):
from pandas import DataFrame # lazy import
session = self.Session()
query = session.query(InValue.row, InValue.col, InValue.value, InValue.trialNum, Input.paramName).\
filter(InValue.simId == simId).join(Input).join(Program).filter(Program.name == program).order_by(InValue.trialNum)
rslt = query.all()
cols = [d['name'] for d in query.column_descriptions] if rslt else None
self.endSession(session)
if not rslt:
return None
if not asDataFrame:
return rslt
resultDF = DataFrame.from_records(rslt, columns=cols, index='trialNum')
return resultDF
def getParameterValues2(self, simId):
from pandas import DataFrame # lazy import
session = self.Session()
query = session.query(InValue.trialNum, InValue.value, Input.paramName).\
filter(InValue.simId == simId).join(Input).order_by(InValue.trialNum)
rslt = query.all()
self.endSession(session)
if not rslt:
return None
cols = ['trialNum', 'value', 'paramName']
resultDF = DataFrame.from_records(rslt, columns=cols)
resultDF = resultDF.pivot(index='trialNum', columns='paramName', values='value')
return resultDF
def getParameters(self):
session = self.Session()
query = session.query(Input.paramName, InValue.row, InValue.col).\
filter(Input.inputId == InValue.inputId).\
distinct(Input.paramName, InValue.row, InValue.col)
rslt = query.all()
self.endSession(session)
return rslt
def getInputs(self):
rows = self.getTable(Input, orderBy=Input.paramName)
return [row.paramName for row in rows]
def scenariosWithResults(self, simId):
# Definition of view 'result':
# select o."runId", r."simId", r."expId", r."trialNum", e."expName", op.name, o.value
# from outvalue o, output op, run r, experiment e
# where e."expId" = r."expId" and o."runId" = r."runId" and o."outputId" = op."outputId"
try:
with self.sessionScope() as session:
query = session.query(Experiment.expName).join(Run).filter_by(simId=simId).\
join(OutValue).distinct(Experiment.expName)
rows = query.all()
names = [row[0] for row in rows]
except Exception as e:
_logger.error("scenariosWithResults failed: %s", e)
names = []
_logger.debug("scenariosWithResults returning %s", names)
return names
def createRun(self, simId, trialNum, expName=None, expId=None, status=RUN_NEW, session=None):
"""
Create an entry for a single model run, initially in "new" state
"""
assert (expName or expId), "Database createRun called with neither expName nor expId"
sess = session or self.Session()
if expId is None:
exp = sess.query(Experiment.expId).filter_by(expName=expName).one()
expId = exp.expId
# if prior run record exists for this {simId, trialNum, expId} tuple, delete it
        with sess.no_autoflush:
sess.query(Run).filter_by(simId=simId, trialNum=trialNum, expId=expId).delete()
run = Run(simId=simId, trialNum=trialNum, expId=expId, status=status, jobNum=None)
sess.add(run)
        if not session: # if we created the session locally, commit; else caller must do so
self.commitWithRetry(sess)
self.endSession(sess)
return run
def getSim(self, simId):
with self.sessionScope() as session:
sim = session.query(Sim).filter_by(simId=simId).scalar()
return sim # N.B. scalar() returns None if no rows are found
def getSims(self):
rows = self.getTable(Sim, orderBy=Sim.simId)
return rows
def getRun(self, simId, trialNum, expName):
with self.sessionScope() as session:
run = session.query(Run).filter_by(simId=simId, trialNum=trialNum).\
join(Experiment).filter_by(expName=expName).scalar()
return run
def getRunByRunId(self, runId):
with self.sessionScope() as session:
run = session.query(Run).filter_by(runId=runId).scalar()
return run
def getRunFromContext(self, context):
run = self.getRun(context.simId, context.trialNum, context.scenario)
#_logger.debug("getRunIdFromContext returning runId %s", run.runId if run else None)
return run
def setRunStatus(self, runId, status, session=None):
'''
Set the runStatus to the value for the given string and
optionally set the job number.'''
sess = session or self.Session()
try:
run = sess.query(Run).filter_by(runId=runId).one()
if run.status == status:
return # nothing to do here
run.status = status # insert/update listener sets status code and timestamps
if not session:
self.commitWithRetry(sess)
return
except NoResultFound:
_logger.warn("db.setRunStatus failed to find record for runId %d", runId)
return
finally:
if not session:
self.endSession(sess)
def getRunsWithStatus(self, simId, expList, statusList):
# Allow expList and statusList to be a single string,
# which we convert to lists
if isinstance(expList, string_types):
expList = [expList]
if isinstance(statusList, string_types):
statusList = [statusList]
session = self.Session()
query = session.query(Run.trialNum).filter_by(simId=simId)
if expList:
query = query.join(Experiment).filter(Run.status.in_(statusList), Experiment.expName.in_(expList))
rslt = query.order_by(Run.trialNum).all()
self.endSession(session)
if rslt:
rslt = [r[0] for r in rslt] # collapse list of singleton tuples into a single list
#_logger.debug("for simid=%d, expList=%s, status=%s, rslt=%s" % (simId, expList, status, rslt))
return rslt
def getRunsByStatus(self, simId, scenario, statusList, groupName=None, projectName=None):
'''
By default, returns tuples of (runId, trialNum) for the given scenario that have
any of the statuses in statusList (which can be a single status string or a list
of strings.) If groupName or projectName are not None, results are converted to
a list of Context instances.
'''
from .context import Context
if isinstance(statusList, string_types):
statusList = [statusList]
if len(statusList) == 0:
return []
with self.sessionScope() as session:
# expId = self.getExpId(scenario, session=session)
# query = session.query(Run.runId, Run.trialNum).filter_by(simId=simId, expId=expId).filter(Run.status.in_(statusList))
# Return all data required to create Context (except projectName and groupName)
query = session.query(Run.runId, Run.simId, Run.trialNum, Run.status).filter_by(simId=simId).filter(Run.status.in_(statusList))
query = query.add_columns(Experiment.expName, Experiment.parent).join(Experiment).filter_by(expName=scenario)
rslt = query.order_by(Run.trialNum).all()
if groupName or projectName:
rslt = [Context(runId=r[0], simId=r[1], trialNum=r[2], status=r[3], scenario=r[4],
baseline=r[5], groupName=groupName, projectName=projectName) for r in rslt]
return rslt
def createSim(self, trials, description, simId=None):
'''
Creates a new simulation with the given number of trials and description
'''
with self.sessionScope() as session:
if simId is None:
newSim = Sim(trials=trials, description=description)
else:
session.query(Sim).filter_by(simId=simId).delete()
newSim = Sim(trials=trials, description=description, simId=simId)
session.add(newSim)
return newSim.simId
def updateSimTrials(self, simId, trials):
with self.sessionScope() as session:
sim = session.query(Sim).filter_by(simId=simId).one()
sim.trials = trials
def getTrialCount(self, simId):
with self.sessionScope() as session:
trialCount = session.query(Sim.trials).filter_by(simId=simId).scalar()
return trialCount
def getMissingTrials(self, simId, scenario):
"""
Return a list of trial numbers that are not present in the database
for the given simId and scenario.
:param simId: (int) simulation ID
:param scenario: (str) scenario name
:return: (list of int) trial numbers of missing trials
"""
count = self.getTrialCount(simId)
possible = set(xrange(count))
df = self.getRunInfo(simId, scenario, includeSucceededRuns=True, asDataFrame=True)
present = set() if df is None else set(df.trialNum)
missing = possible - present
return sorted(missing)
def getRunInfo(self, simId, scenario, includeSucceededRuns=False, asDataFrame=False):
"""
Return info for runs for the given simId and scenario, as a list of tuples
or as a pandas.DataFrame.
:param simId: (int) simulation ID
:param scenario: (str) scenario name
:param includeSucceededRuns: (bool) if True, runs of status 'succeeded' are
included; by default they are not.
:param asDataFrame: (bool) whether to return the result as a list of tuples
(the default) or as a DataFrame
:return:
"""
from pandas import DataFrame
with self.sessionScope() as session:
q = session.query(Run.runId, Run.simId, Run.trialNum, Run.status).\
filter_by(simId=simId).join(Experiment).filter_by(expName=scenario)
if not includeSucceededRuns:
q = q.filter(Run.status != 'succeeded')
rows = q.all()
if asDataFrame:
if rows:
cols = [d['name'] for d in q.column_descriptions]
df = DataFrame.from_records(rows, columns=cols, index='runId')
return df
return None
return rows
def createExp(self, name, description=None):
'''
Insert a row for the given experiment
'''
with self.sessionScope() as session:
exp = Experiment(expName=name, description=description)
session.add(exp)
return exp.expId
def getExpId(self, expName, session=None):
exp = self.getExp(expName, session)
return exp.expId
def getExp(self, expName, session=None, raiseError=True):
sess = session or self.Session()
try:
exp = sess.query(Experiment).filter_by(expName=expName).one()
except NoResultFound:
msg = "The experiment '%s' is not defined" % expName
if raiseError:
_logger.fatal(msg)
raise PygcamMcsUserError(msg)
else:
_logger.info(msg)
exp = None
finally:
if session:
self.endSession(session)
return exp
def addExperiments(self, scenarioNames, baseline, filename):
from .error import PygcamMcsSystemError
desc = 'Added from ' + filename
for name in scenarioNames:
parent = None if name == baseline else baseline
try:
self.createExp(name, description=desc, parent=parent)
except Exception as e:
raise PygcamMcsSystemError("Failed to create experiment: %s" % e)
def getProgramId(self, program):
with self.sessionScope() as session:
programId = session.query(Program.programId).filter_by(name=program).scalar()
return programId
class GcamDatabase(CoreDatabase):
_yearColsAdded = False
_expColsAdded = False
instance = None # singleton class
def __init__(self):
super(GcamDatabase, self).__init__()
self.paramIds = {} # parameter IDs by name
self.outputIds = None # output IDs by name
self.canonicalRegionMap = {}
# Cache these to avoid database access in saveResults loop
for regionName, regionId in RegionMap.items():
canonName = canonicalizeRegion(regionName)
self.canonicalRegionMap[canonName] = regionId
@classmethod
def getDatabase(cls, checkInit=True):
if cls.instance is None:
cls.instance = GcamDatabase()
cls.instance.startDb(checkInit=checkInit)
return cls.instance
@classmethod
def close(cls):
if cls.instance:
cls.instance.engine.dispose()
cls.instance = None
def initDb(self, args=None):
'Add GCAM-specific tables to the database'
super(GcamDatabase, self).initDb(args=args)
self.addYearCols()
self.addExpCols()
if args and args.empty:
return
self.addRegions(RegionMap)
def startDb(self, checkInit=True):
super(GcamDatabase, self).startDb(checkInit=checkInit)
self.addYearCols(alterTable=False)
self.addExpCols(alterTable=False)
def createExp(self, name, parent=None, description=None):
'''
Insert a row for the given experiment. Replaces superclass method
to add 'parent' argument. Also, if it fails, updates existing row.
'''
from sqlalchemy.exc import IntegrityError
session = self.Session()
exp = Experiment(expId=None, expName=name, description=description)
exp.parent = parent # not in Experiment's __init__ signature
try:
session.add(exp)
session.commit()
expId = exp.expId
except IntegrityError:
session.rollback()
expId = self.updateExp(name, description=description, parent=parent, session=session)
finally:
self.endSession(session)
return expId
def updateExp(self, name, parent=None, description=None, session=None):
sess = session or self.Session()
exp = self.getExp(name, session=sess)
exp.description = description
exp.parent = parent
sess.commit()
if not session:
self.endSession(sess)
return exp.expId
def getExp(self, expName, session=None, raiseError=True):
sess = session or self.Session()
try:
exp = sess.query(Experiment).filter_by(expName=expName).one()
except NoResultFound:
msg = "The experiment '%s' is not defined" % expName
if raiseError:
raise PygcamMcsUserError(msg)
else:
_logger.info(msg)
exp = None
finally:
if not session:
self.endSession(sess)
return exp
def getExps(self):
rows = self.getTable(Experiment, orderBy=Experiment.expName)
return rows
# TBD: generalize and add to CoreDatabase since often modified? Or just add to base schema.
def addExpCols(self, alterTable=True):
'''
Add required columns to the Experiment table.
'''
if self._expColsAdded:
return
session = self.Session()
engine = session.get_bind()
meta = ORMBase.metadata
meta.bind = engine
meta.reflect()
expTable = Experiment.__table__
cols = [('parent', String)]
for (colName, colType) in cols:
if colName not in expTable.columns:
column = Column(colName, colType)
if alterTable:
self.addColumns(Experiment, column)
else:
setattr(Experiment, column.name, column) # just add the mapping
self._expColsAdded = True
self.endSession(session)
@staticmethod
def yearCols():
from .util import activeYears, YEAR_COL_PREFIX
# Create the time series table with years (as columns) specified in the config file.
years = activeYears()
cols = [YEAR_COL_PREFIX + y for y in years]
return cols
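    # Illustrative example (values assumed): with activeYears() == ['1990', '2005'] and
    # YEAR_COL_PREFIX == 'y', yearCols() returns ['y1990', 'y2005'], the per-year columns
    # that addYearCols() below adds to the timeseries table.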
def addYearCols(self, alterTable=True):
'''
Define year columns (y1990, y2005, y2010, etc.) dynamically. If alterTable
is True, the SQL table is altered to add the column; otherwise the column
is just mapped to an attribute of the TimeSeries class.
'''
if self._yearColsAdded:
return
with self.sessionScope() as session:
engine = session.get_bind()
meta = ORMBase.metadata
meta.bind = engine
meta.reflect()
colNames = self.yearCols()
timeSeries = Table('timeseries', meta, autoload=True, autoload_with=engine)
# Add columns for all the years used in this analysis
for colName in colNames:
if colName not in timeSeries.columns:
column = Column(colName, Float)
if alterTable:
self.addColumns(TimeSeries, column)
else:
setattr(TimeSeries, column.name, column) # just add the mapping
self._yearColsAdded = True
def addRegions(self, regionMap):
# TBD: read region map from file identified in config file, or use default values
# For now, use default mapping
with self.sessionScope() as session:
for name, regId in iteritems(regionMap):
self.addRegion(regId, name, session=session)
def addRegion(self, regionId, name, session=None):
sess = session or self.Session()
obj = Region(regionId=regionId, displayName=name, canonName=canonicalizeRegion(name))
sess.add(obj)
        if not session:
sess.commit()
self.endSession(sess)
def getRegionId(self, name):
canonName = canonicalizeRegion(name)
regionId = self.canonicalRegionMap[canonName]
return regionId
def getParamId(self, pname):
return self.paramIds[pname]
def createOutput(self, name, program=GCAM_PROGRAM, description=None, unit=None, session=None):
_logger.debug("createOutput(%s)", name)
return super(GcamDatabase, self).createOutput(name, program=program, description=description,
unit=unit, session=session)
def saveParameterNames(self, tuples):
'''
Define parameter names in the database on the fly based on results of XPath queries.
"tuples" is a list of (paramName, description) pairs.
'''
session = self.Session()
programId = self.getProgramId(GCAM_PROGRAM)
# TBD: The following code is subject to a race condition, but we don't expect multiple users to
# TBD: generate simulations in the same model run dir simultaneously. If they do, this may break.
# TBD: Could handle this with a lock...
pnames = [tup[0] for tup in tuples]
rows = session.query(Input).filter(Input.programId == programId, Input.paramName.in_(pnames)).all()
found = [row.paramName for row in rows]
descByName = dict(tuples)
notFound = set(pnames) - set(found)
# Construct list of tuples holding only the parameters that were
# found, and whose description changed. These are updated automatically
# when the session is committed.
updTuples = []
for row in rows:
desc = descByName[row.paramName]
if row.description != desc:
row.description = desc
updTuples.append(row)
# Create a list of objects that need to be inserted, then add them all at once
newInputs = [Input(programId=programId, paramName=name, description=descByName[name]) for name in notFound]
session.add_all(newInputs)
# Insert new parameter descriptions for these
session.commit()
# Cache all parameter IDs for faster lookup
rows = session.query(Input.inputId, Input.paramName).all()
for row in rows:
self.paramIds[row.paramName] = row.inputId
self.endSession(session)
def saveParameterValues(self, simId, tuples):
'''
Save the value of the given parameter in the database. Tuples are
of the format: (trialNum, paramId, value, varNum)
'''
with self.sessionScope() as session:
for trialNum, paramId, value, varNum in tuples:
# We save varNum to distinguish among independent values for the same variable name.
# The only purpose this serves is to ensure uniqueness, enforced by the database.
inValue = InValue(inputId=paramId, simId=simId, trialNum=trialNum,
value=value, row=0, col=varNum)
session.add(inValue)
def deleteRunResults(self, runId, outputIds=None, session=None):
"""
Augment core method by deleting timeseries data, too.
"""
# _logger.debug("deleteRunResults: deleting results for runId %d, outputIds=%s" % (runId, outputIds))
sess = session or self.Session()
super(GcamDatabase, self).deleteRunResults(runId, outputIds=outputIds, session=sess)
query = sess.query(TimeSeries).filter_by(runId=runId)
if outputIds:
query = query.filter(TimeSeries.outputId.in_(outputIds))
query.delete(synchronize_session='fetch')
if session is None:
self.commitWithRetry(sess)
self.endSession(sess)
def saveTimeSeries(self, runId, regionId, paramName, values, units=None, session=None):
sess = session or self.Session()
programId = self.getProgramId(GCAM_PROGRAM)
        # one() raises an error if zero or more than one matching row is found; otherwise it returns the single row.
try:
row = sess.query(Output).filter(Output.programId == programId, Output.name == paramName).one()
except Exception:
_logger.error("Can't find param %s for %s", paramName, GCAM_PROGRAM)
raise
outputId = row.outputId
ts = TimeSeries(runId=runId, outputId=outputId, regionId=regionId, units=units)
for name, value in iteritems(values): # Set the values for "year" columns
setattr(ts, name, value)
sess.add(ts)
if not session:
sess.commit()
self.endSession(sess)
def getTimeSeries(self, simId, paramName, expList):
'''
Retrieve all timeseries rows for the given simId and paramName.
:param simId: simulation ID
:param paramName: name of output parameter
:param expList: (list of str) the names of the experiments to select
results for.
:return: list of TimeSeries tuples or None
'''
cols = ['seriesId', 'runId', 'outputId', 'units'] + self.yearCols()
with self.sessionScope() as session:
query = session.query(TimeSeries, Experiment.expName).options(load_only(*cols)). \
join(Run).filter_by(simId=simId).filter_by(status='succeeded'). \
join(Experiment).filter(Experiment.expName.in_(expList)). \
join(Output).filter_by(name=paramName)
rslt = query.all()
return rslt
# Single instance of the class. Use 'getDatabase' constructor
# to ensure that this instance is returned if already created.
_DbInstance = None
def getDatabase(checkInit=True):
'''
    Return the singleton GcamDatabase instance, creating it first if necessary.
'''
return GcamDatabase.getDatabase(checkInit=checkInit)
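# Illustrative usage sketch (not part of the original module); the simulation ID,
# output name and experiment name below are hypothetical:
#
#   db = getDatabase()
#   rows = db.getTimeSeries(simId=1, paramName='co2-emissions', expList=['baseline'])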
def dropTable(tableName, meta):
if tableName in meta.tables:
# Drop the table if it exists and remove it from the metadata
table = meta.tables[tableName]
table.drop()
meta.remove(table)
def canonicalizeRegion(name):
'''
Return the canonical name for a region, normalizing the use of capitalization
and underscores.
:param name: a GCAM region name
:return: region name in canonical format, i.e., lowercase with underscores
changed to spaces. (The use of underscores is inconsistent and thus hard
to remember, e.g., 'South America_Northern')
'''
name = name.lower()
if name in RegionAliases:
name = RegionAliases[name]
return name.replace('_', ' ')
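# Example of canonicalizeRegion (illustrative; the region name is hypothetical and
# assumed not to be listed in RegionAliases):
#
#   canonicalizeRegion('South America_Northern')  # -> 'south america northern'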
|
from rest_framework.serializers import (
ModelSerializer,
)
from articles.models import Articles
class ArticleSerializer(ModelSerializer):
class Meta:
model = Articles
fields = [
'id',
'title',
'body',
'img',
]
def create(self, validated_data):
title = validated_data['title']
body = validated_data['body']
img = validated_data['img']
article = Articles(
title=title,
body=body,
img=img,
)
article.save()
        return article
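# Usage sketch (not part of the original module); the request payload is hypothetical:
#
#   serializer = ArticleSerializer(data={'title': 'Hello', 'body': 'Text', 'img': None})
#   if serializer.is_valid():
#       article = serializer.save()  # dispatches to create() above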
|
import pandas as pd
class File:
def __init__(self, fs, filename, bucket):
        self.headers = {}
        self.order = {}
        self.temp_values = {}
        self.fs = fs
        self.filename = filename
        self.bucket = bucket
self.process()
def process(self):
        if self.filename is not None:
with self.fs.open(self.bucket + '/' + self.filename, 'rb') as f:
model = pd.read_csv(f)
stats = model.describe()
for header in model.columns:
current_stat = stats[header]
                    self.headers[header] = {'min': current_stat['min'], 'max': current_stat['max'], 'mean': current_stat['mean'], 'std': current_stat['std'], 'n': current_stat['count']}
|
import argparse
import datetime
import json
from pathlib import Path
from tempfile import TemporaryDirectory
from tqdm import tqdm
from cltl.brain import LongTermMemory
def readCapsuleFromFile(jsonfile):
    with open(jsonfile) as f:
        scenario = json.load(f)
    return scenario
def main(log_path):
# Create brain connection
brain = LongTermMemory(address="http://localhost:7200/repositories/sandbox",
log_dir=log_path,
clear_all=False)
# Read scenario from file
scenario_file_name = 'carlani-4.json'
scenario_json_file = 'capsules/' + scenario_file_name
scenario = readCapsuleFromFile(scenario_json_file)
for capsule in tqdm(scenario['scenario']):
capsule['date'] = datetime.datetime.strptime(capsule['date'], "%Y:%m:%d")
if capsule['speech-act'] == 'statement':
x = brain.update(capsule, reason_types=True, create_label=True)
print(f'\n{capsule["triple"]}\n---------------------------------------------------------------\n\n')
else:
sa = capsule['speech-act']
print(f'\n{sa}\n---------------------------------------------------------------\n\n')
print(capsule)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Carl-Leolani scenario')
parser.add_argument('--logs', type=str,
help="Directory to store the brain log files. Must be specified to persist the log files.")
args, _ = parser.parse_known_args()
if args.logs:
main(Path(args.logs))
else:
with TemporaryDirectory(prefix="brain-log") as log_path:
main(Path(log_path))
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Logging utilities, for use with the standard logging module."""
import logging
def InitLogging(verbose_count):
"""Ensures that the logger (obtained via logging.getLogger(), as usual) is
initialized, with the log level set as appropriate for |verbose_count|
instances of --verbose on the command line."""
assert(verbose_count >= 0)
if verbose_count == 0:
level = logging.WARNING
elif verbose_count == 1:
level = logging.INFO
else: # verbose_count >= 2
level = logging.DEBUG
logging.basicConfig(format="%(relativeCreated).3f:%(levelname)s:%(message)s")
logger = logging.getLogger()
logger.setLevel(level)
logger.debug("Initialized logging: verbose_count=%d, level=%d" %
(verbose_count, level))
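if __name__ == "__main__":
  # Minimal wiring sketch (not part of the original file): count -v/--verbose
  # occurrences with argparse and pass the count to InitLogging. The flag name
  # is an assumption for illustration only.
  import argparse
  parser = argparse.ArgumentParser()
  parser.add_argument("-v", "--verbose", action="count", default=0)
  args = parser.parse_args()
  InitLogging(args.verbose)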
|
import pytest # NOQA
from SuzuriTest import SuzuriTest
class TestClass(SuzuriTest):
@pytest.mark.parametrize('productId, limit, offset', [
(2737748, 30, 100),
])
def test_getFavoriteOfProduct(self, client, productId, limit, offset):
r = client.getFavoriteOfProduct(productId, limit, offset)
assert r.status_code == 200
@pytest.mark.parametrize('userId', [
(217290),
])
def test_getFavoriteOfUser(self, client, userId):
r = client.getFavoriteOfUser(userId)
assert r.status_code == 200
    @pytest.mark.parametrize('productId, anonymous', [
        (7, True),
    ])
    def test_createFavoriteToProduct(self, client, productId, anonymous):
        r = client.createFavoriteToProduct(productId, anonymous)
assert r.status_code == 200
|
import itertools
import json
import multiprocessing
import re
import networkx as nx
from quantlaw.utils.beautiful_soup import create_soup
from quantlaw.utils.files import list_dir
from quantlaw.utils.networkx import multi_to_weighted
from statics import DE_DECISIONS_NETWORK, DE_DECISIONS_REFERENCE_PARSED_XML
def count_characters(text, whites=False):
if whites:
return len(text)
else:
return len(re.sub(r"\s", "", text))
def count_tokens(text, unique=False):
if not unique:
return len(text.split())
else:
return len(set(text.split()))
def get_graph_data_from_decision(decision):
try:
soup = create_soup(f"{DE_DECISIONS_REFERENCE_PARSED_XML}/{decision}")
items = list(soup.find_all(["document", "item", "seqitem"]))
node_dicts = []
containment_edges = []
for item in items:
node_dict = dict(
key=item.attrs["key"],
heading=item.attrs.get("heading", ""),
level=int(item.attrs["level"]),
type=item.name,
)
text = item.get_text(" ")
node_dict["chars_n"] = count_characters(text, whites=True)
node_dict["chars_nowhites"] = count_characters(text, whites=False)
node_dict["tokens_n"] = count_tokens(text, unique=False)
node_dict["tokens_unique"] = count_tokens(text, unique=True)
if item.name == "document":
for key in ["az", "gericht", "datum", "doktyp", "spruchkoerper"]:
node_dict[key] = item.attrs.get(key, "")
parent_key = "root"
else:
node_dict["parent_key"] = item.parent.attrs["key"]
parent_key = item.parent.attrs["key"]
node_dicts.append(node_dict)
containment_edges.append((parent_key, item.attrs["key"]))
reference_edges = []
for item in items:
for node in item.find_all("reference"):
if (
node.lawname
and "parsed" in node.attrs
and node.lawname.get("type")
in [
"dict",
"sgb",
]
):
refs = json.loads(node.attrs["parsed"])
for ref in refs:
ref_key = "_".join(ref[:2])
reference_edges.append((item.attrs["key"], ref_key))
except Exception:
print(decision)
raise
return node_dicts, containment_edges, reference_edges
def network():
decisions = list_dir(DE_DECISIONS_REFERENCE_PARSED_XML, ".xml")
with multiprocessing.Pool() as p:
results = p.map(get_graph_data_from_decision, decisions)
node_dicts = list(itertools.chain.from_iterable([x[0] for x in results]))
containment_edges = list(itertools.chain.from_iterable([x[1] for x in results]))
reference_edges = list(itertools.chain.from_iterable([x[2] for x in results]))
hierarchy_G = nx.DiGraph()
hierarchy_G.add_node("root", level=-1, key="root", bipartite="decision")
hierarchy_G.add_nodes_from(
[(x["key"], x) for x in node_dicts], bipartite="decision"
)
hierarchy_G.add_edges_from(containment_edges, edge_type="containment")
reference_G = nx.MultiDiGraph(hierarchy_G)
print("created")
reference_G.add_nodes_from(
sorted({x[-1] for x in reference_edges}), bipartite="statute"
)
print("Statute nodes added")
reference_G.add_edges_from(reference_edges, edge_type="reference")
print("Reference edges added")
reference_weighted_G = multi_to_weighted(reference_G)
nx.write_gpickle(reference_weighted_G, DE_DECISIONS_NETWORK)
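if __name__ == "__main__":
    # Hypothetical entry point (not part of the original module): build the
    # decision network and write it to DE_DECISIONS_NETWORK.
    network()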
|
import sublime_plugin
from .scopes import *
from .parse import get_sub_scopes
class MenuNode():
"""
Attributes:
view - sublime view into text buffer
scope - scope object associated with this node
parent - the parent of this node in the hierarchy tree.
this node is root if parent is None
children - dictionary of scope type strings to a
list of menu nodes. These nodes are the
children of this node in the hierarchy tree
"""
def __init__(self, view, scope, parent=None):
self._view = view
self._scope = scope
self._parent = parent
        # Dictionary of scope types to a list of nodes of that type
self._children = {scope_type: list() for scope_type in scope_types}
# TODO: do something with the _children.library_scope_type
def add_child(self, node):
"""
Appends node to the children list corresponding to
the node's type.
Parameters:
node - MenuNode whose scope region is within the region
of this menu node's scope region
"""
assert is_valid_type(node.scope.type)
self._children[node.scope.type].append(node)
def get_children(self, child_type=None):
"""
Returns list of children nodes of the specified scope type.
If scope type not given, returns list of available scope types;
these are the children scope types that map to a non-empty list
Parameters:
child_type - indicates which node list to return.
If None, returns available scope types.
"""
        # Checks whether the child_type is valid and whether any children
        # of this type exist
if (child_type and (child_type in self._children) and
len(self._children[child_type])):
return self._children[child_type]
# Returns a list of all available children types.
# Does nothing if scope contains no children types.
if not child_type:
children_types = list()
for key, value in self._children.items():
if len(value):
children_types.append(key)
if len(children_types):
return children_types
return None
@property
def name(self):
return self._scope._name
@property
def parent(self):
return self._parent
@property
def scope(self):
return self._scope
@property
def is_global_node(self):
return self._parent is None
def get_libraries(view, parent):
# TODO ignore #include's that are under subscopes
# Look for libraries in view to make into scopes
result = []
lib_rgns = view.find_all(Library.regex_pattern)
for rgn in lib_rgns:
result.append(Library(view, rgn))
return result
def get_hierarchy_tree(view, node=None):
if not node:
node = MenuNode(view=view,
scope=GlobalScope(view))
# Base case: node is of scope type that cannot
# have children
if not node.scope.can_have_subscopes:
return node
subscopes = get_sub_scopes(view, node.scope.definition_region)
# Base case: node has no children
if not subscopes:
return node
# Add children nodes
for subscope in subscopes:
child_node = get_hierarchy_tree(view, MenuNode(view,
subscope,
parent=node))
node.add_child(child_node)
# get libraries
# if global scope:
if not node.parent:
libs = get_libraries(view, node)
for lib in libs:
node.add_child(MenuNode(view, lib, node))
return node
# Test
class MenuCommand(sublime_plugin.TextCommand):
# Preorder traversal
def print_hierarchy(self, node, prefix=""):
print(prefix + node.name)
# Base case: node is of scope type that cannot
# have children
if not node.scope.can_have_subscopes:
return
# Base case: node has no children
available_scope_types = node.get_children()
if not available_scope_types:
return
for scope_type in available_scope_types:
children = node.get_children(scope_type)
for child_node in children:
self.print_hierarchy(node=child_node, prefix=(prefix + "\t"))
def run(self, edit):
root = get_hierarchy_tree(self.view)
self.print_hierarchy(node=root)
# print(root.scope.definition_region)
# print(root.scope.name)
# print(root.get_children())
|
class CP2Kinput:
def __init__(self, version, year, compile_date, compile_revision):
self.version = version
self.year = year
self.compileDate = compile_date
self.compileRevision = compile_revision
@staticmethod
def __check_input_attribs(lxml_root):
root_attributes = []
for c in lxml_root.iterchildren():
if c.tag not in root_attributes:
root_attributes.append(c.tag)
return root_attributes
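# Usage sketch (illustrative; the version metadata and XML snippet are hypothetical):
#
#   from lxml import etree
#   root = etree.fromstring(b"<CP2K_INPUT><GLOBAL/><FORCE_EVAL/></CP2K_INPUT>")
#   info = CP2Kinput("8.1", "2021", "2021-01-01", "git:abcdef")
#   # __check_input_attribs collects the distinct child tags of an lxml element.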
|
import ctypes
import os
import six
from cupy import cuda
MAX_NDIM = 25
def _make_carray(n):
class CArray(ctypes.Structure):
_fields_ = (('data', ctypes.c_void_p),
('size', ctypes.c_int),
('shape', ctypes.c_int * n),
('strides', ctypes.c_int * n))
return CArray
_carrays = [_make_carray(i) for i in six.moves.range(MAX_NDIM)]
def to_carray(data, size, shape, strides):
return _carrays[len(shape)](data, size, shape, strides)
def _make_cindexer(n):
class CIndexer(ctypes.Structure):
_fields_ = (('size', ctypes.c_int),
('shape', ctypes.c_int * n),
('index', ctypes.c_int * n))
return CIndexer
_cindexers = [_make_cindexer(i) for i in six.moves.range(MAX_NDIM)]
def to_cindexer(size, shape):
return _cindexers[len(shape)](size, shape, (0,) * len(shape))
class Indexer(object):
def __init__(self, shape):
size = 1
for s in shape:
size *= s
self.shape = shape
self.size = size
@property
def ndim(self):
return len(self.shape)
@property
def ctypes(self):
return to_cindexer(self.size, self.shape)
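# Usage sketch (illustrative, not part of the original module): Indexer mirrors an
# array shape and exposes the matching ctypes structure for kernel launches.
#
#   idx = Indexer((3, 4))
#   idx.size    # 12
#   idx.ndim    # 2
#   idx.ctypes  # CIndexer with size=12, shape=(3, 4) and a zeroed index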
_header_source = None
def _get_header_source():
global _header_source
if _header_source is None:
header_path = os.path.join(os.path.dirname(__file__), 'carray.cuh')
with open(header_path) as header_file:
_header_source = header_file.read()
return _header_source
def compile_with_cache(source, options=(), arch=None, cache_dir=None):
    source = _get_header_source() + source
    return cuda.compile_with_cache(source, options, arch, cache_dir)
|
# Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Optional, Tuple, Any, Callable, Iterable
import numpy as np
import jax
from jax import numpy as jnp
from flax import linen as nn
from netket.hilbert import AbstractHilbert
from netket.graph import AbstractGraph, SymmGroup
from netket.utils import HashableArray
from netket.utils.types import PRNGKeyT, Shape, DType, Array, NNInitFunc
from netket import nn as nknn
from netket.nn.initializers import lecun_complex, zeros, variance_scaling
class GCNN(nn.Module):
"""Implements a Group Convolutional Neural Network (G-CNN) that outputs a wavefunction
that is invariant over a specified symmetry group.
The G-CNN is described in ` Cohen et. {\it al} <http://proceedings.mlr.press/v48/cohenc16.pdf>`_
and applied to quantum many-body problems in ` Roth et. {\it al} <https://arxiv.org/pdf/2104.05085.pdf>`_.
The G-CNN alternates convolution operations with pointwise non-linearities. The first
layer is symmetrized linear transform given by DenseSymm, while the other layers are
G-convolutions given by DenseEquivariant. The hidden layers of the G-CNN are related by
the following equation:
.. math ::
{\bf f}^{i+1}_h = \Gamma( \sum_h W_{g^{-1} h} {\bf f}^i_h).
"""
symmetries: Union[HashableArray, SymmGroup]
"""A group of symmetry operations (or array of permutation indices) over which the layer should be invariant.
Numpy/Jax arrays must be wrapped into an :class:`netket.utils.HashableArray`.
"""
layers: int
"""Number of layers (not including sum layer over output)."""
features: Union[Tuple, int]
"""Number of features in each layer starting from the input. If a single number is given,
all layers will have the same number of features."""
flattened_product_table: Optional[HashableArray] = None
"""Flattened product table generated by SymmGroup.product_table().ravel()
that specifies the product of the group with its inverse"""
dtype: Any = np.float64
"""The dtype of the weights."""
activation: Any = jax.nn.selu
"""The nonlinear activation function between hidden layers."""
output_activation: Any = None
"""The nonlinear activation before the output."""
use_bias: bool = True
"""if True uses a bias in all layers."""
precision: Any = None
"""numerical precision of the computation see `jax.lax.Precision`for details."""
kernel_init: NNInitFunc = variance_scaling(1.0, "fan_in", "normal")
"""Initializer for the Dense layer matrix."""
bias_init: NNInitFunc = zeros
"""Initializer for the hidden bias."""
def setup(self):
self.n_symm = np.asarray(self.symmetries).shape[0]
if self.flattened_product_table is None and not isinstance(
self.symmetries, SymmGroup
):
raise AttributeError(
"product table must be specified if symmetries are given as an array"
)
if self.flattened_product_table is None:
flat_pt = HashableArray(self.symmetries.product_table().ravel())
else:
flat_pt = self.flattened_product_table
if not np.asarray(flat_pt).shape[0] == np.square(self.n_symm):
raise ValueError("Flattened product table must have shape [n_symm*n_symm]")
if isinstance(self.features, int):
feature_dim = [self.features for layer in range(self.layers)]
else:
if not len(self.features) == self.layers:
raise ValueError(
"""Length of vector specifying feature dimensions must be the same as the number of layers"""
)
feature_dim = self.features
self.dense_symm = nknn.DenseSymm(
symmetries=self.symmetries,
features=feature_dim[0],
dtype=self.dtype,
use_bias=self.use_bias,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
precision=self.precision,
)
self.equivariant_layers = [
nknn.DenseEquivariant(
symmetry_info=flat_pt,
in_features=feature_dim[layer],
out_features=feature_dim[layer + 1],
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
kernel_init=self.kernel_init,
bias_init=self.bias_init,
)
for layer in range(self.layers - 1)
]
@nn.compact
def __call__(self, x_in):
x = self.dense_symm(x_in)
for layer in range(self.layers - 1):
x = self.activation(x)
x = self.equivariant_layers[layer](x)
        if self.output_activation is not None:
x = self.output_activation(x)
# variance scaling for output layer
x = jnp.sum(x, axis=-1) / np.sqrt(x.shape[-1])
return x
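# Usage sketch (assumptions: a NetKet graph `g` whose `automorphisms()` yields a
# SymmGroup, and sample configurations `x` of shape (batch, n_sites); the names
# below are illustrative and not part of this module):
#
#   import netket as nk
#   g = nk.graph.Chain(length=8)
#   model = GCNN(symmetries=g.automorphisms(), layers=2, features=4)
#   params = model.init(jax.random.PRNGKey(0), x)
#   log_psi = model.apply(params, x)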
|
from utils import pmts
from collections import namedtuple
from kivy.clock import Clock
from kivy.core.text import Label
from kivy.graphics import Color, Rectangle
from kivy.uix.widget import Widget
from kivy.metrics import pt
from kivy.uix.behaviors.focus import FocusBehavior
from annotations import Annotation
from channel import Channel, ClosableChannel
from dsn.editor.clef import (
CursorChild,
CursorDFS,
CursorParent,
CursorSet,
EDelete,
EncloseWithParent,
InsertNodeChild,
InsertNodeSibbling,
MoveSelectionChild,
MoveSelectionSibbling,
LeaveChildrenBehind,
TextInsert,
TextReplace,
SwapSibbling,
)
from dsn.editor.construct import edit_note_play
from dsn.editor.structure import EditStructure
from dsn.pp.clef import PPUnset, PPSetSingleLine, PPSetMultiLineAligned, PPSetMultiLineIndented
from dsn.pp.construct import construct_pp_tree
from dsn.pp.in_context import (
construct_iri_top_down,
InheritedRenderingInformation,
IriAnnotatedSExpr,
MULTI_LINE_ALIGNED,
MULTI_LINE_INDENTED,
)
from s_address import node_for_s_address
from spacetime import t_address_for_s_address, best_s_address_for_t_address, get_s_address_for_t_address
from dsn.s_expr.clef import Note
from dsn.s_expr.structure import Atom, List
from dsn.s_expr.construct import play_score
from dsn.s_expr.score import Score
from dsn.s_expr.utils import bubble_history_up
from vim import Vim, DONE_SAVE, DONE_CANCEL
from widgets.utils import (
annotate_boxes_with_s_addresses,
apply_offset,
cursor_dimensions,
from_point,
no_offset,
BoxNonTerminal,
BoxTerminal,
bring_into_offset,
OffsetBox,
X,
Y,
)
from widgets.layout_constants import (
get_font_size,
set_font_size,
PADDING,
MARGIN,
)
from colorscheme import (
AQUA_GREEN,
BLACK,
LAUREL_GREEN,
WHITE,
)
from dsn.viewports.structure import ViewportStructure, VRTC, ViewportContext
from dsn.viewports.construct import play_viewport_note
from dsn.viewports.clef import (
ViewportContextChange,
MoveViewportRelativeToCursor,
CURSOR_TO_BOTTOM,
CURSOR_TO_CENTER,
CURSOR_TO_TOP,
ELSEWHERE,
HERE,
VIEWPORT_LINE_DOWN,
VIEWPORT_LINE_UP,
)
from dsn.selection.clef import AttachDetach, SwitchToOtherEnd, ClearSelection, SelectionContextChange
from dsn.selection.construct import selection_note_play
from dsn.selection.structure import Selection
# TSTTCPW for keeping track of the state of our single-line 'vim editor'
VimDS = namedtuple('VimDS', (
'insert_or_replace', # "I", "R"
's_address',
'vim' # a vim.py object
))
# These are standard Python (and common sense); still... one might occasionally be tempted to think that 'before' is
# modelled as -1 rather than 0, which is why I made the correct indexes constants
INSERT_BEFORE = 0
INSERT_AFTER = 1
class TreeWidget(FocusBehavior, Widget):
def __init__(self, **kwargs):
        # Here we keep track of whether we received a "closed" signal from the history_channel; if so we turn grey and
# immutable. (the latter is implemented, for now, by simply no longer listening to the keyboard).
#
# Another option would be to stay mutable (though make it obvious that the changes will not propagate), but not
# communicate any information back to the (closed) history_channel. A problem with that (in the nerf0
# architecture) is that "Possibilities" flow over the same channel. This is not possible once the channel is
# closed, and we'll fail to fetch hashes back from the shared HashStoreChannelListener.
self._invalidated = False
self.closed = False
self.m = kwargs.pop('m')
# Note that we are mis-using history_channel somewhat. The idea of "channels" implies that there is a single
        # channel, that multiple receivers may tune into, on which messages of a certain single type are being broadcast.
# These channels are so constructed that the sender itself does not get its own message back, but all others do.
# Here, there are only 2 participants, that we might call a "parent" and a "child"; (we might use such
# terminology, because we always construct the actual parent, child pairs in a tree-like structure only)
#
# In our present configuration, such parent/child pairs are:
# * parent: the file (on disk); child: the "main tree".
# * parent: the tree in which a new window was created (pressing "n"); child: that new window.
#
# Because there are only 2 participants in each usage of history-channel, each send message has only one receive
# handler that accepts its messages. And that opens up the possibility for a different, non-designed usage of
# the channel: one in which the messages sent over the channel by the 2 participants are not of the same type.
# This non-designed usage has become the actual usage:
# * Down the hierarchy (parent to child) we send scores.
# * Up the hierarchy (child to parent) we send individual notes.
# Refactoring directions could be: [a] harmonizing the sent messages [b] replacing the channel with simple
        # send/receive pairs. For now, we leave this note as the caveat.
self.history_channel = kwargs.pop('history_channel')
super(TreeWidget, self).__init__(**kwargs)
# There is no (proper) initial EditStructure, e.g. there is no initial tree. This lack of structure is assumed
# to be filled "immediately" after __init__, by some notes flowing in over the connected channels.
        # As an implication of this, some of the tree-dependent datastructures are in an initially-uninitialized state
        # too, e.g. viewport_ds has no meaningful ViewportContext yet, because we don't know it yet
self.ds = EditStructure(None, [], [], None)
self.vim_ds = None
# at some point, we should generalize over "next keypress handlers" such as vim_ds & z_pressed
self.z_pressed = False
self.viewport_ds = ViewportStructure(
ViewportContext(0, 0, 0, 0),
VRTC(0), # The viewport starts out with the cursor on top.
)
self.selection_ds = Selection(
context=self.ds,
exists=False, # no initial selection
attached_to=None,
edge_0=None,
edge_1=None,
)
self.notify_children = {}
self.next_channel_id = 0
self.cursor_channel = Channel()
# See remarks about `history_channel` above
self.send_to_channel, _ = self.history_channel.connect(self.receive_from_channel, self.channel_closed)
self.bind(pos=self.invalidate)
self.bind(size=self.size_change)
self.bind(focus=self.on_focus_change)
# ## Section for channel-communication
def receive_from_channel(self, data):
pmts(data, Score)
t_cursor = t_address_for_s_address(self.ds.tree, self.ds.s_cursor)
new_tree = play_score(self.m, data)
# refetching the s_cursor via t_cursor ensures the current cursor is unaffected by changes in other windows.
s_cursor = best_s_address_for_t_address(new_tree, t_cursor)
self._update_internal_state_for_score(data, s_cursor, ELSEWHERE)
def channel_closed(self):
self.closed = True
self._construct_box_structure()
self._update_viewport_for_change(change_source=ELSEWHERE)
self.invalidate()
def broadcast_cursor_update(self, t_address):
self.cursor_channel.broadcast(t_address)
def _handle_edit_note(self, edit_note):
new_s_cursor, not_quite_score, error = edit_note_play(self.ds, edit_note)
# While converting from nerf0 to nerf1, I had some worries about the right shape for edit_note_play's output in
# terms of a score. (In nerf0 edit_note_play's output was posacts, but I judged that inelegant in the nerf1
# setup). The question is: what _is_ elegant then?
# For now, I've chosen: edit_note_play communicates a list of new notes; the present location is responsible for
# connecting those together into a new score. Why this solution? It at least has the advantage that it
# automatically means that playing edit notes can lead to extensions of our root node's score only.
# By the way, one could wonder where in nerf0 the 'latest nout hash' was stored as part of the Tree's DS, since
        # it's basically the single source of truth for "full history". Answer: it was in the rootnode's metadata.
# in nerf0 bubble_history_up simply looked at the root_node and created an actuality out of that node's
# nout_hash and any new information.
score = self.ds.tree.score
for note in not_quite_score:
self.send_to_channel(note)
score = score.slur(note)
self._update_internal_state_for_score(score, new_s_cursor, change_source=HERE)
def _handle_selection_note(self, selection_note):
self.selection_ds = selection_note_play(selection_note, self.selection_ds)
# Selection changes may affect the main structure (i.e. if the selection changes the cursor_position). This
# information flows back into the main structure here (which is also why change_source=HERE)
self.ds = self.selection_ds.context
self._construct_box_structure()
self._update_viewport_for_change(change_source=HERE)
self.invalidate()
def _update_selection_ds_for_main_ds(self):
# SelectionContextChange does not affect the main structure:
# In the general case, playing SelectionNotes may affect the main structure. (E.g. moving a selection affects
# the structure it's operating on). Playing the note SelectionContextChange, however, does by definition _not_
# affect the main structure: it represents precisely the case in which we notify the selection that the
# surrounding context has changed. `selection_note_play(SelectionContextChange...` is the only case of
# selection_note_play which needs not be followed by handling of state-changes to the wrapped main structure.
self.selection_ds = selection_note_play(SelectionContextChange(self.ds), self.selection_ds)
def _update_internal_state_for_score(self, score, new_s_cursor, change_source):
new_tree = play_score(self.m, score)
self.ds = EditStructure(
new_tree,
new_s_cursor,
self.ds.pp_annotations[:],
construct_pp_tree(new_tree, self.ds.pp_annotations)
)
self._update_selection_ds_for_main_ds()
self._construct_box_structure()
self._update_viewport_for_change(change_source=change_source)
self.invalidate()
for notify_child in self.notify_children.values():
notify_child()
# TODO we only really need to broadcast the new t_cursor if it has changed.
self.broadcast_cursor_update(t_address_for_s_address(self.ds.tree, self.ds.s_cursor))
def keyboard_on_textinput(self, window, text):
FocusBehavior.keyboard_on_textinput(self, window, text)
self.generalized_key_press(text)
return True
def keyboard_on_key_down(self, window, keycode, text, modifiers):
FocusBehavior.keyboard_on_key_down(self, window, keycode, text, modifiers)
code, textual_code = keycode
if modifiers == ['ctrl'] and textual_code in ['e', 'y']:
# For now, these are the only ctrl-key keys we handle; once we get more of those, they should get a better
# home.
note = MoveViewportRelativeToCursor({'e': VIEWPORT_LINE_UP, 'y': VIEWPORT_LINE_DOWN}[textual_code])
self.viewport_ds = play_viewport_note(note, self.viewport_ds)
self.invalidate()
return True
also_on_textinput = (
[chr(ord('a') + i) for i in range(26)] + # a-z
[chr(ord('0') + i) for i in range(10)] + # 0-9
['`', '-', '=', ',', '.', '?', '/', "'", ':', ';', '\\', 'spacebar', 'tab', ']', '['])
# these are modifier-keys; we don't independently deal with them, so we ignore them explicitly.
# on my system, right-alt and right-super are not recognized at present; they show up as '' here;
# (their keycodes are respectively 1073741925 and 1073742055)
modifier_keys = ['alt', 'alt-gr', 'lctrl', 'rctrl', 'rshift', 'shift', 'super', '']
if textual_code not in modifier_keys + also_on_textinput:
self.generalized_key_press(textual_code)
return True
def keyboard_on_key_up(self, window, keycode):
"""FocusBehavior automatically defocusses on 'escape'. This is undesirable, so we override without providing any
behavior ourselves."""
return True
def generalized_key_press(self, textual_code):
"""
Kivy's keyboard-handling is lacking in documentation (or I cannot find it).
Some (partially) open questions are:
Q: what are the possible values for keyboard_on_key_down's `keycode` parameter?
A (partial): It's a tuple: code, textual_code = keycode; code is a number, textual_code is a textual
representation of it (which, according to the source, is stolen from pygame, although I cannot find the
original source)
A (partial): as far as I understand it: the system's keycode gets passed straight to keyboard_on_key_down, via
        kivy.core.Window; same for keyboard_on_textinput, but with text in that case.
Q: What is the relationship between keyboard_on_textinput and keyboard_on_key_down?
A (partial): https://groups.google.com/forum/#!topic/kivy-users/iYwK2uBOZPM
I need at least the following:
1. know when the alpha-numeric & interpunction keys are pressed.
2. the combined effect of combining shift (sometimes this is .upper; but note that this is not so for digits)
with such a key.
3. know that "special keys" such as the cursor keys and escape are pressed.
keyboard_on_key_down provides us with 1 & 3, but not 2; keyboard_on_textinput provides us with 1 & 2 but not 3.
In cases where keyboard_on_key_down provides us with insufficient information, which has an equivalent in
keyboard_on_textinput we ignore it. This is done by explicitly enumerating those cases.
I have _no idea_ how this generalizes to any keyboard other than the one I happen to be typing on... But for
now I'm just going to push forward to something that I _do_ understand (and is documented) so that we can at
least build on that.
"""
if self.vim_ds is not None:
self.vim_ds.vim.send(textual_code)
if self.vim_ds.vim.done == DONE_SAVE:
self.apply_and_close_vim()
elif self.vim_ds.vim.done == DONE_CANCEL:
self.vim_ds = None
self._construct_box_structure()
self._update_viewport_for_change(change_source=HERE)
self.invalidate()
return
if self.z_pressed:
self.z_pressed = False
if textual_code in ['z', 'b', 't']:
lookup = {
'z': CURSOR_TO_CENTER,
'b': CURSOR_TO_BOTTOM,
't': CURSOR_TO_TOP,
}
note = MoveViewportRelativeToCursor(lookup[textual_code])
self.viewport_ds = play_viewport_note(note, self.viewport_ds)
self.invalidate()
elif textual_code in ['z']:
self.z_pressed = True
elif textual_code in ['-']:
# quick & dirty all-around
set_font_size(get_font_size() - 1)
self.m.texture_for_text = {}
self.invalidate()
elif textual_code in ['+']:
# quick & dirty all-around
set_font_size(get_font_size() + 1)
self.m.texture_for_text = {}
self.invalidate()
elif textual_code in ['left', 'h']:
self._handle_edit_note(CursorParent())
elif textual_code in ['right', 'l']:
self._handle_edit_note(CursorChild())
elif textual_code in ['up', 'k']:
self._handle_edit_note(CursorDFS(-1))
elif textual_code in ['down', 'j']:
self._handle_edit_note(CursorDFS(1))
if self.closed:
# See the remarks in __init__
return
if textual_code in ['n']:
self._create_child_window()
elif textual_code in ['q']:
self._add_sibbling_text(INSERT_BEFORE)
elif textual_code in ['w']:
self._add_child_text()
elif textual_code in ['e']:
self._add_sibbling_text(INSERT_AFTER)
elif textual_code in ['a']:
self._handle_edit_note(InsertNodeSibbling(INSERT_BEFORE))
elif textual_code in ['s']:
self._handle_edit_note(InsertNodeChild())
elif textual_code in ['d']:
self._handle_edit_note(InsertNodeSibbling(INSERT_AFTER))
elif textual_code in ['A', 'S', 'D']:
if not self.selection_ds.exists:
return
if textual_code in ['A']:
self._handle_edit_note(
MoveSelectionSibbling(self.selection_ds.edge_0, self.selection_ds.edge_1, INSERT_BEFORE))
elif textual_code in ['S']:
self._handle_edit_note(
MoveSelectionChild(self.selection_ds.edge_0, self.selection_ds.edge_1))
elif textual_code in ['D']:
self._handle_edit_note(MoveSelectionSibbling(
self.selection_ds.edge_0, self.selection_ds.edge_1, INSERT_AFTER))
elif textual_code in ['x', 'del']:
self._handle_edit_note(EDelete())
# All the keys I've picked so far are quite arbitrary, and will at some point become configurable. Admittedly,
# the 3 keys below are the worst choices so far.
elif textual_code in ['u', 'i', 'o', 'p']:
pp_map = {
'u': PPUnset,
'i': PPSetSingleLine,
'o': PPSetMultiLineAligned,
'p': PPSetMultiLineIndented,
}
pp_note_type = pp_map[textual_code]
self._change_pp_style(pp_note_type)
elif textual_code in ['K']:
self._handle_edit_note(SwapSibbling(-1))
elif textual_code in ['J']:
self._handle_edit_note(SwapSibbling(1))
elif textual_code in ['<']:
self._handle_edit_note(LeaveChildrenBehind())
elif textual_code in ['>']:
self._handle_edit_note(EncloseWithParent())
elif textual_code in ['v']:
self._handle_selection_note(AttachDetach())
elif textual_code in ['V']:
self._handle_selection_note(SwitchToOtherEnd())
elif textual_code in ['b']:
self._handle_selection_note(ClearSelection())
def on_focus_change(self, widget, focus):
if not focus and self.vim_ds is not None:
# When defocussing, we close the current Vim mode (if any). The reasoning is as such: while in vim-mode we
# are "in between states"; i.e. we're not in any state represented in the s_expr clef.
# When we defocus (and go to a different "window"), such a window may produce new notes through its channel,
            # which we must then handle. It is not immediately clear how to do that from the intermediate state. Also: from
# the UX perspective it "feels natural" (i.e. is a common pattern) that a switch-away removes some aspects
# of the cursor. I.e. compare how Text inputs stop blinking their carret when they're not in focus.
# This solution may in fact not be a permanent one: it relies on the assumption that being in focus is
# directly related to being the only source of new notes. Such an assumption may break when implementing a
# "distributed editor", in which case such notes may come in at any given time.
# Thoughts about problems & solutions for such situations, once they would occur:
# Problems:
# * Some notes may change the text that's also currently being edited by the vim mode.
# * Some notes may affect where the cursor is (e.g. an insert in the parent with a lower index than yours)
# Potential solutions:
            # * fully block while in this funny mode
# * Using t-addresses to denote where vim-mode is. (this is harder for insertions, because they don't have
            # a t-address yet; a solution to that could be: insertions are always immediate, followed by an edit)
self.apply_and_close_vim()
def apply_and_close_vim(self):
"""apply the vim_ds, and close it:"""
if self.vim_ds.insert_or_replace == "I":
# At present, the editor's clef's TextInsert has an interface very similar to the s_expression's clef's
# BecomeAtom, namely: (address, index, text). This means we're doing a split of the vim_ds.s_address at the
# present point. Alternatively, we could change TextInsert to have a single s_address and apply the split at
# the point of construction.
self._handle_edit_note(
TextInsert(self.vim_ds.s_address[:-1], self.vim_ds.s_address[-1], self.vim_ds.vim.text))
else:
self._handle_edit_note(TextReplace(self.vim_ds.s_address, self.vim_ds.vim.text))
self.vim_ds = None
def _change_pp_style(self, pp_note_type):
t_address = t_address_for_s_address(self.ds.tree, self.ds.s_cursor)
pp_note = pp_note_type(t_address)
annotation = Annotation(self.ds.tree.score, pp_note)
pp_annotations = self.ds.pp_annotations[:] + [annotation]
pp_tree = construct_pp_tree(self.ds.tree, pp_annotations)
self.ds = EditStructure(
self.ds.tree,
self.ds.s_cursor,
pp_annotations,
pp_tree,
)
self._update_selection_ds_for_main_ds()
self._construct_box_structure()
# change_source=ELSEWHERE: this matches with the desirable behavior: you want the cursor to stay in place, and
# revolve the layout-changes around it.
self._update_viewport_for_change(change_source=ELSEWHERE)
self.invalidate()
def _child_channel_for_t_address(self, t_address):
child_channel = ClosableChannel()
channel_id = self.next_channel_id
self.next_channel_id += 1
def receive_from_child(data):
pmts(data, Note)
s_address = get_s_address_for_t_address(self.ds.tree, t_address)
if s_address is None:
# the child represents dead history; its updates are silently ignored.
# in practice this "shouldn't happen" in the current version, because closed children no longer
# communicate back to us.
return
note = bubble_history_up(data, self.ds.tree, s_address)
# TODO: new_s_cursor should be determined by looking at the pre-change tree, deducing a t_cursor and
# then setting the new s_cursor based on the t_cursor and the new tree; this is made more
# complicated because of the current choices in methods (s_cursor-setting integrated w/
# tree-creation)
# NERF-1 worry: in nerf0 we communicated over the child channel using "any nout_hash", i.e. potentially a
# full new history. Here we assume note-by-note instead (which implies: history is consecutive). The worry
# is: this only works if the child channel faithfully communicates all notes in order.
score = self.ds.tree.score.slur(note)
self._update_internal_state_for_score(score, self.ds.s_cursor, change_source=ELSEWHERE)
def receive_close_from_child():
del self.notify_children[channel_id]
send_to_child, close_child = child_channel.connect(receive_from_child, receive_close_from_child)
def notify_child():
# Optimization (and: mental optimization) notes: The present version of notify_child takes no arguments.
# It simply looks at the latest version of the tree, calculates the node where the child lives and sends
# that node's hash to the child widget. This also means that we send information to the child when
# really nothing changed.
# However, in practice this function is called precisely when new information about the latest hash (for
# the root node) is available. We could therefore:
# a] figure out the differences between the 2 hashes (in terms of historiography's live & dead
# operations)
# b] figure out which t_addresses are affected by these changes.
            # c] send the update-information only to children that listen to those addresses.
            # (because a change to a child node always affects all its ancestors, there is no need to be smart
# here, it's enough to send the precise match information)
#
# This has the additional advantage of being cleaner for the case of deletions: in the optimized
# algorithm, the deletion is always automatically the last bit of information that happens at a
# particular t_address (further changes cannot affect the address); [caveats may apply for deletions
# that become dead because they are on a dead branch]
s_address = get_s_address_for_t_address(self.ds.tree, t_address)
if s_address is None:
# as it stands, it's possible to call close_child() multiple times (which we do). This is ugly but
# it works (the calls are idempotent)
close_child()
# nothing to send to the child, the child represents dead history
return
node = node_for_s_address(self.ds.tree, s_address)
send_to_child(node.score) # this kind of always-send behavior can be optimized
self.notify_children[channel_id] = notify_child
return child_channel, send_to_child, close_child
def _create_child_window(self):
child_lives_at_t_address = t_address_for_s_address(self.ds.tree, self.ds.s_cursor)
child_channel, _, _ = self._child_channel_for_t_address(child_lives_at_t_address)
cursor_node = node_for_s_address(self.ds.tree, self.ds.s_cursor)
new_widget = self.report_new_tree_to_app(child_channel)
new_widget.receive_from_channel(cursor_node.score)
new_widget.report_new_tree_to_app = self.report_new_tree_to_app
def invalidate(self, *args):
if not self._invalidated:
Clock.schedule_once(self.refresh, -1)
self._invalidated = True
def _update_viewport_for_change(self, change_source):
cursor_position, cursor_size = cursor_dimensions(self.box_structure, self.ds.s_cursor)
# In the below, all sizes and positions are brought into the positive integers; there is a mirroring `+` in the
# offset calculation when we actually apply the viewport.
context = ViewportContext(
document_size=self.box_structure.underlying_node.outer_dimensions[Y] * -1,
viewport_size=self.size[Y],
cursor_size=cursor_size * -1,
cursor_position=cursor_position * -1)
note = ViewportContextChange(
context=context,
change_source=change_source,
)
self.viewport_ds = play_viewport_note(note, self.viewport_ds)
def size_change(self, *args):
self._update_viewport_for_change(change_source=ELSEWHERE)
self.invalidate()
def _construct_box_structure(self):
self.box_structure = annotate_boxes_with_s_addresses(self._nts_for_pp_annotated_node(self.ds.pp_tree), [])
def refresh(self, *args):
"""refresh means: redraw (I suppose we could rename, but I believe it's "canonical Kivy" to use 'refresh')"""
self.canvas.clear()
with self.canvas:
if self.closed:
Color(0.5, 0.5, 0.5, 1)
else:
Color(1, 1, 1, 1)
Rectangle(pos=self.pos, size=self.size,)
self.offset = (self.pos[X], self.pos[Y] + self.size[Y] + self.viewport_ds.get_position())
with apply_offset(self.canvas, self.offset):
self._render_box(self.box_structure.underlying_node)
self._invalidated = False
def on_touch_down(self, touch):
# see https://kivy.org/docs/guide/inputs.html#touch-event-basics
# Basically:
# 1. Kivy (intentionally) does not limit its passing of touch events to widgets that it applies to, you
# need to do this youself
# 2. You need to call super and return its value
ret = super(TreeWidget, self).on_touch_down(touch)
if not self.collide_point(*touch.pos):
return ret
self.focus = True
clicked_item = from_point(self.box_structure, bring_into_offset(self.offset, (touch.x, touch.y)))
if clicked_item is not None:
if self.vim_ds:
                # Clicking on any node closes vim; we do this before cursor-set to avoid undoing the cursor-set.
                # (We don't have a conditional here "only if another node was clicked"; this is because in "Insert" mode
                # vim_ds messes up our addressing. The real solution is likely: remove the special case for Insert
                # mode.)
self.apply_and_close_vim()
self._handle_edit_note(CursorSet(clicked_item.annotation))
return True
# ## Edit-actions that need further user input (i.e. Text-edits)
def _add_child_text(self):
cursor_node = node_for_s_address(self.ds.tree, self.ds.s_cursor)
if not isinstance(cursor_node, List):
# edit this text node
self.vim_ds = VimDS("R", self.ds.s_cursor, Vim(cursor_node.atom, 0))
self._construct_box_structure()
self.invalidate()
return
# create a child node, and edit that
index = len(cursor_node.children)
self.vim_ds = VimDS("I", self.ds.s_cursor + [index], Vim("", 0))
self._construct_box_structure()
self.invalidate()
def _add_sibbling_text(self, direction):
if self.ds.s_cursor == []:
return # adding sibblings to the root is not possible (it would lead to a forest)
# because direction is in [0, 1]... no need to minimize/maximize (PROVE!)
self.vim_ds = VimDS("I", self.ds.s_cursor[:-1] + [self.ds.s_cursor[-1] + direction], Vim("", 0))
self._construct_box_structure()
self.invalidate()
# ## Section for drawing boxes
def _t_for_text(self, text, colors):
fg, bg = colors
text_texture = self._texture_for_text(text)
content_height = text_texture.height
content_width = text_texture.width
top_left = 0, 0
bottom_left = (top_left[X], top_left[Y] - MARGIN - PADDING - content_height - PADDING - MARGIN)
bottom_right = (bottom_left[X] + MARGIN + PADDING + content_width + PADDING + MARGIN, bottom_left[Y])
instructions = [
Color(*bg),
Rectangle(
pos=(bottom_left[0] + MARGIN, bottom_left[1] + MARGIN),
size=(content_width + 2 * PADDING, content_height + 2 * PADDING),
),
Color(*fg),
Rectangle(
pos=(bottom_left[0] + MARGIN + PADDING, bottom_left[1] + MARGIN + PADDING),
size=text_texture.size,
texture=text_texture,
),
]
return BoxTerminal(instructions, bottom_right)
def _t_for_vim(self, vim):
# This was created as an ad hoc copy/pasta of _t_for_text. As it stands, it is not an obvious candidate for
# factoring out commonalities (there aren't that many) but once the opportunity arises we should take it.
texts = [
vim.text[:vim.cursor_pos],
vim.text[vim.cursor_pos:vim.cursor_pos + 1],
vim.text[vim.cursor_pos + 1:]]
if len(vim.text) == vim.cursor_pos:
# if the cursor-position is to the right of the rightmost character (appending to the line), we need some
# 'imaginary' (not actually drawn, but used for measurements) text as a placeholder.
texts[1] = '▨'
text_textures = [self._texture_for_text(text) for text in texts]
content_height = max([text_texture.height for text_texture in text_textures])
content_width = sum([text_texture.width for text_texture in text_textures])
top_left = 0, 0
bottom_left = (top_left[X], top_left[Y] - MARGIN - PADDING - content_height - PADDING - MARGIN)
bottom_right = (bottom_left[X] + MARGIN + PADDING + content_width + PADDING + MARGIN, bottom_left[Y])
instructions = [
Color(*AQUA_GREEN),
Rectangle(
pos=(bottom_left[0] + MARGIN, bottom_left[1] + MARGIN),
size=(content_width + 2 * PADDING, content_height + 2 * PADDING),
),
]
offset_x = bottom_left[0] + MARGIN + PADDING
offset_y = bottom_left[1] + MARGIN + PADDING
for i, text_texture in enumerate(text_textures):
if i == 1: # i.e. the cursor
instructions.extend([
Color(*WHITE),
Rectangle(
pos=(offset_x, offset_y),
size=text_texture.size,
),
])
# if this is the cursor, and the cursor is a fake character, don't actually draw it.
is_cursor_eol = (i == 1 and len(vim.text) == vim.cursor_pos)
if not is_cursor_eol:
instructions.extend([
Color(*(BLACK if i == 1 else WHITE)),
Rectangle(
pos=(offset_x, offset_y),
size=text_texture.size,
texture=text_texture,
),
])
offset_x += text_texture.width
return BoxTerminal(instructions, bottom_right)
def colors_for_cursor(self, is_cursor, is_selection):
if is_cursor:
return WHITE, BLACK
if is_selection:
return WHITE, LAUREL_GREEN
return BLACK, WHITE
def _nts_for_pp_annotated_node(self, pp_annotated_node):
iri_annotated_node = construct_iri_top_down(
pp_annotated_node,
# We start MULTI_LINE_ALIGNED (but if the first PP node is annotated otherwise the result will reflect that)
InheritedRenderingInformation(MULTI_LINE_ALIGNED),
IriAnnotatedSExpr,
)
if self.vim_ds is not None:
vim_nt = BoxNonTerminal([], [no_offset(self._t_for_vim(self.vim_ds.vim))])
exception = (self.vim_ds.s_address, self.vim_ds.insert_or_replace, vim_nt)
return self.bottom_up_construct_with_exception(self._nt_for_iri, exception, iri_annotated_node, [])
return self.bottom_up_construct(self._nt_for_iri, iri_annotated_node, [])
def bottom_up_construct_with_exception(self, f, exception, node, s_address):
"""like bottom_up_construct, but with a special case ("exception") for a single s_address.
In other words: a hack to enable the rendering of "current vim node" """
exception_s_address, exception_type, exception_value = exception
# If we're not on the exception's branch, we proceed as usual.
if exception_s_address[:len(s_address)] != s_address:
return self.bottom_up_construct(f, node, s_address)
constructed_children = []
for i, child in enumerate(node.children):
constructed_children.append(self.bottom_up_construct_with_exception(f, exception, child, s_address + [i]))
if exception_s_address[:-1] == s_address:
constructed_child = exception_value
if exception_type == 'R':
constructed_children[exception_s_address[-1]] = constructed_child
else:
constructed_children.insert(exception_s_address[-1], constructed_child)
return f(node, constructed_children, s_address)
def bottom_up_construct(self, f, node, s_address):
"""Somewhat similar to a generalized catamorphism over s_expr nodes, but not quite.
In particular: `f` is like the algebra; which is called over already-transformed children (and also given the
node)
Not quite, because in the process of recursing down the tree we construct a s_address, which is also passed to
the algebra.
Alternative solution: split out the top-down construction of the s_address, and do a pure catamorphism after
that.
But because the whole reason for that (displaying of cursor, selection) is subject to future change that's not
worthwhile now.
"""
children = [self.bottom_up_construct(f, child, s_address + [i]) for i, child in enumerate(node.children)]
return f(node, children, s_address)
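    # Illustrative algebra for bottom_up_construct (hypothetical, not used elsewhere):
    # counting the nodes of an annotated tree by summing the already-transformed children.
    #
    #   def count_nodes(node, transformed_children, s_address):
    #       return 1 + sum(transformed_children)
    #
    #   total = self.bottom_up_construct(count_nodes, annotated_root, [])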
def _nt_for_iri(self, iri_annotated_node, children_nts, s_address):
# in some future version, rendering of `is_cursor` in a different color should not be part of the main drawing
# mechanism, but as some separate "layer". The idea is: things that are likely to change should be drawn on top
# of things that are very stable (and can therefore be cached).
is_cursor = s_address == self.ds.s_cursor
# For now, we'll display only the selection's begin & end. Thinking about "what does this mean for the nodes
# lying 'in between'" is not quite trivial, because we're talking about a tree-structure. One possible answer
        # _could be_: the "in between in the DFS / alphabetical ordering", but it's not quite clear that this is always
# the right answer. One argument in favor is: this is the way you're navigating. I'll postpone the decision once
# we get some more cases of "how is the selection actually used?"
is_selection = s_address in [self.selection_ds.edge_0, self.selection_ds.edge_1]
if iri_annotated_node.annotation.multiline_mode == MULTI_LINE_ALIGNED:
f = self._nt_for_node_as_multi_line_aligned
elif iri_annotated_node.annotation.multiline_mode == MULTI_LINE_INDENTED:
f = self._nt_for_node_as_multi_line_indented
else: # SINGLE_LINE
f = self._nt_for_node_single_line
return f(iri_annotated_node, children_nts, is_cursor, is_selection)
def _nt_for_node_single_line(self, iri_annotated_node, children_nts, is_cursor, is_selection):
node = iri_annotated_node.underlying_node
if isinstance(node, Atom):
return BoxNonTerminal([], [no_offset(
self._t_for_text(node.atom, self.colors_for_cursor(is_cursor, is_selection)))])
t = self._t_for_text("(", self.colors_for_cursor(is_cursor, is_selection))
offset_terminals = [
no_offset(t),
]
offset_nonterminals = []
offset_right = t.outer_dimensions[X]
offset_down = 0
for nt in children_nts:
offset_nonterminals.append(OffsetBox((offset_right, offset_down), nt))
offset_right += nt.outer_dimensions[X]
t = self._t_for_text(")", self.colors_for_cursor(is_cursor, is_selection))
offset_terminals.append(OffsetBox((offset_right, offset_down), t))
return BoxNonTerminal(offset_nonterminals, offset_terminals)
def _nt_for_node_as_todo_list(self, iri_annotated_node, children_nts, is_cursor, is_selection):
node = iri_annotated_node.underlying_node
if isinstance(node, Atom):
return BoxNonTerminal([], [no_offset(self._t_for_text(
node.atom, self.colors_for_cursor(is_cursor, is_selection)))])
if len(children_nts) == 0:
return BoxNonTerminal([], [no_offset(self._t_for_text(
"* ...", self.colors_for_cursor(is_cursor, is_selection)))])
t = self._t_for_text("*", self.colors_for_cursor(is_cursor, is_selection))
nt = children_nts[0]
offset_nonterminals = [
OffsetBox((20, 0), nt)
]
offset_down = nt.outer_dimensions[Y]
offset_right = 50 # Magic number for indentation
for nt in children_nts[1:]:
offset_nonterminals.append(OffsetBox((offset_right, offset_down), nt))
offset_down += nt.outer_dimensions[Y]
return BoxNonTerminal(offset_nonterminals, [no_offset(t)])
def _nt_for_node_as_multi_line_aligned(self, iri_annotated_node, children_nts, is_cursor, is_selection):
# "Align with index=1, like so:.. (xxx yyy
# zzz)
pmts(iri_annotated_node, IriAnnotatedSExpr)
node = iri_annotated_node.underlying_node
if isinstance(node, Atom):
return BoxNonTerminal([], [no_offset(self._t_for_text(
node.atom, self.colors_for_cursor(is_cursor, is_selection)))])
t = self._t_for_text("(", self.colors_for_cursor(is_cursor, is_selection))
offset_right = t.outer_dimensions[X]
offset_down = 0
offset_terminals = [
no_offset(t),
]
offset_nonterminals = []
if len(children_nts) > 0:
nt = children_nts[0]
offset_nonterminals.append(
OffsetBox((offset_right, offset_down), nt)
)
offset_right += nt.outer_dimensions[X]
if len(children_nts) > 1:
for nt in children_nts[1:]:
offset_nonterminals.append(OffsetBox((offset_right, offset_down), nt))
offset_down += nt.outer_dimensions[Y]
# get the final drawn item to figure out where to put the closing ")"
last_drawn = nt.get_all_terminals()[-1]
offset_right += last_drawn.item.outer_dimensions[X] + last_drawn.offset[X]
# go "one line" back up
offset_down -= last_drawn.item.outer_dimensions[Y]
else:
offset_right = t.outer_dimensions[X]
t = self._t_for_text(")", self.colors_for_cursor(is_cursor, is_selection))
offset_terminals.append(OffsetBox((offset_right, offset_down), t))
return BoxNonTerminal(offset_nonterminals, offset_terminals)
def _nt_for_node_as_multi_line_indented(self, iri_annotated_node, children_nts, is_cursor, is_selection):
# "Indented with the equivalent of 2 spaces, like so:.. (xxx yyy
# zzz)
# TODO this is a pure copy/pasta with _nt_for_node_as_multi_line_aligned with alterations; factoring the
# commonalities out would be the proper course of action here.
pmts(iri_annotated_node, IriAnnotatedSExpr)
node = iri_annotated_node.underlying_node
if isinstance(node, Atom):
return BoxNonTerminal([], [no_offset(self._t_for_text(
node.atom, self.colors_for_cursor(is_cursor, is_selection)))])
if len(node.children) <= 2:
return self._nt_for_node_single_line(iri_annotated_node, children_nts, is_cursor, is_selection)
t = self._t_for_text("(", self.colors_for_cursor(is_cursor, is_selection))
offset_right_i0 = t.outer_dimensions[X]
offset_right_i2_plus = t.outer_dimensions[X] * 1.3 # ") " by approximation
offset_down = 0
offset_terminals = [
no_offset(t),
]
offset_nonterminals = []
nt = children_nts[0]
offset_nonterminals.append(OffsetBox((offset_right_i0, offset_down), nt))
offset_right_i1 = offset_right_i0 + nt.outer_dimensions[X]
nt = children_nts[1]
offset_nonterminals.append(OffsetBox((offset_right_i1, offset_down), nt))
offset_down += nt.outer_dimensions[Y]
for nt in children_nts[2:]:
offset_nonterminals.append(OffsetBox((offset_right_i2_plus, offset_down), nt))
offset_down += nt.outer_dimensions[Y]
# get the final drawn item to figure out where to put the closing ")"
last_drawn = nt.get_all_terminals()[-1]
offset_right = offset_right_i2_plus + last_drawn.item.outer_dimensions[X] + last_drawn.offset[X]
# go "one line" back up
offset_down -= last_drawn.item.outer_dimensions[Y]
t = self._t_for_text(")", self.colors_for_cursor(is_cursor, is_selection))
offset_terminals.append(OffsetBox((offset_right, offset_down), t))
return BoxNonTerminal(offset_nonterminals, offset_terminals)
def _render_box(self, box):
for o, t in box.offset_terminals:
with apply_offset(self.canvas, o):
for instruction in t.instructions:
self.canvas.add(instruction)
for o, nt in box.offset_nonterminals:
with apply_offset(self.canvas, o):
self._render_box(nt)
def _texture_for_text(self, text):
if text in self.m.texture_for_text:
return self.m.texture_for_text[text]
kw = {
'font_size': pt(get_font_size()),
# 'font_name': 'Oxygen',
'bold': False,
'anchor_x': 'left',
'anchor_y': 'top',
'padding_x': 0,
'padding_y': 0,
'padding': (0, 0)}
label = Label(text=text, **kw)
label.refresh()
self.m.texture_for_text[text] = label.texture
return label.texture
|
import argparse
import torch
def args_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='fmnist',
help="dataset we want to train on")
parser.add_argument('--num_agents', type=int, default=10,
help="number of agents:K")
parser.add_argument('--agent_frac', type=float, default=1,
help="fraction of agents per round:C")
parser.add_argument('--num_corrupt', type=int, default=0,
help="number of corrupt agents")
parser.add_argument('--rounds', type=int, default=200,
help="number of communication rounds:R")
parser.add_argument('--aggr', type=str, default='avg',
help="aggregation function to aggregate agents' local weights")
parser.add_argument('--local_ep', type=int, default=2,
help="number of local epochs:E")
parser.add_argument('--bs', type=int, default=256,
help="local batch size: B")
parser.add_argument('--client_lr', type=float, default=0.1,
help='clients learning rate')
parser.add_argument('--client_moment', type=float, default=0.9,
help='clients momentum')
parser.add_argument('--server_lr', type=float, default=1,
help='servers learning rate for signSGD')
parser.add_argument('--base_class', type=int, default=5,
help="base class for backdoor attack")
parser.add_argument('--target_class', type=int, default=7,
help="target class for backdoor attack")
parser.add_argument('--poison_frac', type=float, default=0.0,
help="fraction of dataset to corrupt for backdoor attack")
parser.add_argument('--pattern_type', type=str, default='plus',
help="shape of bd pattern")
parser.add_argument('--robustLR_threshold', type=int, default=0,
help="break ties when votes sum to 0")
parser.add_argument('--clip', type=float, default=0,
help="weight clip to -clip,+clip")
parser.add_argument('--noise', type=float, default=0,
help="set noise such that l1 of (update / noise) is this ratio. No noise if 0")
parser.add_argument('--top_frac', type=int, default=100,
help="compare fraction of signs")
parser.add_argument('--snap', type=int, default=1,
help="do inference in every num of snap rounds")
parser.add_argument('--device', default=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
help="To use cuda, set to a specific GPU ID.")
parser.add_argument('--num_workers', type=int, default=0,
help="num of workers for multithreading")
args = parser.parse_args()
    return args
|
author_special_cases = {
"Jeanette Hellgren Kotaleski": ("Jeanette", "Hellgren Kotaleski"),
"Hellgren Kotaleski J": ("Jeanette", "Hellgren Kotaleski"),
"João Pedro Santos": ("João Pedro", "Santos"),
"Yi Ming Lai": ("Yi Ming", "Lai"),
"Luis Georg Romundstad": ("Luis Georg", "Romundstad"),
"Johanna Frost Nylen": ("Johanna", "Frost Nylen"),
"Pål Gunnar Larsson": ("Pål Gunnar", "Larsson"),
"André Sevenius Nilsen": ("André Sevenius", "Nilsen"),
"Gabriel Andrés Fonseca Guerra": ("Gabriel Andrés", "Fonseca Guerra"),
"Pier Stanislao Paolucci": ("Pier Stanislao", "Paolucci"),
"Werner Van Geit": ("Werner", "Van Geit"),
"Sacha van Albada": ("Sacha", "van Albada"),
"Paolo Del Giudice": ("Paolo", "Del Giudice"),
"Ignazio De Blasi": ("Ignazio", "De Blasi"),
"Marc de Kamps": ("Marc", "de Kamps"),
"José Francisco Gómez González": ("José Francisco", "Gómez González"),
"Ivilin Peev Stoianov": ("Ivilin Peev", "Stoianov"),
"BBP-team": ("BBP", "team")
}
def resolve_name(full_name, verbose=False):
    if full_name in author_special_cases:
        first_name, last_name = author_special_cases[full_name]
    elif full_name[1:] in author_special_cases:
        first_name, last_name = author_special_cases[full_name[1:]]
else:
parts = full_name.strip().split(" ")
if len(parts) == 2:
first_name, last_name = parts
elif len(parts) == 3 and ("." in parts[1] or len(parts[1]) == 1 or parts[1] in ("van", "de", "di", "Del", "De")):
first_name = " ".join(parts[0:2])
last_name = parts[2]
else:
first_name, last_name = None, None
print("ERR: {}".format(full_name))
raise Exception(str(parts))
if last_name and verbose:
# logger.debug("Resolved {} to {}, {}".format(full_name, last_name, first_name))
print("Resolved {} to {}, {}".format(full_name, last_name, first_name))
return first_name, last_name
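# A minimal usage sketch of resolve_name; the names below are hypothetical
# examples chosen only to exercise each branch of the heuristic above.
def _demo_resolve_name():
    print(resolve_name("Jane Doe"))       # two parts -> ('Jane', 'Doe')
    print(resolve_name("John A. Smith"))  # dotted middle token joins the first name -> ('John A.', 'Smith')
    print(resolve_name("Marc de Kamps"))  # special-case lookup -> ('Marc', 'de Kamps')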
|
import cv2
import numpy as np
import sys
sys.path.append('../')
from object_detection.feature_extraction.main import extract_hog_features, extract_features_single_img
from object_detection.color_hist.main import color_hist
from object_detection.bin_spatial.main import bin_spatial
from object_detection.hog.main import HOG
def slide_window(img, x_start_stop=(None, None), y_start_stop=(None, None),
                 xy_window=(128, 128), #(64, 64), (96, 96)
                 xy_overlap=(0.5, 0.5)):
    # If x and/or y start/stop positions are not defined, default to the full
    # image extent. Work on copies so that neither the caller's sequences nor
    # the default values are mutated between calls.
    x_start_stop = list(x_start_stop)
    y_start_stop = list(y_start_stop)
    if x_start_stop[0] is None:
        x_start_stop[0] = 0
    if x_start_stop[1] is None:
        x_start_stop[1] = img.shape[1]
    if y_start_stop[0] is None:
        y_start_stop[0] = 0
    if y_start_stop[1] is None:
        y_start_stop[1] = img.shape[0]
# Compute the span of the region to be searched
xspan = x_start_stop[1] - x_start_stop[0]
yspan = y_start_stop[1] - y_start_stop[0]
# Compute the number of pixels per step in x/y
    nx_pix_per_step = int(xy_window[0]*(1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1]*(1 - xy_overlap[1]))
    # Compute the number of windows in x/y
    nx_buffer = int(xy_window[0]*(xy_overlap[0]))
    ny_buffer = int(xy_window[1]*(xy_overlap[1]))
    nx_windows = int((xspan-nx_buffer)/nx_pix_per_step)
    ny_windows = int((yspan-ny_buffer)/ny_pix_per_step)
# Initialize a list to append window positions to
window_list = []
# Loop through finding x and y window positions
# Note: you could vectorize this step, but in practice
# you'll be considering windows one by one with your
# classifier, so looping makes sense
for ys in range(ny_windows):
for xs in range(nx_windows):
# Calculate window position
startx = xs*nx_pix_per_step + x_start_stop[0]
endx = startx + xy_window[0]
starty = ys*ny_pix_per_step + y_start_stop[0]
endy = starty + xy_window[1]
# Append window position to list
window_list.append(((startx, starty), (endx, endy)))
# Return the list of windows
return window_list
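# A minimal usage sketch of slide_window, assuming a hypothetical 1280x720
# frame: 128x128 windows at 50% overlap step by 64 px, so the first two
# windows are ((0, 0), (128, 128)) and ((64, 0), (192, 128)).
def _demo_slide_window():
    dummy = np.zeros((720, 1280, 3), dtype=np.uint8)
    windows = slide_window(dummy, xy_window=(128, 128), xy_overlap=(0.5, 0.5))
    print(len(windows), windows[:2])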
def search_windows(img, windows, svc, scaler, color_space='RGB',
spatial_size=(32, 32), hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
hist_range=(0, 256),
bin_spatial_feat=True, color_hist_feat=True, hog_feat=True):
#1) Create an empty list to receive positive detection windows
on_windows = []
#2) Iterate over all windows in the list
for window in windows:
#3) Extract the test window from original image
test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
#4) Extract features for that window using single_img_features()
features = extract_features_single_img(test_img, cspace=color_space,
orient=orient, pix_per_cell=pix_per_cell,
hog_channel=hog_channel,
cell_per_block=cell_per_block,
spatial_size=spatial_size, hist_bins=hist_bins,
hist_range=hist_range,
bin_spatial_feat=bin_spatial_feat, color_hist_feat=color_hist_feat, hog_feat=hog_feat)
#5) Scale extracted features to be fed to classifier
test_features = scaler.transform(np.array(features).reshape(1, -1))
#6) Predict using your classifier
prediction = svc.predict(test_features)
#7) If positive (prediction == 1) then save the window
if prediction == 1:
on_windows.append(window)
#8) Return windows for positive detections
return on_windows
def find_vehicles(img, cspace, ystart, ystop, scale, svc, X_scaler,
orient, pix_per_cell, cell_per_block,
spatial_size, hist_bins):
draw_img = np.copy(img)
img = img.astype(np.float32)/255
hog = HOG(orient, pix_per_cell, cell_per_block)
img_tosearch = img[ystart:ystop,:,:]
if cspace != 'RGB':
if cspace == 'HSV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2HSV)
elif cspace == 'LUV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2LUV)
elif cspace == 'HLS':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2HLS)
elif cspace == 'YUV':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YUV)
elif cspace == 'YCrCb':
ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2YCrCb)
else:
ctrans_tosearch = np.copy(img_tosearch)
if scale != 1:
imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch,
                                     (int(imshape[1]/scale),
                                      int(imshape[0]/scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
# print('shape ch1:', ch1.shape[1], ch1.shape[0])
nxblocks = (ch1.shape[1] // pix_per_cell) + 1
nyblocks = (ch1.shape[0] // pix_per_cell) + 1
# print('nx, ny blocks:', nxblocks, nyblocks)
nfeat_per_block = orient*cell_per_block**2
# 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
window = 64
# print('cell_per_block', cell_per_block)
nblocks_per_window = (window // pix_per_cell) - 1
# print('nblocks_per_window:', nblocks_per_window)
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
nysteps = (nyblocks - nblocks_per_window) // cells_per_step
# Compute individual channel HOG features for the entire image
hog1 = hog.get_hog_features(ch1, feature_vec=False)
hog2 = hog.get_hog_features(ch2, feature_vec=False)
hog3 = hog.get_hog_features(ch3, feature_vec=False)
bbox_list=[]
# print("steps", nxsteps, nysteps)
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
# Extract the image patch
subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))
# Get color features
spatial_features = bin_spatial(subimg, color_space=cspace, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
# Scale features and make a prediction
test_features = X_scaler.transform(np.hstack((spatial_features,
hist_features,
hog_features)).reshape(1, -1))
            # Make a prediction
test_prediction = svc.predict(test_features)
showAllWindows = False
if test_prediction == 1 or showAllWindows:
                xbox_left = int(xleft*scale)
                ytop_draw = int(ytop*scale)
                win_draw = int(window*scale)
cv2.rectangle(draw_img,(xbox_left, ytop_draw+ystart),
(xbox_left+win_draw,ytop_draw+win_draw+ystart),
(255,0,0),8)
bbox_list.append(((xbox_left, ytop_draw+ystart),
(xbox_left+win_draw,ytop_draw+win_draw+ystart)))
    return bbox_list
|
import os
from pykgr.subroutines import import_from_string
class Builder(object):
data = None
def __init__(self, **kwargs):
self.data = BuilderData(**kwargs)
if not self.data.directory:
raise Exception("What can we do without a directory?")
if not os.path.exists(self.data.directory):
os.mkdir(self.data.directory)
def build(self, package_class):
        if isinstance(package_class, str):
package_class = import_from_string(package_class)
package_to_build = package_class()
package_to_build.__build__()
def build_toolchain(self):
binutils = import_from_string("base.packages.binutils.Binutils")
gcc = import_from_string("base.packages.gcc.Gcc")
self.build(binutils)
self.build(gcc)
def build_library(self):
lib = import_from_string("toolchain.glibc")
self.build(lib)
class BuilderData:
def __init__(self, **kwargs):
self.directory = None
self_keys = [row for row in dir(self) if "__" not in row and not callable(getattr(self, row))]
for key in kwargs:
if key in self_keys:
setattr(self, key, kwargs.get(key))
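# A minimal sketch of BuilderData's keyword filtering: only keys that match an
# existing attribute (just `directory` here) are kept; unknown keys are
# silently ignored. The path is a made-up example.
def _demo_builder_data():
    data = BuilderData(directory="/tmp/pykgr", unknown_key=123)
    assert data.directory == "/tmp/pykgr"
    assert not hasattr(data, "unknown_key")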
class BuilderLibrary:
# For maintaining a local glibc
    pass
|
def friend_or_foe(x):
return [ f for f in x if len(f) == 4 ]
import unittest
class TestFriendOrFoe(unittest.TestCase):
def test_friend_or_foe(self):
        self.assertEqual(
            friend_or_foe(['Ryan', 'Kieran', 'Mark']), ['Ryan', 'Mark'])
if __name__ == '__main__':
unittest.main()
|
import cv2
import numpy as np
import dlib
cap = cv2.VideoCapture(0)
#refer to the 68-face-landmarks-labeled-by-dlib-software-automatically.png to understand why certain coordinates are used to find certain parts of the face
detector = dlib.get_frontal_face_detector() #front face classifier
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat") #assigned coordinates of the face by DLIB
while True:
ret, frame = cap.read() #return status variable and the captured image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #grayscale image for detection filtering of eyes
faces = detector(gray) #array of all faces
for face in faces:
x, y = face.left(), face.top() # Top Left coordinates of face in window
x1, y1 = face.right(), face.bottom() # Bottom right coordinates of face in windows
cv2.rectangle(frame, (x,y), (x1,y1), (0,255,0), 2) # form a rectangle based on previous two coordinates
poslandmarkpoints = predictor(gray, face)
#Precision Tracking Of Eyes
#Left Eye Tracking
leftEyeTrack = np.array([(poslandmarkpoints.part(36).x,poslandmarkpoints.part(36).y),
(poslandmarkpoints.part(37).x,poslandmarkpoints.part(37).y),
(poslandmarkpoints.part(38).x,poslandmarkpoints.part(38).y),
(poslandmarkpoints.part(39).x,poslandmarkpoints.part(39).y),
(poslandmarkpoints.part(40).x,poslandmarkpoints.part(40).y),
(poslandmarkpoints.part(41).x,poslandmarkpoints.part(41).y)
],
np.int32)
#Right Eye Tracking
rightEyeTrack = np.array([(poslandmarkpoints.part(42).x,poslandmarkpoints.part(42).y),
(poslandmarkpoints.part(43).x,poslandmarkpoints.part(43).y),
(poslandmarkpoints.part(44).x,poslandmarkpoints.part(44).y),
(poslandmarkpoints.part(45).x,poslandmarkpoints.part(45).y),
(poslandmarkpoints.part(46).x,poslandmarkpoints.part(46).y),
(poslandmarkpoints.part(47).x,poslandmarkpoints.part(47).y)
],
np.int32)
# ================================ tracking left eye on new window ================================
lemin_x = np.min(leftEyeTrack[:, 0])
lemax_x = np.max(leftEyeTrack[:, 0])
lemin_y = np.min(leftEyeTrack[:, 1])
lemax_y = np.max(leftEyeTrack[:, 1])
left_eye = frame[lemin_y : lemax_y, lemin_x : lemax_x]
        left_eye = cv2.resize(left_eye, None, fx = 10, fy = 10) #fx and fy are the scale factors for the frame
cv2.imshow("Left Eye", left_eye)
# ================================ tracking left eye on new window ================================
# ================================ tracking right eye on new window ================================
remin_x = np.min(rightEyeTrack[:, 0])
remax_x = np.max(rightEyeTrack[:, 0])
remin_y = np.min(rightEyeTrack[:, 1])
remax_y = np.max(rightEyeTrack[:, 1])
right_eye = frame[remin_y : remax_y, remin_x : remax_x]
        right_eye = cv2.resize(right_eye, None, fx = 10, fy = 10) #fx and fy are the scale factors for the frame
cv2.imshow("Right Eye", right_eye)
# ================================ tracking right eye on new window ================================
#draw polys after eye frame is on window to prevent drawn polys from showing in eye window
cv2.polylines(frame, [leftEyeTrack], True, (0,255,0), 2)
cv2.polylines(frame, [rightEyeTrack], True, (0,255,0), 2)
cv2.imshow("IrisTrackingDLIB", frame)
key = cv2.waitKey(1)
if key == 27: #esc key is pressed
break
cap.release()
cv2.destroyAllWindows()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Tue Mar 6 14:49:49 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(768, 607)
MainWindow.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "SunPy PlotMan", None, QtGui.QApplication.UnicodeUTF8))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/main.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
MainWindow.setWindowOpacity(1.0)
MainWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
MainWindow.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.tabWidget = QtGui.QTabWidget(self.centralwidget)
self.tabWidget.setTabPosition(QtGui.QTabWidget.South)
self.tabWidget.setElideMode(QtCore.Qt.ElideNone)
self.tabWidget.setTabsClosable(True)
self.tabWidget.setMovable(True)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.horizontalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 768, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setTitle(QtGui.QApplication.translate("MainWindow", "&File", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setObjectName(_fromUtf8("menuFile"))
MainWindow.setMenuBar(self.menubar)
self.fileToolBar = QtGui.QToolBar(MainWindow)
self.fileToolBar.setWindowTitle(QtGui.QApplication.translate("MainWindow", "File", None, QtGui.QApplication.UnicodeUTF8))
self.fileToolBar.setLayoutDirection(QtCore.Qt.LeftToRight)
self.fileToolBar.setIconSize(QtCore.QSize(22, 22))
self.fileToolBar.setObjectName(_fromUtf8("fileToolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.fileToolBar)
self.colorOptionsDockWidget = QtGui.QDockWidget(MainWindow)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.colorOptionsDockWidget.sizePolicy().hasHeightForWidth())
self.colorOptionsDockWidget.setSizePolicy(sizePolicy)
self.colorOptionsDockWidget.setFloating(False)
self.colorOptionsDockWidget.setFeatures(QtGui.QDockWidget.DockWidgetFloatable|QtGui.QDockWidget.DockWidgetMovable)
self.colorOptionsDockWidget.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Color options", None, QtGui.QApplication.UnicodeUTF8))
self.colorOptionsDockWidget.setObjectName(_fromUtf8("colorOptionsDockWidget"))
self.dockWidgetContents = QtGui.QWidget()
self.dockWidgetContents.setObjectName(_fromUtf8("dockWidgetContents"))
self.verticalLayout = QtGui.QVBoxLayout(self.dockWidgetContents)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.cmListWidget = QtGui.QListWidget(self.dockWidgetContents)
self.cmListWidget.setObjectName(_fromUtf8("cmListWidget"))
self.verticalLayout.addWidget(self.cmListWidget)
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.label = QtGui.QLabel(self.dockWidgetContents)
self.label.setText(QtGui.QApplication.translate("MainWindow", "Clip values:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_4.addWidget(self.label)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_2 = QtGui.QLabel(self.dockWidgetContents)
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "Min", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_5.addWidget(self.label_2)
self.clipMinDoubleSpinBox = QtGui.QDoubleSpinBox(self.dockWidgetContents)
self.clipMinDoubleSpinBox.setDecimals(2)
self.clipMinDoubleSpinBox.setMinimum(-99999.0)
self.clipMinDoubleSpinBox.setMaximum(99999.0)
self.clipMinDoubleSpinBox.setSingleStep(5.0)
self.clipMinDoubleSpinBox.setProperty("value", 0.0)
self.clipMinDoubleSpinBox.setObjectName(_fromUtf8("clipMinDoubleSpinBox"))
self.horizontalLayout_5.addWidget(self.clipMinDoubleSpinBox)
self.horizontalLayout_3.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.label_3 = QtGui.QLabel(self.dockWidgetContents)
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "Max", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_6.addWidget(self.label_3)
self.clipMaxDoubleSpinBox = QtGui.QDoubleSpinBox(self.dockWidgetContents)
self.clipMaxDoubleSpinBox.setMinimum(-99999.0)
self.clipMaxDoubleSpinBox.setMaximum(99999.0)
self.clipMaxDoubleSpinBox.setSingleStep(5.0)
self.clipMaxDoubleSpinBox.setProperty("value", 0.0)
self.clipMaxDoubleSpinBox.setObjectName(_fromUtf8("clipMaxDoubleSpinBox"))
self.horizontalLayout_6.addWidget(self.clipMaxDoubleSpinBox)
self.horizontalLayout_3.addLayout(self.horizontalLayout_6)
self.horizontalLayout_4.addLayout(self.horizontalLayout_3)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.label_4 = QtGui.QLabel(self.dockWidgetContents)
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "Scaling:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_7.addWidget(self.label_4)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem)
self.scalingComboBox = QtGui.QComboBox(self.dockWidgetContents)
self.scalingComboBox.setObjectName(_fromUtf8("scalingComboBox"))
self.scalingComboBox.addItem(_fromUtf8(""))
self.scalingComboBox.setItemText(0, QtGui.QApplication.translate("MainWindow", "Linear", None, QtGui.QApplication.UnicodeUTF8))
self.scalingComboBox.addItem(_fromUtf8(""))
self.scalingComboBox.setItemText(1, QtGui.QApplication.translate("MainWindow", "Logarithmic", None, QtGui.QApplication.UnicodeUTF8))
self.horizontalLayout_7.addWidget(self.scalingComboBox)
self.verticalLayout_3.addLayout(self.horizontalLayout_7)
self.colorErrorLabel = QtGui.QLabel(self.dockWidgetContents)
self.colorErrorLabel.setText(_fromUtf8(""))
self.colorErrorLabel.setObjectName(_fromUtf8("colorErrorLabel"))
self.verticalLayout_3.addWidget(self.colorErrorLabel)
self.verticalLayout.addLayout(self.verticalLayout_3)
self.colorOptionsDockWidget.setWidget(self.dockWidgetContents)
MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.colorOptionsDockWidget)
self.actionOpen_file = QtGui.QAction(MainWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/open_plot.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpen_file.setIcon(icon1)
self.actionOpen_file.setText(QtGui.QApplication.translate("MainWindow", "Open file...", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen_file.setToolTip(QtGui.QApplication.translate("MainWindow", "Open a FITS file for plotting...", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen_file.setStatusTip(QtGui.QApplication.translate("MainWindow", "Open a FITS file for plotting...", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen_file.setShortcut(QtGui.QApplication.translate("MainWindow", "Ctrl+O", None, QtGui.QApplication.UnicodeUTF8))
self.actionOpen_file.setObjectName(_fromUtf8("actionOpen_file"))
self.actionExit_PlotMan = QtGui.QAction(MainWindow)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/exit.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionExit_PlotMan.setIcon(icon2)
self.actionExit_PlotMan.setText(QtGui.QApplication.translate("MainWindow", "Exit PlotMan", None, QtGui.QApplication.UnicodeUTF8))
self.actionExit_PlotMan.setToolTip(QtGui.QApplication.translate("MainWindow", "Close the application.", None, QtGui.QApplication.UnicodeUTF8))
self.actionExit_PlotMan.setStatusTip(QtGui.QApplication.translate("MainWindow", "Close the application.", None, QtGui.QApplication.UnicodeUTF8))
self.actionExit_PlotMan.setObjectName(_fromUtf8("actionExit_PlotMan"))
self.menuFile.addAction(self.actionOpen_file)
self.menuFile.addAction(self.actionExit_PlotMan)
self.menubar.addAction(self.menuFile.menuAction())
self.fileToolBar.addAction(self.actionOpen_file)
self.fileToolBar.addAction(self.actionExit_PlotMan)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(-1)
QtCore.QObject.connect(self.actionExit_PlotMan, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
pass
from resources import qrc_resources
|
# terrascript/data/clc.py
import terrascript
__all__ = []
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import itertools
import os
import traceback
import mock
import yaml
import rally
from rally import api
from rally.task import context
from rally.task import engine
from rally.task import scenario
from rally.task import task_cfg
from tests.unit import test
RALLY_PATH = os.path.dirname(os.path.dirname(rally.__file__))
class TaskSampleTestCase(test.TestCase):
samples_path = os.path.join(RALLY_PATH, "samples", "tasks")
def setUp(self):
super(TaskSampleTestCase, self).setUp()
if os.environ.get("TOX_ENV_NAME") == "cover":
self.skipTest("There is no need to check samples in coverage job.")
with mock.patch("rally.api.API.check_db_revision"):
self.rapi = api.API()
def iterate_samples(self, merge_pairs=True):
"""Iterates all task samples
:param merge_pairs: Whether or not to return both json and yaml samples
of one sample.
"""
for dirname, dirnames, filenames in os.walk(self.samples_path):
for filename in filenames:
# NOTE(hughsaunders): Skip non config files
# (bug https://bugs.launchpad.net/rally/+bug/1314369)
if filename.endswith("json") or (
not merge_pairs and filename.endswith("yaml")):
yield os.path.join(dirname, filename)
def test_check_missing_sla_section(self):
failures = []
for path in self.iterate_samples():
if "tasks/scenarios" not in path:
continue
with open(path) as task_file:
task_config = yaml.safe_load(
self.rapi.task.render_template(
task_template=task_file.read()))
for workload in itertools.chain(*task_config.values()):
if not workload.get("sla", {}):
failures.append(path)
if failures:
self.fail("One or several workloads from the list of samples below"
" doesn't have SLA section: \n %s" %
"\n ".join(failures))
def test_schema_is_valid(self):
scenarios = set()
for path in self.iterate_samples():
with open(path) as task_file:
try:
try:
task_config = yaml.safe_load(
self.rapi.task.render_template(
task_template=task_file.read()))
except Exception:
print(traceback.format_exc())
self.fail("Invalid JSON file: %s" % path)
eng = engine.TaskEngine(task_cfg.TaskConfig(task_config),
mock.MagicMock(), mock.Mock())
eng.validate(only_syntax=True)
except Exception:
print(traceback.format_exc())
self.fail("Invalid task file: %s" % path)
else:
scenarios.update(task_config.keys())
missing = set(s.get_name() for s in scenario.Scenario.get_all())
missing -= scenarios
# check missing scenario is not from plugin
missing = [s for s in list(missing)
if scenario.Scenario.get(s).__module__.startswith("rally")]
self.assertEqual(missing, [],
"These scenarios don't have samples: %s" % missing)
def test_task_config_pairs(self):
not_equal = []
missed = []
checked = []
for path in self.iterate_samples(merge_pairs=False):
if path.endswith(".json"):
json_path = path
yaml_path = json_path.replace(".json", ".yaml")
else:
yaml_path = path
json_path = yaml_path.replace(".yaml", ".json")
if json_path in checked:
continue
else:
checked.append(json_path)
if not os.path.exists(yaml_path):
missed.append(yaml_path)
elif not os.path.exists(json_path):
missed.append(json_path)
else:
with open(json_path) as json_file:
json_config = yaml.safe_load(
self.rapi.task.render_template(
task_template=json_file.read()))
with open(yaml_path) as yaml_file:
yaml_config = yaml.safe_load(
self.rapi.task.render_template(
task_template=yaml_file.read()))
if json_config != yaml_config:
not_equal.append("'%s' and '%s'" % (yaml_path, json_path))
error = ""
if not_equal:
error += ("Sample task configs are not equal:\n\t%s\n"
% "\n\t".join(not_equal))
        if missed:
            error += ("Sample task configs are missing:\n\t%s\n"
                      % "\n\t".join(missed))
if error:
self.fail(error)
def test_no_underscores_in_filename(self):
bad_filenames = []
for dirname, dirnames, filenames in os.walk(self.samples_path):
for filename in filenames:
if "_" in filename and (filename.endswith(".yaml") or
filename.endswith(".json")):
full_path = os.path.join(dirname, filename)
bad_filenames.append(full_path)
self.assertEqual([], bad_filenames,
"Following sample task filenames contain "
"underscores (_) but must use dashes (-) instead: "
"{}".format(bad_filenames))
def test_context_samples_found(self):
all_plugins = context.Context.get_all()
context_samples_path = os.path.join(self.samples_path, "contexts")
for p in all_plugins:
            # skip contexts which belong to the tests module
if not inspect.getfile(p).startswith(
os.path.dirname(rally.__file__)):
continue
file_name = p.get_name().replace("_", "-")
file_path = os.path.join(context_samples_path, file_name)
if not os.path.exists("%s.json" % file_path):
self.fail(("There is no json sample file of %s,"
"plugin location: %s" %
(p.get_name(), p.__module__)))
|
import sys, datetime
from CovBedClass import *
from pk2txt import bdgmsg, newmsg
class NormalizeProtocol(object):
def __init__(self, args):
#latestage, protocol=1, earlystage=False, pseudo=0.1, bandwidth=2500, quiet=False, impute=False, replace=False, replace_with='0', replace_this='.'
self.args = args
self._normalize()
self._finalize()
    def _set_protocol(self):
        # Check the --protocol1 .. --protocol30 flags in ascending order and
        # bind the first one that is set to its handler method.
        for i in range(1, 31):
            if getattr(self.args, "protocol%d" % i):
                self.protocol = i
                self._run_protocol = getattr(self, "_protocol%d" % i)
                return
def _protocol1(self):
self.late.median_normalize_data()
if self.early:
self.early.median_normalize_data()
self.late.normalize_to_other(self.early,
self.args.pseudo)
def _protocol2(self):
#print self.late.get_median()
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
self.late.median_normalize_data()
if self.early:
self.early.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
self.early.median_normalize_data()
self.late.normalize_to_other(self.early,
self.args.pseudo)
def _protocol3(self):
self.late.median_normalize_data()
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
if self.early:
self.early.median_normalize_data()
self.early.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
self.late.normalize_to_other(self.early,
self.args.pseudo)
def _protocol4(self):
self.late.median_normalize_data()
if self.early:
self.early.median_normalize_data()
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
def _protocol5(self):
#smoothing only -- late/early before smooth
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
def _protocol6(self):
#smoothing only -- late/early AFTER smooth
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
if self.early:
self.early.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
self.late.normalize_to_other(self.early,
self.args.pseudo)
def _protocol7(self):
#no smoothing, no median norm
# late:early only if early present
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
def _protocol8(self):
self.late.computeSkew()
def _protocol9(self):
self.late.computePercentChange()
def _protocol10(self):
self.late.computeSkewChange()
def _protocol11(self):
self.late.computePercentChange()
def _protocol12(self):
# median ratio norm - e.g. similar to DEseq (or TMM in EdgeR) -- global version
# if no "early" sample present, then this just returns median norm late -- like protocol 1
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.median_normalize_data()
def _protocol13(self):
# median ratio norm w/ pre-smoothing - e.g. similar to DEseq (or TMM in EdgeR) -- global version
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
if self.early:
self.early.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
self.late.normalize_to_other(other=self.early,
pseudocount=self.args.pseudo)
self.late.median_normalize_data()
def _protocol14(self):
# median ratio norm w/ post-smoothing - e.g. similar to DEseq (or TMM in EdgeR) -- global version
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
self.late.median_normalize_data()
def _protocol15(self):
# median ratio norm w/ end-smoothing - e.g. similar to DEseq (or TMM in EdgeR) -- global version
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.median_normalize_data()
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
def _protocol16(self):
# experimental glocal median ratio norm w/ end-smoothing -
self.late.normalize_with_glocalMedRatioNorm(other=self.early,
pseudocount=self.args.pseudo,
globalweight=100,
minlocalbins=5,
minpropdata=0.333) ## LAST 3 HARDCODED FOR NOW -- less local
def _protocol17(self):
## First median normalizes both samples
## Then scales to a target median bin cov -- default 1000.
## Then adds pseudocount - default 1. (so default pseudo is 0.1% median, 1000-fold less than median)
## Then normalizes late to early
## Then median ratio normalizes a la DEseq/EdgeR
self.late.median_normalize_data()
self.late.scale_data(scale=self.args.scalecov)
if self.early:
self.early.median_normalize_data()
self.early.scale_data(scale=self.args.scalecov)
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.median_normalize_data(relearn=True)
def _protocol18(self):
## Robust Z scores
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.robust_z_normalize_data()
def _protocol19(self):
## Rank scores
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.rank_normalize_data()
def _protocol20(self):
## Robust Z score difference
if not self.early:
sys.stderr.write("This requires late (test) and early (control) files, not just late (test).....\n")
quit()
self.late.robust_z_normalize_data()
self.early.robust_z_normalize_data()
self.late.subtract_other(self.early)
def _protocol21(self):
## Rank difference
if not self.early:
sys.stderr.write("This requires late (test) and early (control) files, not just late (test).....\n")
quit()
self.late.rank_normalize_data()
self.early.rank_normalize_data()
self.late.subtract_other(self.early)
def _protocol22(self):
## SPMR - all counts are summed up, all bins divided by sum and multipled by 1e6 (default)
self.late.spxr_normalize_data(x=self.args.SPXR)
if self.early:
self.early.spxr_normalize_data(x=self.args.SPXR)
self.late.normalize_to_other(self.early,
self.args.pseudo)
def _protocol23(self):
## Std Rank: Rank scores, subtract theoretical middle rank (min+max)/2, fold-normalize by middle rank
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.rank_standardize_data()
def _protocol24(self):
## pct difference: (Z_t-Z_c)/abs(Z_c)
## t < c; both neg
## (-1 - -0.5)/(-0.5) = -0.5/-0.5 = 1
## (-1 - -0.5)/abs(-0.5) = -0.5/0.5 = -1
## t > c; both neg
## (-0.5 - -1) /(-1) = 0.5/-1 = -1.5
## (-0.5 - -1) /abs(-1) = 0.5/1 = 0.5
if not self.early:
sys.stderr.write("This requires late (test) and early (control) files, not just late (test).....\n")
quit()
self.late.pct_diff_from_other(self.early, setToControlDist=self.args.setToControlDist, pseudoZeroBins=self.args.pseudoZeroBins, addMinOtherPlusOneToBoth=self.args.addMinOtherPlusOneToBoth)
def _protocol25(self):
## pct skew: (Z_t - Z_c)/(abs(Z_t) + abs(Z_c))
## t < c; both neg
## (-1 - -0.5)/(-1 + -0.5) = -0.5/-1.5 = 0.333
## (-1 - -0.5)/abs(-1 + -0.5) = -0.5/1.5 = -0.333
## (-1 - -0.5)/(abs(-1) + abs(-0.5)) = -0.5/1.5 = -0.333
## t > c; both neg
## (-0.5 - -1) /(-0.5 + -1) = 0.5/-1.5 = -0.333
## (-0.5 - -1) /abs(-0.5 + -1) = 0.5/1.5 = 0.333
## (-0.5 - -1) /(abs(-0.5) + abs(-1)) = 0.5/1.5 = 0.333
##
## t < c; pos and neg
## (-1 - 0.5)/(-1 + 0.5) = -1.5/-0.5 = 3
## (-1 - 0.5)/abs(-1 + 0.5) = -1.5/0.5 = -3
## (-1 - 0.5)/(abs(-1) + abs(0.5)) = -1.5/1.5 = -1
## t > c; pos and neg
## (0.5 - -1) /(0.5 + -1) = 1.5/-0.5 = -0.333
## (0.5 - -1) /abs(0.5 + -1) = 1.5/0.5 = 0.333
## (0.5 - -1) /(abs(0.5) + abs(-1)) = 1.5/1.5 = 1
##
## t < c; both pos
## (0.5 - 1)/(0.5 + 1) = -0.5/1.5 = -0.333
## (0.5 - 1)/abs(0.5 + 1) = -0.5/1.5 = -0.333
## (0.5 - 1)/(abs(0.5) + abs(1)) = -0.5/1.5 = -0.333
## t > c; both pos
## (1 - 0.5) /(1 + 0.5) = 0.5/1.5 = 0.333
## (1 - 0.5) /abs(1 + 0.5) = 0.5/1.5 = 0.333
## (1 - 0.5) /(abs(1) + abs(0.5)) = 0.5/1.5 = 0.333
##
## inputs should really just be positive.
## However, the "most correct" way when anticipating pos and neg is to sum the abs vals of each.
if not self.early:
sys.stderr.write("This requires late (test) and early (control) files, not just late (test).....\n")
quit()
self.late.pct_skew_given_other(self.early, setToControlDist=self.args.setToControlDist, pseudoZeroBins=self.args.pseudoZeroBins, addMinOtherPlusOneToBoth=self.args.addMinOtherPlusOneToBoth)
def _protocol26(self):
## Robust Z score pct difference: (Z_t-Z_c)/abs(Z_c)
if not self.early:
sys.stderr.write("This requires late (test) and early (control) files, not just late (test).....\n")
quit()
self.late.robust_z_normalize_data()
self.early.robust_z_normalize_data()
self.late.pct_diff_from_other(self.early, setToControlDist=self.args.setToControlDist, pseudoZeroBins=self.args.pseudoZeroBins, addMinOtherPlusOneToBoth=self.args.addMinOtherPlusOneToBoth)
def _protocol27(self):
## Robust Z score pct skew: (Z_t - Z_c)/(abs(Z_t) + abs(Z_c))
if not self.early:
sys.stderr.write("This requires late (test) and early (control) files, not just late (test).....\n")
quit()
self.late.robust_z_normalize_data()
self.early.robust_z_normalize_data()
self.late.pct_skew_given_other(self.early, setToControlDist=self.args.setToControlDist, pseudoZeroBins=self.args.pseudoZeroBins, addMinOtherPlusOneToBoth=self.args.addMinOtherPlusOneToBoth)
def _protocol28(self):
## Rank pct difference: (Z_t-Z_c)/abs(Z_c)
if not self.early:
sys.stderr.write("This requires late (test) and early (control) files, not just late (test).....\n")
quit()
self.late.rank_normalize_data()
self.early.rank_normalize_data()
self.late.pct_diff_from_other(self.early)
def _protocol29(self):
## Rank pct skew: (Z_t - Z_c)/(abs(Z_t) + abs(Z_c))
if not self.early:
sys.stderr.write("This requires late (test) and early (control) files, not just late (test).....\n")
quit()
self.late.rank_normalize_data()
self.early.rank_normalize_data()
self.late.pct_skew_given_other(self.early)
def _protocol30(self):
# median ratio norm for local windows - e.g. similar to DEseq (or TMM in EdgeR) -- PHYSICALLY-LOCAL version
# if no "early" sample present, then this just returns phys-local median norm late
if self.early:
self.late.normalize_to_other(self.early,
self.args.pseudo)
self.late.local_median_normalize_data(halfwidth=self.args.halfwidth)
def _normalize(self):
if not self.args.quiet:
newmsg("loading late stage file")
self.late = CovBed(self.args.latestage,
replace=self.args.replace,
replace_with=self.args.replace_with,
replace_this=self.args.replace_this,
stringcols=self.args.stringcols)
if self.args.impute:
if not self.args.quiet:
newmsg("imputing late stage bins with missing data")
self.late.impute_zeros(bw=self.args.impute)
if self.args.earlystage:
if not self.args.quiet:
newmsg("loading early stage file")
self.early = CovBed(self.args.earlystage,
replace=self.args.replace,
replace_with=self.args.replace_with,
replace_this=self.args.replace_this,
stringcols=self.args.stringcols)
if self.args.impute:
if not self.args.quiet:
newmsg("imputing early stage bins with missing data")
self.early.impute_zeros(bw=self.args.impute)
else:
self.early = False
## todo -- add filtering out 0 contigs option...
if not self.args.quiet:
if self.args.earlystage:
emsg = ' with additional early stage normalization'
else:
emsg = ' without additional early stage normalization'
self._set_protocol()
newmsg("following normalization protocol "+str(self.protocol)+emsg)
self._run_protocol()
def _finalize(self):
## Optional Log Transforms
if self.args.log10:
self.late.log10_data()
elif self.args.log2:
self.late.log2_data()
## Optional end smoothing
if self.args.endsmoothing:
self.late.ksmooth_counts(bw=self.args.bandwidth,
rescueNaN=self.args.replaceNaN,
localWindow=self.args.localwinsize)
##TODO - allow imputing values locally when a bin is 0 -- i.e. if surrounded by 2 bins with values >0, take average.
## various 0 spots are causing short state changes in the CN hmm.
## perhaps do ksmoothing with something like 10-50kb bandwidth -- then go back to raw signal, and wherever 0 is, substitute with Ksmoothed value.
## 0 spots lead to 2 problems:
## 1. these become fe=1 when both early and late are 0 -- leads to state drops and even bad smoothing
## 2. when not 0 in late, this becomes (count+0.1)/0.1 in fe step (after pseudocount)
## --> which could mean a really high bin
## best stage to impute would be prior to ANY normalization
## i.e. impute late and early separately --> then median norm --> then FE --> then smooth if want
## If one does protocol 2 or 3... technically imputing is done just by smoothing...
## only problem is it still shows a significant drop at 0 spots... whereas would be less the case if pre-imputed even before this smoothing
## NOTES: imputing in this way creates its own problems.
## e.g. if late needs to be imputed, but early does not
## then late will get value close to other values, but early will remain near 0 -- so you get massive spike
## would want to change same bins in both samples...
        ## didn't seem to change the state path at all
def run(parser, args):
protocol = NormalizeProtocol(args)
sys.stdout.write(
protocol.late.get_bdg(
protocol.late.count,
args.collapsed
)
)
|
import inspect
import math
import os
import re
import tempfile
from pprint import pformat, pprint
from shutil import copy, copyfile, rmtree
from typing import Callable, Optional, Tuple, Union
from beartype import beartype
from rich import print as rich_print
from rich.console import Console
from rich.panel import Panel
from rich.table import Table, box
# from fabsim.base.utils import add_prefix, print_prefix
from fabsim.base.decorators import load_plugin_env_vars, task
from fabsim.base.env import env
from fabsim.base.manage_remote_job import *
from fabsim.base.MultiProcessingPool import MultiProcessingPool
from fabsim.base.networks import local, put, rsync_project, run
from fabsim.base.setup_fabsim import *
from fabsim.deploy.machines import *
from fabsim.deploy.templates import (
script_template_content,
script_templates,
template,
)
@beartype
def get_plugin_path(name: str) -> str:
"""
Get the local base path of input plugin name.
Args:
        name (str): the name of the plugin
Returns:
str: the path of plugin
Raises:
RuntimeError: if the requested plugin is not installed in the
local system
"""
plugin_path = os.path.join(env.localroot, "plugins", name)
if not os.path.exists(plugin_path):
raise RuntimeError(
"The requested plugin {} does not exist ({}).\n"
"you can install it by typing:\n\t"
"fabsim localhost install_plugin:{}".format(
name, plugin_path, name
)
)
return plugin_path
@task
@beartype
def put_results(name: str) -> None:
"""
Transfer result files to a remote. Local path to find result
directories is specified in machines_user.json. This method is not
intended for normal use, but is useful when the local machine
cannot have an entropy mount, so that results from a local machine
can be sent to entropy, via 'fab legion fetch_results; fab entropy
put_results'
Args:
name (str, optional): the name of results directory
"""
with_job(name)
run(template("mkdir -p $job_results"))
if env.manual_gsissh:
local(
template(
"globus-url-copy -p 10 -cd -r -sync "
"file://$job_results_local/ "
"gsiftp://$remote/$job_results/"
)
)
else:
rsync_project(
local_dir=env.job_results_local + "/",
remote_dir=env.job_results
)
@task
@beartype
def fetch_results(
name: Optional[str] = "",
regex: Optional[str] = "",
files: Optional[str] = None,
debug: Optional[bool] = False) -> None:
"""
Fetch results of remote jobs to local result store. Specify a job
name to transfer just one job. Local path to store results is
specified in machines_user.json, and should normally point to a
mount on entropy, i.e. /store4/blood/username/results.
If you can't mount entropy, `put results` can be useful, via
`fab legion fetch_results; fab entropy put_results`
Args:
        name (str, optional): the job name; if no name is provided, all
            directories from `fabric_dir` will be fetched
regex (str, optional): the matching pattern
        files (str, optional): the list of files to be fetched from the
            remote machine. The list should be passed as a single string,
            separated by `;`. For example, to fetch only `out.csv` and
            `env.yml`, pass `files="out.csv;env.yml"` to this function.
        debug (bool, optional): if `True`, all `env` variables will be shown.
"""
fetch_files = []
if files is not None:
fetch_files = files.split(";")
includes_files = ""
if len(fetch_files) > 0:
includes_files = " ".join(
[
*["--include='*/' "],
*["--include='{}' ".format(file) for file in fetch_files],
*["--exclude='*' "],
*["--prune-empty-dirs "],
]
)
env.job_results, env.job_results_local = with_job(name)
# check if the local results directory exists or not
if not os.path.isdir(env.job_results_local):
os.makedirs(env.job_results_local)
if env.manual_sshpass:
local(
template(
"rsync -pthrvz -e 'sshpass -p $sshpass ssh -p $port' {}"
"$username@$remote:$job_results/{} "
"$job_results_local".format(includes_files, regex)
)
)
elif env.manual_gsissh:
local(
template(
"globus-url-copy -cd -r -sync {}"
"gsiftp://$remote/$job_results/{} "
"file://$job_results_local/".format(includes_files, regex)
)
)
else:
local(
template(
# "rsync -pthrvz --port=$port \
"rsync -pthrvz -e 'ssh -p $port' {}"
"$username@$remote:$job_results/{} "
"$job_results_local".format(includes_files, regex)
)
)
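# A minimal sketch of how fetch_results turns its `files` argument into rsync
# include/exclude flags; pure string handling with no remote access, and the
# file names are illustrative assumptions only.
def _demo_include_flags(files: str = "out.csv;env.yml") -> str:
    fetch_files = files.split(";")
    return " ".join(
        ["--include='*/' "]
        + ["--include='{}' ".format(f) for f in fetch_files]
        + ["--exclude='*' ", "--prune-empty-dirs "]
    )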
@task
@beartype
def fetch_configs(config: str) -> None:
"""
Fetch config files from the remote machine, via `rsync`.
Example Usage:
```sh
fab eagle_vecma fetch_configs:mali
```
Args:
config (str): the name of config directory
"""
with_config(config)
if env.manual_gsissh:
local(
template(
"globus-url-copy -cd -r -sync "
"gsiftp://$remote/$job_config_path/ "
"file://$job_config_path_local/"
)
)
else:
local(
template(
"rsync -pthrvz $username@$remote:$job_config_path/ "
"$job_config_path_local"
)
)
@task
@beartype
def clear_results(name: str) -> None:
"""
Completely wipe all result files from the remote.
Args:
name (str, optional): the name of result folder
"""
with_job(name)
run(template("rm -rf $job_results_contents"))
@beartype
def execute(task: Callable, *args, **kwargs) -> None:
"""
Execute a task (callable function).
The input arg `task` can be an actual callable function or its name.
    The target function can be wrapped by @task or not.
"""
f_globals = inspect.stack()[1][0].f_globals
if callable(task):
task(*args, **kwargs)
elif task in f_globals:
f_globals[task](*args, **kwargs)
else:
        msg = (
            "The requested task [green3]{}[/green3] passed to the execute() "
            "function cannot be found !!!".format(task)
        )
console = Console()
console.print(
Panel(
"{}".format(
msg),
title="[red1]Error[/red1]",
border_style="red1",
expand=False,
)
)
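# A minimal usage sketch of execute() with a made-up callable: passing the
# callable itself runs it directly; passing a task *name* instead is resolved
# via the caller's module globals.
def _demo_execute():
    def greet(who="world"):
        print("hello", who)
    execute(greet, who="FabSim3")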
@beartype
def put_configs(config: str) -> None:
"""
Transfer config files to the remote machine, via `rsync`.
Args:
config (str): Specify a config directory
"""
with_config(config)
    # By using get_setup_fabsim_dirs_string(), the FabSim3 directories will be
    # created automatically whenever a config file is uploaded.
run(
template("{}; mkdir -p $job_config_path".format(
get_setup_fabsim_dirs_string())
)
)
rsync_delete = False
if (
hasattr(env, "prevent_results_overwrite")
and env.prevent_results_overwrite == "delete"
):
rsync_delete = True
if env.manual_sshpass:
# TODO: maybe the better option here is to overwrite the rsync_project
# function from /fabric/contrib/project.py
local(
template(
"rsync -pthrvz --rsh='sshpass -p $sshpass ssh -p 22 ' "
"$job_config_path_local/ "
"$username@$remote:$job_config_path/"
)
)
elif env.manual_gsissh:
# TODO: implement prevent_results_overwrite here
local(
template(
"globus-url-copy -p 10 -cd -r -sync "
"file://$job_config_path_local/ "
"gsiftp://$remote/$job_config_path/"
)
)
else:
rsync_project(
local_dir=env.job_config_path_local + "/",
remote_dir=env.job_config_path,
delete=rsync_delete,
exclude=['file1.txt', 'dir1/*', 'dir2'],
)
def calc_nodes() -> None:
"""
    Calculate the number of nodes required for the job execution.
This will set the `env.nodes` which will be used to set the node request
number in the job script.
!!! tip
        If we're not reserving whole nodes and we request less than one
        node's worth of cores, the cores used per node are capped at the
        requested core count (i.e. keep N <= n).
"""
env.coresusedpernode = env.corespernode
if int(env.coresusedpernode) > int(env.cores):
env.coresusedpernode = env.cores
env.nodes = int(math.ceil(float(env.cores) / float(env.coresusedpernode)))
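# A minimal illustration of the node arithmetic above, independent of `env`
# and using assumed numbers: 36 requested cores on 16-core nodes -> 3 nodes.
def _demo_calc_nodes(cores: int = 36, corespernode: int = 16) -> int:
    coresusedpernode = min(corespernode, cores)
    return int(math.ceil(float(cores) / float(coresusedpernode)))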
def calc_total_mem() -> None:
"""
Calculate the total amount of memory for the job script.
!!! tip
        When using the `PJ` option, please make sure you set the total
        required memory for all sub-tasks.
"""
# for qcg scheduler, #QCG memory requires total memory for all nodes
if not hasattr(env, "memory"):
env.memory = "2GB"
    mem_size = int(re.findall(r"\d+", str(env.memory))[0])
try:
mem_unit_str = re.findall("[a-zA-Z]+", str(env.memory))[0]
except Exception:
mem_unit_str = ""
if mem_unit_str.upper() == "GB" or mem_unit_str.upper() == "G":
mem_unit = 1000
else:
mem_unit = 1
if hasattr(env, "PJ") and env.PJ.lower() == "true":
# env.total_mem = mem_size * int(env.PJ_size) * mem_unit
env.total_mem = env.memory
else:
env.total_mem = mem_size * int(env.nodes) * mem_unit
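# ---------------------------------------------------------------------------
# Illustrative sketch (not part of FabSim3): the node/memory arithmetic used
# by calc_nodes() and calc_total_mem(), written as a pure function with
# hypothetical inputs so the rounding behaviour is easy to follow.
# ---------------------------------------------------------------------------
def _example_node_and_memory_calc(
    cores: int = 36, corespernode: int = 16, memory: str = "4GB"
) -> Tuple[int, int]:
    """Sketch only: returns (nodes, total_mem) for the given inputs."""
    coresusedpernode = min(corespernode, cores)
    nodes = int(math.ceil(float(cores) / float(coresusedpernode)))  # -> 3
    mem_size = int(re.findall(r"\d+", memory)[0])                   # -> 4
    mem_unit = 1000 if memory.upper().endswith(("GB", "G")) else 1
    total_mem = mem_size * nodes * mem_unit                         # -> 12000
    return nodes, total_mem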
@beartype
def find_config_file_path(
name: str,
ExceptWhenNotFound: Optional[bool] = True
) -> Union[bool, str]:
"""
Find the config file path
Args:
name (str): Description
ExceptWhenNotFound (bool, optional): Description
Returns:
        Union[bool, str]: - `False`: if the input config name is not found
            - otherwise, the path of the input config directory
"""
    # Prevent executing localhost runs in the FabSim3 root directory
if env.host == "localhost" and env.work_path == env.fabsim_root:
msg = (
"The localhost run dir is same as your FabSim3 folder\n"
"To avoid any conflict of config folder, please consider\n"
"changing your home_path_template variable\n"
"you can easily modify it by updating localhost entry in\n"
"your FabSim3/fabsim/deploy/machines_user.yml file\n\n"
"Here is the suggested changes:\n\n"
)
solution = "localhost:\n"
solution += " ...\n"
solution += ' home_path_template: "{}/localhost_exe"'.format(
env.localroot
)
rich_print(
Panel(
"{}[green3]{}[/green3]".format(
msg, solution),
title="[red1]Error[/red1]",
border_style="red1",
expand=False,
)
)
exit()
path_used = None
for p in env.local_config_file_path:
config_file_path = os.path.join(p, name)
if os.path.exists(config_file_path):
path_used = config_file_path
if path_used is None:
if ExceptWhenNotFound:
raise Exception(
"Error: config file directory not found in: ",
env.local_config_file_path,
)
else:
return False
return path_used
@beartype
def with_config(name: str):
"""
Internal: augment the fabric environment with information
regarding a particular configuration name.
Definitions created:
- `job_config_path`: the remote location where the config files for the
job should be stored
- `job_config_path_local`: the local location where the config files for
the job may be found
Args:
name (str): the name of config directory
"""
env.config = name
env.job_config_path = os.path.join(env.config_path, name + env.job_desc)
path_used = find_config_file_path(name)
env.job_config_path_local = os.path.join(path_used)
env.job_config_contents = os.path.join(env.job_config_path, "*")
env.job_config_contents_local = os.path.join(
env.job_config_path_local, "*"
)
# name of the job sh submission script.
env.job_name_template_sh = template("{}.sh".format(env.job_name_template))
@beartype
def add_local_paths(plugin_name: str) -> None:
"""
Updates `env` variables for the input plugin name
Args:
plugin_name (str): plugin name
"""
# This variable encodes the default location for templates.
env.local_templates_path.insert(
0, os.path.join(env.localroot, "plugins", plugin_name, "templates")
)
# This variable encodes the default location for config files.
env.local_config_file_path.insert(
0, os.path.join(env.localroot, "plugins", plugin_name, "config_files")
)
@beartype
def with_template_job(
ensemble_mode: Optional[bool] = False,
label: Optional[str] = None
) -> Tuple[str, str]:
"""
Determine a generated job name from environment parameters,
and then define additional environment parameters based on it.
Args:
ensemble_mode (bool, optional): determines if the job is an ensemble
simulation or not
label (str, optional): the label of job
Returns:
Tuple[str, str]: returns `job_results, job_results_local` env variables
filled based on job and label name
"""
    # The name now depends on the label name
name = template(env.job_name_template)
if label and not ensemble_mode:
name = "_".join((label, name))
job_results, job_results_local = with_job(
name=name, ensemble_mode=ensemble_mode, label=label
)
return job_results, job_results_local
@beartype
def with_job(
name: str,
ensemble_mode: Optional[bool] = False,
label: Optional[str] = None
) -> Tuple[str, str]:
"""
Augment the fabric environment with information regarding a particular
job name.
Definitions created:
- `job_results`: the remote location where job results should be stored
- `job_results_local`: the local location where job results should be
stored
Args:
name (str): the job name
ensemble_mode (bool, optional): determines if the job is an ensemble
simulation or not
label (str, optional): the label of job
Returns:
        Tuple[str, str]: two string values
- job_results: the remote location where job results should be stored
- job_results_local: the local location where job results should
be stored
"""
env.name = name
if not ensemble_mode:
job_results = env.pather.join(env.results_path, name)
job_results_local = os.path.join(env.local_results, name)
else:
job_results = "{}/RUNS/{}".format(
env.pather.join(env.results_path, name), label
)
job_results_local = "{}/RUNS/{}".format(
os.path.join(env.local_results, name), label
)
env.job_results_contents = env.pather.join(job_results, "*")
env.job_results_contents_local = os.path.join(job_results_local, "*")
    # The template name now depends on the label of the job when needed
if label is not None:
env.job_name_template_sh = "{}_{}.sh".format(name, label)
else:
env.job_name_template_sh = "{}.sh".format(name)
return job_results, job_results_local
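# ---------------------------------------------------------------------------
# Illustrative sketch (not part of FabSim3): the result-directory layout that
# with_job() produces, shown with hypothetical values for results_path, the
# job name and the ensemble label.
# ---------------------------------------------------------------------------
def _example_job_result_paths(
    results_path: str = "/scratch/user/FabSim3/results",
    name: str = "mali_eagle_16",
    label: str = "run_1",
) -> Tuple[str, str]:
    """Sketch only: returns (plain_job_results, ensemble_job_results)."""
    # plain job:      <results_path>/<name>
    plain = os.path.join(results_path, name)
    # ensemble run:   <results_path>/<name>/RUNS/<label>
    ensemble = "{}/RUNS/{}".format(os.path.join(results_path, name), label)
    return plain, ensemble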
def with_template_config() -> None:
"""
Determine the name of a used or generated config from environment
parameters, and then define additional environment parameters based
on it.
"""
with_config(template(env.config_name_template))
def job(*job_args):
"""
Internal low level job launcher.
Parameters for the job are determined from the prepared fabric environment
Execute a generic job on the remote machine.
    To speed up the overall job submission, and to reduce the number of SSH
    connections for transmitting job files/folders, the job submission workflow
    is divided into 3 individual sub-tasks:
1. job_preparation
2. job_transmission
3. job_submission
    Returns the generated job scripts for submission on the remote machine.
"""
args = {}
for adict in job_args:
args = dict(args, **adict)
# check if with_config function is already called or not
if not hasattr(env, "job_config_path"):
raise RuntimeError(
"Function with_config did NOT called, "
"Please call it before calling job()"
)
# sys.exit()
update_environment(args)
# Add label, mem, core to env.
calc_nodes()
calc_total_mem()
if "sweepdir_items" in args:
env.ensemble_mode = True
else:
env.ensemble_mode = False
########################################################
# temporary folder to save job files/folders/scripts #
########################################################
env.tmp_work_path = env.pather.join(
tempfile._get_default_tempdir(),
next(tempfile._get_candidate_names()),
"FabSim3"
# env.fabric_dir
)
if os.path.exists(env.tmp_work_path):
rmtree(env.tmp_work_path)
    # the config_files folder is already transferred by put_configs
env.tmp_results_path = env.pather.join(env.tmp_work_path, "results")
env.tmp_scripts_path = env.pather.join(env.tmp_work_path, "scripts")
os.makedirs(env.tmp_scripts_path)
os.makedirs(env.tmp_results_path)
POOL = MultiProcessingPool(PoolSize=int(env.nb_process))
#####################################
# job preparation phase #
#####################################
msg = "tmp_work_path = {}".format(env.tmp_work_path)
rich_print(
Panel.fit(
msg,
title="[orange_red1]job preparation phase[/orange_red1]",
border_style="orange_red1"
)
)
print("Submit tasks to multiprocessingPool : start ...")
if env.ensemble_mode is True:
for task_label in env.sweepdir_items:
POOL.add_task(
func=job_preparation,
func_args=dict(
ensemble_mode=env.ensemble_mode, label=task_label
),
)
else:
POOL.add_task(func=job_preparation, func_args=args)
print("Submit tasks to multiprocessingPool : done ...")
job_scripts_to_submit = POOL.wait_for_tasks()
#####################################
# job transmission phase #
#####################################
msg = (
"Copy all generated files/folder from\n"
"tmp_work_path = {}\n"
"to\n"
"work_path = {}".format(env.tmp_work_path, env.work_path)
)
rich_print(
Panel.fit(
msg,
title="[orange_red1]job transmission phase[/orange_red1]",
border_style="orange_red1"
)
)
job_transmission()
if not (hasattr(env, "TestOnly") and env.TestOnly.lower() == "true"):
# DO NOT submit any job
# env.submit_job is False in case of using PilotJob option
# therefore, DO NOT submit the job directly, only submit PJ script
if not (
hasattr(env, "submit_job")
and isinstance(env.submit_job, bool)
and env.submit_job is False
):
#####################################
# job submission phase #
#####################################
msg = (
"Submit all generated job scripts to "
"the target remote machine"
)
rich_print(
Panel.fit(
msg,
title="[orange_red1]job submission phase[/orange_red1]",
border_style="orange_red1"
)
)
for job_script in job_scripts_to_submit:
job_submission(dict(job_script=job_script))
print(
"submitted job script = \n{}".format(
pformat(job_scripts_to_submit)
)
)
# POOL.shutdown_threads()
return job_scripts_to_submit
def job_preparation(*job_args):
"""
    Here, all job folders and scripts are created in the temporary folder
    `<tmp_folder>/{results,scripts}`; later, in job_transmission,
    we transfer all these files and folders with a single `rsync` command.
    This approach helps us reduce the number of SSH connections and
    improve the stability of the job submission workflow, which can be
    compromised by many parallel SSH connections.
"""
pprint(job_args)
args = {}
for adict in job_args:
args = dict(args, **adict)
if "label" in args:
env.label = args["label"]
else:
env.label = ""
return_job_scripts = []
for i in range(1, int(env.replicas) + 1):
env.job_results, env.job_results_local = with_template_job(
ensemble_mode=env.ensemble_mode, label=env.label
)
if int(env.replicas) > 1:
if env.ensemble_mode is False:
env.job_results += "_replica_" + str(i)
else:
env.job_results += "_" + str(i)
tmp_job_results = env.job_results.replace(
env.results_path, env.tmp_results_path
)
env["job_name"] = env.name[0: env.max_job_name_chars]
complete_environment()
env.run_command = template(env.run_command)
if env.label not in ["PJ_PYheader", "PJ_header"]:
env.run_prefix += (
"\n\n"
"# copy files from config folder\n"
"config_dir={}\n"
"rsync -pthrvz --inplace --exclude SWEEP "
"$config_dir/* .".format(
env.job_config_path
)
)
if env.ensemble_mode:
env.run_prefix += (
"\n\n"
"# copy files from SWEEP folder\n"
"rsync -pthrvz --inplace $config_dir/SWEEP/{}/ .".format(
env.label)
)
if not (hasattr(env, "venv") and str(env.venv).lower() == "true"):
if hasattr(env, "py_pkg") and len(env.py_pkg) > 0:
env.run_prefix += (
"\n\n"
"# Install requested python packages\n"
"pip3 install --user --upgrade {}".format(
" ".join(pkg for pkg in env.py_pkg)
)
)
        # this is a tricky situation:
        # for ensemble runs or a simple job, we need to add env.label
        # to the generated job script name;
        # however, for the PJ_PYheader and PJ_header header scripts, nothing
        # should be added at the end of the script file name, so here we pass
        # an empty string as the label
if hasattr(env, "NoEnvScript") and env.NoEnvScript:
tmp_job_script = script_templates(env.batch_header)
else:
tmp_job_script = script_templates(env.batch_header, env.script)
# Separate base from extension
base, extension = os.path.splitext(env.pather.basename(tmp_job_script))
        # Initialise the new name if we have replicas or an ensemble
if int(env.replicas) > 1:
if env.ensemble_mode is False:
dst_script_name = base + "_replica_" + str(i) + extension
else:
dst_script_name = base + "_" + str(i) + extension
else:
dst_script_name = base + extension
dst_job_script = env.pather.join(env.tmp_scripts_path, dst_script_name)
# Add target job script to return list
"""
return_job_scripts.append(env.pather.join(env.scripts_path,
dst_script_name)
"""
# here, instead of returning PATH to script folder, it is better to
# submit script from results_path folder, specially in case of PJ job
return_job_scripts.append(
env.pather.join(env.job_results, dst_script_name)
)
copy(tmp_job_script, dst_job_script)
# chmod +x dst_job_script
# 755 means read and execute access for everyone and also
# write access for the owner of the file
os.chmod(dst_job_script, 0o755)
os.makedirs(tmp_job_results)
copy(dst_job_script, env.pather.join(tmp_job_results, dst_script_name))
# TODO: these env variables are not used anywhere
# TODO: maybe it is better to remove them
# job_name_template_sh
# job_results_contents
# job_results_contents_local
with open(
env.pather.join(tmp_job_results, "env.yml"), "w"
) as env_yml_file:
yaml.dump(
dict(
env,
**{
"sshpass": None,
"passwords": None,
"password": None,
"sweepdir_items": None,
}
),
env_yml_file,
default_flow_style=False,
)
return return_job_scripts
def job_transmission(*job_args):
"""
here, we only transfer all generated files/folders from
`<tmp_folder>/{results,scripts}`
to
`<target_work_dir>/{results,scripts}`
"""
args = {}
for adict in job_args:
args = dict(args, **adict)
if (
hasattr(env, "prevent_results_overwrite")
and env.prevent_results_overwrite == "delete"
):
        # if we have a large result directory that contains thousands of files
        # and folders, using the rm command will not be efficient,
        # so here we use rsync instead
        #
        # Note: there is another option, using perl, which is much faster than
        # rsync -a --delete, but it is not clear whether it is available on
        # all HPC resources
empty_folder = "/tmp/{}".format(next(tempfile._get_candidate_names()))
results_dir_items = os.listdir(env.tmp_results_path)
for results_dir_item in results_dir_items:
run(
template(
"mkdir {} && "
"mkdir -p {}/results &&"
"rsync -a --delete --inplace {}/ {}/results/{}/".format(
empty_folder,
env.work_path,
empty_folder,
env.work_path,
results_dir_item,
)
)
)
    rsync_src_dst_folders = []
    rsync_src_dst_folders.append((env.tmp_scripts_path, env.scripts_path))
    rsync_src_dst_folders.append((env.tmp_results_path, env.results_path))
    for sync_src, sync_dst in rsync_src_dst_folders:
if env.manual_sshpass:
# TODO: maybe the better option here is to overwrite the
# rsync_project
local(
template(
"rsync -pthrvz "
"--rsh='sshpass -p $sshpass ssh -p 22 ' "
"{}/ $username@$remote:{}/ ".format(sync_src, sync_dst)
)
)
elif env.manual_gsissh:
# TODO: implement prevent_results_overwrite for this option
local(
template(
"globus-url-copy -p 10 -cd -r -sync "
"file://{}/ "
"gsiftp://$remote/{}/".format(sync_src, sync_dst)
)
)
else:
rsync_project(local_dir=sync_src + "/", remote_dir=sync_dst)
def job_submission(*job_args):
"""
    Here, all prepared job scripts will be submitted to the
    target remote machine.
    !!! note
        please make sure to pass the list of job scripts to be submitted as
        an input to this function
"""
args = {}
for adict in job_args:
args = dict(args, **adict)
job_script = args["job_script"]
if (
hasattr(env, "dispatch_jobs_on_localhost")
and isinstance(env.dispatch_jobs_on_localhost, bool)
and env.dispatch_jobs_on_localhost
):
local(template("$job_dispatch " + job_script))
print("job dispatch is done locally\n")
elif not env.get("noexec", False):
if env.remote == "localhost":
run(
cmd="{} && {}".format(
env.run_prefix,
template("$job_dispatch {}".format(job_script))
),
cd=env.pather.dirname(job_script)
)
else:
run(
cmd=template("$job_dispatch {}".format(job_script)),
cd=env.pather.dirname(job_script)
)
print(
"Use `fab {} fetch_results` to copy the results "
"back to localhost.".format(env.machine_name)
)
return [job_script]
@task
@beartype
def ensemble2campaign(
results_dir: str,
campaign_dir: str,
    skip: Optional[Union[int, str]] = 0
) -> None:
"""
Converts FabSim3 ensemble results to EasyVVUQ campaign definition.
results_dir: FabSim3 results root directory
campaign_dir: EasyVVUQ root campaign directory.
skip: The number of runs (run_1 to run_skip) not to copy to the campaign
"""
# update_environment(args)
# if skip > 0: only copy the run directories run_X for X > skip back
# to the EasyVVUQ campaign dir
if int(skip) > 0:
# all run directories
runs = os.listdir("{}/RUNS/".format(results_dir))
for run in runs:
# extract X from run_X
run_id = int(run.split("_")[-1])
# if X > skip copy results back
if run_id > int(skip):
local(
"rsync -pthrvz {}/RUNS/{} {}/runs".format(
results_dir, run, campaign_dir
)
)
# copy all runs from FabSim results directory to campaign directory
else:
local(
"rsync -pthrvz {}/RUNS/ {}/runs".format(results_dir, campaign_dir)
)
@task
@beartype
def campaign2ensemble(
config: str,
campaign_dir: str,
    skip: Optional[Union[int, str]] = 0
) -> None:
"""
Converts an EasyVVUQ campaign run set TO a FabSim3 ensemble definition.
Args:
config (str): FabSim3 configuration name (will create in top level if
non-existent, and overwrite existing content).
campaign_dir (str): EasyVVUQ root campaign directory
skip (Union[int, str], optional): The number of runs(run_1 to run_skip)
not to copy to the FabSim3 sweep directory. The first skip number
of samples will then not be computed.
"""
# update_environment(args)
config_path = find_config_file_path(config, ExceptWhenNotFound=False)
if config_path is False:
local("mkdir -p {}/{}".format(env.local_config_file_path[-1], config))
config_path = "{}/{}".format(env.local_config_file_path[-1], config)
sweep_dir = config_path + "/SWEEP"
local("mkdir -p {}".format(sweep_dir))
local("rm -rf {}/*".format(sweep_dir))
# if skip > 0: only copy the run directories run_X for X > skip to the
# FabSim3 sweep directory. This avoids recomputing already computed samples
# when the EasyVVUQ grid is refined adaptively.
if int(skip) > 0:
# all runs in the campaign dir
runs = os.listdir("{}/runs/".format(campaign_dir))
for run in runs:
# extract X from run_X
run_id = int(run.split("_")[-1])
# if X > skip, copy run directory to the sweep dir
if run_id > int(skip):
print("Copying {}".format(run))
local(
"rsync -pthrz {}/runs/{} {}".format(
campaign_dir, run, sweep_dir
)
)
    # if skip = 0: copy all runs from the EasyVVUQ run directory to the sweep dir
else:
local("rsync -pthrz {}/runs/ {}".format(campaign_dir, sweep_dir))
@beartype
def run_ensemble(
config: str,
sweep_dir: str,
sweep_on_remote: Optional[bool] = False,
execute_put_configs: Optional[bool] = True,
**args
) -> None:
"""
Map and execute ensemble jobs.
The job results will be stored with a name pattern as defined in
the environment
!!! note
function `with_config` should be called before calling this function in
plugin code.
Args:
config (str): base config directory to use to define input files
sweep_dir (str): directory containing inputs that will vary per
ensemble simulation instance.
        sweep_on_remote (bool, optional): value `True` means the `SWEEP`
            directory is located on the remote machine.
        execute_put_configs (bool, optional): `True` means the `put_configs`
            function will be called to transfer the `config` files and folders
            to the remote machine.
**args: Description
Raises:
        RuntimeError: - if the `with_config` function was not called before calling
`run_ensemble` task.
- if `env.script` variable did not set.
- if `SWEEP` directory is empty.
"""
update_environment(args)
if "script" not in env:
raise RuntimeError(
"ERROR: run_ensemble function has been called,"
"but the parameter 'script' was not specified."
)
# check if with_config function is already called or not
if not hasattr(env, "job_config_path"):
raise RuntimeError(
"Function with_config did NOT called, "
"Please call it before calling run_ensemble()"
)
# check for PilotJob option
if hasattr(env, "PJ") and env.PJ.lower() == "true":
# env.batch_header = "no_batch"
env.submitted_jobs_list = []
env.submit_job = False
env.batch_header = "bash_header"
if sweep_on_remote is False:
sweepdir_items = os.listdir(sweep_dir)
else:
        # in case of reading the SWEEP folder from the remote machine, we need
        # an SSH tunnel and then list the directories
sweepdir_items = run("ls -1 {}".format(sweep_dir)).splitlines()
if len(sweepdir_items) == 0:
raise RuntimeError(
"ERROR: no files where found in the sweep_dir : {}".format(
sweep_dir
)
)
# reorder an exec_first item for priority execution.
if hasattr(env, "exec_first"):
sweepdir_items.insert(
0, sweepdir_items.pop(sweepdir_items.index(env.exec_first))
)
if execute_put_configs is True:
execute(put_configs, config)
# output['everything'] = False
job_scripts_to_submit = job(
dict(
ensemble_mode=True,
sweepdir_items=sweepdir_items,
sweep_dir=sweep_dir,
)
)
if hasattr(env, "PJ") and env.PJ.lower() == "true":
print(
Panel.fit(
"NOW, we submitting PJ jobs",
title="[orange_red1]PJ job submission phase[/orange_red1]",
border_style="orange_red1"
)
)
        # first, add all generated task scripts to the PJ_PY script
submitted_jobs_list = []
if not hasattr(env, "task_model"):
env.task_model = "default"
# Python's indexes start at zero, to start from 1, set start=1
for index, job_script in enumerate(job_scripts_to_submit, start=1):
# TODO: this loop should be improved
env.idsID = index
env.idsPath = job_script
submitted_jobs_list.append(
script_template_content("qcg-PJ-task-template")
)
env.submitted_jobs_list = "\n".join(submitted_jobs_list)
        # to avoid applying the replicas functionality to PilotJob folders
env.replicas = "1"
backup_header = env.batch_header
env.batch_header = env.PJ_PYheader
job_scripts_to_submit = job(
dict(ensemble_mode=False, label="PJ_PYheader", NoEnvScript=True)
)
env.PJ_PATH = job_scripts_to_submit[0]
env.PJ_FileName = env.pather.basename(env.PJ_PATH)
env.batch_header = env.PJ_header
env.submit_job = True
# load QCG-PJ-PY file
PJ_CMD = []
if hasattr(env, "venv") and str(env.venv).lower() == "true":
# QCG-PJ should load from virtualenv
PJ_CMD.append("# unload any previous loaded python module")
PJ_CMD.append("module unload python\n")
PJ_CMD.append("# load QCG-PilotJob from VirtualEnv")
PJ_CMD.append(
'eval "$({}/bin/conda shell.bash hook)"\n'.format(
env.virtual_env_path
)
)
PJ_CMD.append("# load QCG-PJ-Python file")
PJ_CMD.append(
"{}/bin/python3 {}".format(env.virtual_env_path, env.PJ_PATH)
)
else:
PJ_CMD.append("# Install QCG-PJ in user's home space")
PJ_CMD.append("pip3 install --user --upgrade qcg-pilotjob\n")
PJ_CMD.append("# load QCG-PJ-Python file")
PJ_CMD.append("python3 {}".format(env.PJ_PATH))
env.run_QCG_PilotJob = "\n".join(PJ_CMD)
job(dict(ensemble_mode=False, label="PJ_header", NoEnvScript=True))
env.batch_header = backup_header
env.NoEnvScript = False
def input_to_range(arg, default):
ttype = type(default)
    # regexp for an array generator like [1.2:3:0.2]
gen_regexp = r"\[([\d\.]+):([\d\.]+):([\d\.]+)\]"
if not arg:
return [default]
match = re.match(gen_regexp, str(arg))
if match:
vals = list(map(ttype, match.groups()))
if ttype == int:
return range(*vals)
else:
return np.arange(*vals)
return [ttype(arg)]
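# ---------------------------------------------------------------------------
# Illustrative sketch (not part of FabSim3): the argument forms accepted by
# input_to_range(). A bracketed "[start:stop:step]" string expands to a range
# (for an int default) or a numpy.arange (for a float default); any other
# value is wrapped in a single-element list.
# ---------------------------------------------------------------------------
#     input_to_range("[1:5:1]", default=0)          -> range(1, 5, 1), i.e. 1, 2, 3, 4
#     input_to_range("[0.5:2.0:0.5]", default=0.0)  -> array([0.5, 1.0, 1.5])
#     input_to_range("3", default=0)                -> [3]
#     input_to_range(None, default=7)               -> [7]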
@task
def install_packages(venv: str = "False"):
"""
Install list of packages defined in deploy/applications.yml
!!! note
        if you get an error on your local machine during the wheel build
        for scipy, like this one
```sh
ERROR: lapack_opt_info:
```
        Try to install BLAS and LAPACK first, by running
```sh
sudo apt-get install libblas-dev
sudo apt-get install liblapack-dev
sudo apt-get install libatlas-base-dev
sudo apt-get install gfortran
```
Args:
venv (str, optional): `True` means the VirtualEnv is already installed
            on the remote machine
"""
applications_yml_file = os.path.join(
env.fabsim_root, "deploy", "applications.yml"
)
user_applications_yml_file = os.path.join(
env.fabsim_root, "deploy", "applications_user.yml"
)
if not os.path.exists(user_applications_yml_file):
copyfile(applications_yml_file, user_applications_yml_file)
config = yaml.load(
open(user_applications_yml_file), Loader=yaml.SafeLoader
)
tmp_app_dir = "{}/tmp_app".format(env.localroot)
local("mkdir -p {}".format(tmp_app_dir))
for dep in config["packages"]:
local(
"pip3 download --no-binary=:all: -d {} {}".format(tmp_app_dir, dep)
)
add_dep_list_compressed = sorted(
Path(tmp_app_dir).iterdir(), key=lambda f: f.stat().st_mtime
)
for it in range(len(add_dep_list_compressed)):
add_dep_list_compressed[it] = os.path.basename(
add_dep_list_compressed[it]
)
# Create directory in the remote machine to store dependency packages
run(template("mkdir -p {}".format(env.app_repository)))
# Send the dependencies (and the dependencies of dependencies) to the
# remote machine
for whl in os.listdir(tmp_app_dir):
local(
template(
"rsync -pthrvz -e 'ssh -p $port' {}/{} "
"$username@$remote:$app_repository".format(tmp_app_dir, whl)
)
# "rsync -pthrvz %s/%s eagle:$app_repository"%(tmp_app_dir, whl)
)
# Set required env variable
env.config = "Install_VECMA_App"
# env.nodes = 1
env.nodes = env.cores
script = os.path.join(tmp_app_dir, "script")
# Write the Install command in a file
with open(script, "w") as sc:
install_dir = "--user"
if venv.lower() == "true":
sc.write(
"if [ ! -d {} ]; then \n\t python -m virtualenv "
"{} || echo 'WARNING : virtualenv is not installed "
"or has a problem' \nfi\n\nsource {}/bin/activate\n".format(
env.virtual_env_path,
env.virtual_env_path,
env.virtual_env_path,
)
)
install_dir = ""
# First install the additional_dependencies
for dep in reversed(add_dep_list_compressed):
print(dep)
if dep.endswith(".zip"):
sc.write(
"\nunzip {}/{} -d {} && cd {}/{} "
"&& python3 setup.py install {}".format(
env.app_repository,
dep,
env.app_repository,
env.app_repository,
dep.replace(".zip", ""),
install_dir,
)
)
elif dep.endswith(".tar.gz"):
sc.write(
"\ntar xf {}/{} -C {} && cd {}/{} "
"&& python3 setup.py install {}\n".format(
env.app_repository,
dep,
env.app_repository,
env.app_repository,
dep.replace(".tar.gz", ""),
install_dir,
)
)
# Add the tmp_app_dir directory in the local templates path because the
# script is saved in it
env.local_templates_path.insert(0, tmp_app_dir)
install_dict = dict(script="script")
# env.script = "script"
update_environment(install_dict)
# Determine a generated job name from environment parameters
# and then define additional environment parameters based on it.
env.job_results, env.job_results_local = with_template_job()
# Create job script based on "sbatch header" and script created above in
# deploy/.jobscript/
env.job_script = script_templates(env.batch_header_install_app, env.script)
    # Create the script's destination path on the remote machine
env.dest_name = env.pather.join(
env.scripts_path, env.pather.basename(env.job_script)
)
# Send Install script to remote machine
put(env.job_script, env.dest_name)
#
run(template("mkdir -p $job_results"))
with cd(env.pather.dirname(env.job_results)):
run(template("{} {}".format(env.job_dispatch, env.dest_name)))
local("rm -rf {}".format(tmp_app_dir))
@task
def install_app(name="", external_connexion="no", venv="False"):
"""
    Install a specific application through FabSim3.
"""
applications_yml_file = os.path.join(
env.fabsim_root, "deploy", "applications.yml"
)
user_applications_yml_file = os.path.join(
env.fabsim_root, "deploy", "applications_user.yml"
)
if not os.path.exists(user_applications_yml_file):
copyfile(applications_yml_file, user_applications_yml_file)
config = yaml.load(
open(user_applications_yml_file), Loader=yaml.SafeLoader
)
info = config[name]
# Offline cluster installation - --user install
# Temporary folder
tmp_app_dir = "{}/tmp_app".format(env.localroot)
local("mkdir -p {}".format(tmp_app_dir))
    # First download the Miniconda3 installation script
local(
"wget {} -O {}/miniconda.sh".format(
config["Miniconda-installer"]["repository"], tmp_app_dir
)
)
# Next download all the additional dependencies
for dep in info["additional_dependencies"]:
local(
"pip3 download --no-binary=:all: -d {} {}".format(tmp_app_dir, dep)
)
add_dep_list_compressed = sorted(
Path(tmp_app_dir).iterdir(), key=lambda f: f.stat().st_mtime
)
for it in range(len(add_dep_list_compressed)):
add_dep_list_compressed[it] = os.path.basename(
add_dep_list_compressed[it]
)
# Download all the dependencies of the application
    # This first method should download all the dependencies needed,
    # but only for the local platform!
    # --> possible issue during the installation on the remote machine
    # (it is not a cross-platform install yet)
local(
"pip3 download --no-binary=:all: -d {} git+{}@v{}".format(
tmp_app_dir, info["repository"], info["version"]
)
)
# Create directory in the remote machine to store dependency packages
run(template("mkdir -p {}".format(env.app_repository)))
# Send the dependencies (and the dependencies of dependencies) to the
# remote machine
for whl in os.listdir(tmp_app_dir):
local(
template(
"rsync -pthrvz -e 'ssh -p $port' {}/{} "
"$username@$remote:$app_repository".format(tmp_app_dir, whl)
)
)
# Set required env variable
env.config = "Install_VECMA_App"
# env.nodes = 1
env.nodes = env.cores
script = os.path.join(tmp_app_dir, "script")
# Write the Install command in a file
with open(script, "w") as sc:
install_dir = ""
if venv == "True":
# clean virtualenv and App_repo directory on remote machine side
# To make sure everything is going to be installed from scratch
"""
sc.write("find %s/ -maxdepth 1 -mindepth 1 -type d \
-exec rm -rf \"{}\" \\;\n" % (env.app_repository))
sc.write("rm -rf %s\n" % (env.virtual_env_path))
"""
            # It seems some versions of python/virtualenv don't support
            # the option --no-download, so there is sometimes a problem:
            # from pip import main
            # ImportError: cannot import name 'main'
            #
            # TODO: check the python version and raise a Warning if it is not
            # the right version
            #
sc.write(
"if [ ! -d {} ]; then \n\t bash {}/miniconda.sh -b -p {} "
"|| echo 'WARNING : virtualenv is not installed "
"or has a problem' \nfi".format(
env.virtual_env_path,
env.app_repository,
env.virtual_env_path,
)
)
sc.write(
'\n\neval "$$({}/bin/conda shell.bash hook)"\n\n'.format(
env.virtual_env_path
)
)
# install_dir = ""
"""
with the latest version of numpy, I got this error:
1. Check that you expected to use Python3.8 from ...,
and that you have no directories in your PATH or PYTHONPATH
that can interfere with the Python and numpy version "1.18.1"
you're trying to use.
            so, since we are using a VirtualEnv, to avoid any conflict,
it is better to clear PYTHONPATH
"""
# sc.write("\nexport PYTHONPATH=\"\"\n")
sc.write("\nmodule unload python\n")
# First install the additional_dependencies
for dep in reversed(add_dep_list_compressed):
print(dep)
if dep.endswith(".zip"):
sc.write(
"\nunzip {}/{} -d {} && cd {}/{} "
"&& {}/bin/python3 setup.py install {}\n".format(
env.app_repository,
dep,
env.app_repository,
env.app_repository,
dep.replace(".zip", ""),
env.virtual_env_path,
install_dir,
)
)
elif dep.endswith(".tar.gz"):
sc.write(
"\ntar xf {}/{} -C {} && cd {}/{} "
"&& {}/bin/python3 setup.py install {}\n".format(
env.app_repository,
dep,
env.app_repository,
env.app_repository,
dep.replace(".tar.gz", ""),
env.virtual_env_path,
install_dir,
)
)
sc.write(
"{}/bin/pip install --no-index --no-build-isolation "
"--find-links=file:{} {}/{}-{}.zip {} || "
"{}/bin/pip install --no-index "
"--find-links=file:{} {}/{}-{}.zip".format(
env.virtual_env_path,
env.app_repository,
env.app_repository,
info["name"],
info["version"],
install_dir,
env.virtual_env_path,
env.app_repository,
env.app_repository,
info["name"],
info["version"],
)
)
# Add the tmp_app_dir directory in the local templates path because the
# script is saved in it
env.local_templates_path.insert(0, tmp_app_dir)
install_dict = dict(script="script")
# env.script = "script"
update_environment(install_dict)
# Determine a generated job name from environment parameters
# and then define additional environment parameters based on it.
env.job_results, env.job_results_local = with_template_job()
# Create job script based on "sbatch header" and script created above in
# deploy/.jobscript/
env.job_script = script_templates(env.batch_header_install_app, env.script)
    # Create the script's destination path on the remote machine
run(template("mkdir -p $scripts_path"))
env.dest_name = env.pather.join(
env.scripts_path, env.pather.basename(env.job_script)
)
# Send Install script to remote machine
put(env.job_script, env.dest_name)
#
run(template("mkdir -p $job_results"))
with cd(env.pather.dirname(env.job_results)):
run(template("{} {}".format(env.job_dispatch, env.dest_name)))
local("rm -rf {}".format(tmp_app_dir))
|
#!/usr/bin/python
import os
import subprocess
#import paramiko
#ssh = paramiko.SSHClient()
#ssh.connect('192.168.1.22', username=root, password=cloud123)
#ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(ls)
createCmd = "ssh [email protected] sh scale_machine.sh"
subprocess.check_call(createCmd,shell=True)
|
from pathlib import Path
import datetime
import logging
import tensorflow as tf
from vesper.util.settings import Settings
import vesper.util.yaml_utils as yaml_utils
_TRAINING_DATA_DIR_PATH = Path(
'/Users/harold/Desktop/NFC/Data/Vesper ML/NOGO Coarse Classifier 0.0')
_MODEL_DIR_PATH = _TRAINING_DATA_DIR_PATH / 'Models'
_EPOCH_DIR_NAME_FORMAT = 'Epoch {}'
_TENSORFLOW_SAVED_MODEL_DIR_NAME = 'TensorFlow SavedModel'
_KERAS_MODEL_FILE_NAME = 'Keras Model.h5'
_TRAINING_SETTINGS_FILE_NAME = 'Training Settings.yaml'
_INFERENCE_DATA_DIR_PATH = Path(__file__).parent / 'data'
_INFERENCE_SETTING_CHANGES = Settings(
waveform_slice_min_non_call_slice_start_time=0,
waveform_slice_max_non_call_slice_start_time=0,
waveform_amplitude_scaling_data_augmentation_enabled=False,
max_spectrogram_frequency_shift=0,
)
"""Changes to training settings for inference."""
def get_dataset_dir_path(dataset_name):
return _TRAINING_DATA_DIR_PATH / 'Datasets' / dataset_name
def create_training_name(settings):
start_time = datetime.datetime.now().strftime('%Y-%m-%d_%H.%M.%S')
return f'{start_time}'
def get_training_start_time(training_name):
return training_name
def get_training_log_dir_path(training_name):
return _TRAINING_DATA_DIR_PATH / 'Logs' / training_name
def get_training_model_dir_path(training_name):
return _MODEL_DIR_PATH / training_name
def get_training_epoch_model_dir_path(training_name, epoch_num):
training_model_dir_path = get_training_model_dir_path(training_name)
epoch_dir_name = _EPOCH_DIR_NAME_FORMAT.format(epoch_num)
return training_model_dir_path / epoch_dir_name
def get_training_tensorflow_model_dir_path(training_name, epoch_num):
model_dir_path = \
get_training_epoch_model_dir_path(training_name, epoch_num)
return model_dir_path / _TENSORFLOW_SAVED_MODEL_DIR_NAME
def get_training_keras_model_file_path(training_name, epoch_num):
model_dir_path = \
get_training_epoch_model_dir_path(training_name, epoch_num)
return model_dir_path / _KERAS_MODEL_FILE_NAME
def load_training_model(training_name, epoch_num):
file_path = get_training_keras_model_file_path(training_name, epoch_num)
return _load_model(file_path)
def _load_model(file_path):
logging.info(f'Loading classifier model from "{file_path}"...')
return tf.keras.models.load_model(file_path)
def load_inference_model():
file_path = _INFERENCE_DATA_DIR_PATH / _KERAS_MODEL_FILE_NAME
return _load_model(file_path)
def get_training_settings_file_path(training_name):
model_dir_path = get_training_model_dir_path(training_name)
return model_dir_path / _TRAINING_SETTINGS_FILE_NAME
def save_training_settings(settings, training_name):
file_path = get_training_settings_file_path(training_name)
text = yaml_utils.dump(settings.__dict__, default_flow_style=False)
file_path.write_text(text)
def load_training_settings(training_name):
file_path = get_training_settings_file_path(training_name)
return _load_settings(file_path)
def _load_settings(file_path):
logging.info(f'Loading classifier settings from "{file_path}"...')
text = file_path.read_text()
dict_ = yaml_utils.load(text)
return Settings.create_from_dict(dict_)
def load_inference_settings():
file_path = _INFERENCE_DATA_DIR_PATH / _TRAINING_SETTINGS_FILE_NAME
training_settings = _load_settings(file_path)
return get_inference_settings(training_settings)
def get_inference_settings(training_settings):
return Settings(training_settings, _INFERENCE_SETTING_CHANGES)
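# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the path helpers
# above compose for a hypothetical training name and epoch number.
# ---------------------------------------------------------------------------
def _example_model_paths(training_name='2020-05-09_23.54.46', epoch_num=20):
    """Sketch only: returns the derived model directory and file paths."""
    return (
        # .../Models/2020-05-09_23.54.46/Epoch 20
        get_training_epoch_model_dir_path(training_name, epoch_num),
        # .../Epoch 20/TensorFlow SavedModel
        get_training_tensorflow_model_dir_path(training_name, epoch_num),
        # .../Epoch 20/Keras Model.h5
        get_training_keras_model_file_path(training_name, epoch_num),
    )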
|
from keras.models import Model
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import AveragePooling2D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.layers import Input, merge
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
import keras.backend as K
|
''' show_ntp.py
Parser for the following show commands:
* show ntp detail
returns NTP configuration
'''
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
# ====================================================
# schema for show ntp detail
# ====================================================
class ShowNtpDetailSchema(MetaParser):
"""Schema for show ntp detail"""
schema = {
'ntp':{
Optional('Enabled'): str,
Optional('Server 1'): str,
Optional('Server 2'): str,
Optional('Server 3'): str,
Optional('Server 4'): str,
Optional('Status'): str
},
}
# ====================================================
# parser for show ntp detail
# ====================================================
class ShowNtpDetail(ShowNtpDetailSchema):
"""Parser for show ntp detail"""
cli_command = 'show ntp detail'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
ret_dict = {}
p1 = re.compile(r'^Enabled\:\s(?P<enabled>\S*)')
p2 = re.compile(r'^(?P<server>Server\s\d)\:\s*(?P<ip>[\d\.]*)')
p3 = re.compile(r'^Status\:\s(?P<state>\S*)')
for line in out.splitlines():
line = line.strip()
m = p1.match(line)
if m:
groups = m.groupdict()
dict_name = ret_dict.setdefault('ntp', {})
#dict_name['dimm'] = {}
dict_name['Enabled'] = groups['enabled']
continue
m = p2.match(line)
if m:
groups = m.groupdict()
ntp_server = groups['server']
dict_name[ntp_server] = groups['ip']
continue
m = p3.match(line)
if m:
groups = m.groupdict()
dict_name['Status'] = groups['state']
continue
return ret_dict
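# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the parser): device output of the
# following hypothetical shape is mapped by the regexes above into the
# schema dictionary.
# ---------------------------------------------------------------------------
#     Enabled: yes
#     Server 1: 10.1.1.1
#     Server 2: 10.1.1.2
#     Status: synchronized
#
# parsed result:
#     {'ntp': {'Enabled': 'yes',
#              'Server 1': '10.1.1.1',
#              'Server 2': '10.1.1.2',
#              'Status': 'synchronized'}}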
|
#!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
x = 42
print('Hello, World.{}'.format(x))
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import collections
class BBox(object):
def __init__(self):
self.xmin = self.xmax = None
self.ymin = self.ymax = None
def update(self, pt):
if self.xmin is None:
self.xmin = self.xmax = pt[0]
self.ymin = self.ymax = pt[1]
if pt[0] < self.xmin: self.xmin = pt[0]
if pt[0] > self.xmax: self.xmax = pt[0]
if pt[1] < self.ymin: self.ymin = pt[1]
if pt[1] > self.ymax: self.ymax = pt[1]
return self
@property
def width(self):
return self.xmax-self.xmin+1
@property
def height(self):
return self.ymax-self.ymin+1
class Map(object):
def __init__(self):
self.bbox = BBox()
self.data = dict()
def load(self, fh):
txt = fh.read()
vec = dict(N=(0,-1), S=(0,1), E=(1,0), W=(-1,0))
stack = [ [[0,0]] ]
self.data[(0,0)] = 'X'
for ch in txt:
if ch in 'NSEW':
dx, dy = vec[ch]
for p in stack[-1]:
x, y = p
p[:] = x+2*dx, y+2*dy
self.bbox.update((x+2*dx, y+2*dy))
self.data[(x+dx, y+dy)] = '|' if dy == 0 else '-'
self.data[(x+2*dx, y+2*dy)] = '.'
elif ch == '(':
current = stack.pop()
stack.append([ ]) # Future positions (after "()" closes)
stack.append([ list(x) for x in current ]) # Saved pos at "("
stack.append([ list(x) for x in current ]) # Current updatable position
elif ch == ')':
current = stack.pop()
# Now: [-1] == saved pos list; [-2] == future pos list
stack.pop() # No longer need saved state
stack[-1].extend(current)
stack[-1] = [ [x,y] for x,y in set((x,y) for x,y in stack[-1]) ]
elif ch == '|':
current = stack.pop()
# Now: [-1] == saved pos list; [-2] == future pos list
stack[-2].extend(current)
stack.append([ list(x) for x in stack[-1] ]) # Current updatable position
return self
def shortest_paths(self, pos=(0,0)):
self.diameter = 0
pos = tuple([ pos ])
self.paths = paths = { pos: [pos] }
dirs = [ (0,-1), (0,1), (1,0), (-1,0) ]
todo = collections.deque([ pos ])
while todo:
path = todo.popleft()
x, y = path[-1]
for d in dirs:
x1, y1 = x+d[0], y+d[1]
if self.data.get((x1,y1), "#") != "#":
x2, y2 = x+2*d[0], y+2*d[1]
if (x2, y2) not in paths:
self.diameter = len(path)
nxt = path + ((x2, y2),)
paths[(x2, y2)] = nxt
todo.append(nxt)
def show(self):
for y in range(self.bbox.ymin-1, self.bbox.ymax+2):
print("".join(self.data.get((x,y), "#") for x in range(self.bbox.xmin-1, self.bbox.xmax+2)))
def MAIN(argv):
with open(argv.fname, 'r', encoding='UTF-8') as fh:
map = Map().load(fh)
map.show()
map.shortest_paths()
print("Map diameter:", map.diameter)
print("At least 1000 away:", sum(1 for p in map.paths.values() if len(p) > 1000))
def getopts():
parser = argparse.ArgumentParser(description="""Advent of code day 20""")
parser.add_argument('fname', type=str, nargs='?', default="20.in", help='File name')
return parser.parse_args()
if __name__ == '__main__':
MAIN(getopts())
|
from pyspider.core.model.mysql_base import *
"""
Daily backup of per-shop SKU stock
"""
class ShopSkuDayStock(Model):
id = IntegerField(primary_key=True)
    back_up_day = IntegerField(verbose_name='Backup date; static data as of 24:00 on that day')
    sku = CharField(verbose_name='skuBarCode')
    warehouse_id = IntegerField(verbose_name='Warehouse ID')
    shop_id = IntegerField(verbose_name='Shop ID')
    shop_type = IntegerField(verbose_name='0: directly operated, 1: franchise, 2: joint venture')
    actual_stock = IntegerField(verbose_name='Actual stock')
    underway_stock = IntegerField(verbose_name='In-transit stock')
    lock_stock = IntegerField(verbose_name='Locked stock')
    available_stock = IntegerField(verbose_name='Available stock')
    created_at = DateTimeField(verbose_name='Creation time')
class Meta:
database = xs_report_db
db_table = 'shop_sku_day_stock'
indexes = (
(('back_up_day', 'sku', 'warehouse_id'), True),
)
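# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module), assuming mysql_base
# exposes the usual peewee ORM query API: fetch one day's backup rows for a
# hypothetical shop.
# ---------------------------------------------------------------------------
def _example_query(back_up_day=20200529, shop_id=1):
    """Sketch only: select the stock backup rows for one shop and day."""
    return (ShopSkuDayStock
            .select()
            .where((ShopSkuDayStock.back_up_day == back_up_day)
                   & (ShopSkuDayStock.shop_id == shop_id)))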
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016-2020 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
from sequana.lazy import pylab
import colorlog
logger = colorlog.getLogger(__name__)
from sequana.viz import clusterisation
__all__ = ['Isomap']
class Isomap(clusterisation.Cluster):
"""
.. plot::
:include-source:
from sequana.viz.isomap import Isomap
from sequana import sequana_data
import pandas as pd
data = sequana_data("test_pca.csv")
df = pd.read_csv(data)
df = df.set_index("Id")
        p = Isomap(df, colors={
"A1": 'r', "A2": 'r', 'A3': 'r',
"B1": 'b', "B2": 'b', 'B3': 'b'})
p.plot(n_components=2)
"""
def __init__(self, data, colors={}):
super(Isomap, self).__init__(data, colors)
def plot(self, n_components=2, n_neighbors=5, transform="log", switch_x=False,
switch_y=False, switch_z=False, colors=None,
max_features=500, show_plot=True):
"""
        :param n_components: a number starting at 2, or a value below 1,
e.g. 0.95 means select automatically the number of components to
capture 95% of the variance
        :param transform: can be 'log' or 'anscombe'; log is just log10. Counts
            with zeros are set to 1
"""
from sklearn.manifold import Isomap
import numpy as np
pylab.clf()
data, kept = self.scale_data(transform_method=transform, max_features=max_features)
iso = Isomap(n_neighbors=n_neighbors, n_components=n_components)
iso.fit(data.T)
Xr = iso.transform(data.T)
self.Xr = Xr
if switch_x:
Xr[:,0] *= -1
if switch_y:
Xr[:,1] *= -1
if switch_z:
Xr[:,2] *= -1
# PC1 vs PC2
if show_plot:
pylab.figure(1)
self._plot(Xr, pca=None, pc1=0,pc2=1, colors=colors)
if n_components >=3:
if show_plot:
pylab.figure(2)
self._plot(Xr, pca=None, pc1=0,pc2=2, colors=colors)
pylab.figure(3)
self._plot(Xr, pca=None, pc1=1,pc2=2, colors=colors)
return iso
|
# This is the configuration for fpm-config-generator. It exists in its own file
# because then a script can automatically update it easier.
global systemGroups
global groupName
global configTemplate
global outputDir
global customConfigs
systemGroups = "group" #where the list of secondary groups are.
groupName = "users" #the group who get fpm configs
configTemplate = "templates/template.tpl"
outputDir = "fpm-out/" #requires trailing slash
customConfigs = "templates/custom/"
|
# listGroupConnectors
# Returns a list of information about all connectors within a group in your Fivetran account.
# Reference: https://fivetran.com/docs/rest-api/groups#listallconnectorswithinagroup
import fivetran_api
# Fivetran API URL (replace {group_id} with your group id)
url = "https://api.fivetran.com/v1/groups/{group_id}/connectors"
fivetran_api.dump(fivetran_api.get_url(url))
|
"""
Test lockdown decorator
"""
# third-party imports
import webapp2
# local imports
from .utils import DecodeError
from .utils import decode
from .utils import encode
from .decorators import locked_down
from .mixins import LockedDownMixin
from nacelle.test.testcases import NacelleTestCase
# test fixtures: we need to set up a local wsgi app so we can test the
# decorator against real handlers
@locked_down('uname', 'pword')
def locked_handler(request):
return webapp2.Response('success')
class LDTestHandler(LockedDownMixin, webapp2.RequestHandler):
lockdown_username = 'uname'
lockdown_password = 'pword'
def get(self):
self.response.write('success')
# Define our WSGI app so GAE can run it
routes = [('/', locked_handler), ('/ld', LDTestHandler)]
wsgi = webapp2.WSGIApplication(routes, debug=True, config={
'webapp2_extras.sessions': {'secret_key': 'xxxxxxxxxxxxxxxxxxxxxx'}
})
# attach dispatcher and error_handler to the WSGI app
dispatcher = webapp2.import_string('nacelle.core.dispatcher.nacelle_dispatcher')
wsgi.router.set_dispatcher(dispatcher)
class LockdownDecoratorTests(NacelleTestCase):
def test_properly_returns_401_when_no_headers(self):
"""@locked_down decorator returns a 401 when no auth headers present
"""
response = wsgi.get_response('/')
self.assertEqual(401, response.status_int)
self.assertIn('WWW-Authenticate', response.headers)
self.assertIn('Could not verify your access level for that URL. '
'You have to login with proper credentials', response.body)
def test_properly_unlocks_when_valid_headers(self):
"""@locked_down allows access when auth headers are valid
"""
headers = [('Authorization', encode('uname', 'pword'))]
response = wsgi.get_response('/', headers=headers)
self.assertEqual(200, response.status_int)
self.assertEqual('success', response.body)
def test_properly_unlocks_when_valid_but_malformed_headers(self):
"""@locked_down allows access when auth headers are valid but missing BASIC prefix
"""
auth_string = encode('uname', 'pword').replace('Basic ', '')
headers = [('Authorization', auth_string)]
response = wsgi.get_response('/', headers=headers)
self.assertEqual(200, response.status_int)
self.assertEqual('success', response.body)
class LockdownMixinTests(NacelleTestCase):
def test_properly_returns_401_when_no_headers(self):
"""LockedDownMixin returns a 401 when no auth headers present
"""
response = wsgi.get_response('/ld')
self.assertEqual(401, response.status_int)
self.assertIn('WWW-Authenticate', response.headers)
self.assertIn('Could not verify your access level for that URL. '
'You have to login with proper credentials', response.body)
def test_properly_unlocks_when_valid_headers(self):
"""LockedDownMixin allows access when auth headers are valid
"""
headers = [('Authorization', encode('uname', 'pword'))]
response = wsgi.get_response('/ld', headers=headers)
self.assertEqual(200, response.status_int)
self.assertEqual('success', response.body)
def test_properly_unlocks_when_valid_but_malformed_headers(self):
"""LockedDownMixin allows access when auth headers are valid but missing BASIC prefix
"""
auth_string = encode('uname', 'pword').replace('Basic ', '')
headers = [('Authorization', auth_string)]
response = wsgi.get_response('/ld', headers=headers)
self.assertEqual(200, response.status_int)
self.assertEqual('success', response.body)
class Encode(NacelleTestCase):
def test_prepends_basic_auth(self):
self.assertTrue(encode('', '').lower().startswith('basic'))
def test_adds_space_after_basic(self):
self.assertTrue(encode('', '').lower().startswith('basic '))
def test_encodes_short_username(self):
self.assertTrue(encode('', 'password'))
def test_encodes_short_password(self):
self.assertTrue(encode('username', ''))
def test_encodes_long_username(self):
self.assertTrue(encode('username'*1000000, ''))
def test_encodes_long_password(self):
self.assertTrue(encode('', 'password'*1000000))
class Decode(NacelleTestCase):
def test_decodes_empty_username(self):
self.assertEqual('', decode(encode('', 'password'))[0])
def test_decodes_empty_password(self):
self.assertEqual('', decode(encode('username', ''))[1])
def test_decodes_hashes_only(self):
username, password = 'username', 'omgawesome!'
encoded_str = encode(username, password)
encoded_hash = encoded_str.split(' ')[1]
self.assertEqual((username, password), decode(encoded_hash))
def test_decodes_fully_encoded_strings(self):
username, password = 'username', 'password'
encoded_str = encode(username, password)
self.assertEqual((username, password), decode(encoded_str))
def test_doesnt_decode_invalid_auth_types(self):
encoded_str = 'error woot'
self.assertRaises(DecodeError, decode, encoded_str)
def test_doesnt_decode_invalid_hashes(self):
encoded_str = 'basic omg hacks what'
self.assertRaises(DecodeError, decode, encoded_str)
encoded_str = 'basic omg hacks'
self.assertRaises(DecodeError, decode, encoded_str)
encoded_str = 'basic omg'
self.assertRaises(DecodeError, decode, encoded_str)
encoded_str = 'basic'
self.assertRaises(DecodeError, decode, encoded_str)
def test_properly_escapes_colons(self):
username, password = 'user:name:', 'pass:word:'
encoded_str = encode(username, password)
self.assertEqual((username, password), decode(encoded_str))
|
from dpsniper.attack.dpsniper import DPSniper
from dpsniper.classifiers.classifier_factory import LogisticRegressionFactory
from dpsniper.classifiers.torch_optimizer_factory import SGDOptimizerFactory
from dpsniper.search.ddconfig import DDConfig
from dpsniper.search.ddsearch import DDSearch
from dpsniper.input.input_domain import InputDomain, InputBaseType
from dpsniper.input.pattern_generator import PatternGenerator
from dpsniper.mechanisms.laplace import LaplaceMechanism
from dpsniper.utils.initialization import initialize_dpsniper
if __name__ == "__main__":
# create mechanism
mechanism = LaplaceMechanism()
# configuration
# use Logistic regression with stochastic gradient descent optimization as the underlying machine-learning algorithm
classifier_factory = LogisticRegressionFactory(in_dimensions=1, optimizer_factory=SGDOptimizerFactory())
# consider 1-dimensional floating point input pairs from the range [-10, 10] with maximum distance of 1
input_generator = PatternGenerator(InputDomain(1, InputBaseType.FLOAT, [-10, 10]), False)
# adapt the number of processes to fit your machine
config = DDConfig(n_processes=2)
with initialize_dpsniper(config, out_dir="example_outputs"):
# run DD-Search to find the witness
witness = DDSearch(mechanism, DPSniper(mechanism, classifier_factory, config), input_generator, config).run()
# re-compute the power of the witness using high precision for a tighter lower confidence bound
witness.compute_eps_high_precision(mechanism, config)
print("eps_lcb = {}".format(witness.lower_bound))
print("witness = ({}, {}, {})".format(witness.a1, witness.a2, witness.attack))
|
# Generated by Django 3.0.6 on 2020-05-29 02:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("review", "0004_auto_20200512_1526"),
]
operations = [
migrations.AddField(
model_name="review", name="comments", field=models.TextField(blank=True),
),
]
|
# -*- coding: utf-8 -*-
import os
from .helpers import display_title_header
from .commands import Commands
__version__ = "0.2.1"
def main():
display_title_header(__version__)
cmd = Commands(os.getcwd())
cmd.run()
|
#!/usr/bin/env python2
from __future__ import print_function
import os
import multiprocessing
import re
import shutil
import argparse
filesRemaining = []
botScores = {}
import random
from rgkit.run import Runner, Options
from rgkit.settings import settings as default_settings
def make_variants(variable, robot_file, possibilities):
"""Makes variants of the file robot_file with the constant variable
changed for each possibility.
e.g. if the variable is "ELEPHANTS" and the possibilities are [1, 2, 3],
this will find the line
ELEPHANTS = 3
    in robot_file and make a copy of the file for each value in possibilities.
1.py will have ELEPHANTS = 1, 2.py will have ELEPHANTS = 2, etc.
Raises IndexError if the variable name is not found in the file robot_file.
The line assigning the constant variable must be the first line in that
file has the variable name in it.
"""
filenames = []
with open(robot_file, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if variable in line:
break
else:
raise IndexError('variable name %s is not found in file %s' % (variable, robot_file))
assert '=' in line
for p in possibilities:
varandp = variable + str(p)
lines[i] = "%s = %s\n" % (variable, p)
filenames.append(varandp)
with open(varandp, 'w') as pfile:
for line in lines:
pfile.write(line)
return filenames
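# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the optimizer): given a hypothetical
# robot.py containing the line "ELEPHANTS = 3", make_variants writes one copy
# per candidate value and returns the generated file names.
# ---------------------------------------------------------------------------
#     files = make_variants("ELEPHANTS", "robot.py", [1, 2, 3])
#     # -> ["ELEPHANTS1", "ELEPHANTS2", "ELEPHANTS3"], where e.g. ELEPHANTS1
#     #    is robot.py with its ELEPHANTS line replaced by "ELEPHANTS = 1"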
def get_current_value(variable, robot_file):
"""
Returns the value of the constant variable in the robot file.
This function finds the first line in the file robot_file that has the
variable name in it, and parses the value after the '=' in that line for a
float, returning it.
Raises IndexError if the variable name is not found in the file robot_file.
The line assigning the constant variable must be the first line in that
    file that has the variable name in it.
"""
with open(robot_file, 'r') as f:
for line in f:
if variable in line:
break
else:
raise IndexError('variable name %s is not found in file %s' % (variable, robot_file))
assert '=' in line
return float(line[line.index('=') + 1:])
def optimize_variable(precision, matchNum, enemies, variable, robot_file, processes):
    """
    Creates a bunch of variants of the file robot_file, each with variable
    changed, then runs a tournament between the variants to find the best one.
    The file robot_file is modified to contain the best value, and it is
    returned.
    """
    pool = multiprocessing.Pool(processes)
try:
base_value = get_current_value(variable, robot_file)
while precision >= 0.1:
print('RUNNING WITH BASE VALUE', base_value, \
'PRECISION', precision)
values_to_test = [base_value - precision,
base_value + precision, base_value]
files = make_variants(variable, robot_file, values_to_test)
best_file = run_tourney(matchNum,enemies, files, pool)
best_value = values_to_test[files.index(best_file)]
if best_value == base_value:
precision /= 2.0
print('best value remains', best_value)
print('decreasing precision to', precision)
else:
base_value = best_value
print('new \'best\' value is', best_value)
shutil.copy(make_variants(variable, robot_file, [base_value])[0],
robot_file)
#make double sure we close processes so as to not make a process bomb when testing
except KeyboardInterrupt:
print("terminating pool, please wait...")
pool.terminate()
raise
    #close processes explicitly instead of relying on GC to do it
pool.close()
pool.join()
return base_value
def run_match(args):
#rgkit integration
try:
bot1, bot2, n_matches = args
res = Runner(player_files=(bot1,bot2), options=Options(n_of_games=n_matches, quiet=4, game_seed=random.randint(0, default_settings.max_seed))).run()
dif = 0
for scores0, scores1 in res:
dif += scores0 - scores1
return dif
    #if there is an error, return immediately so that the program can quit
except KeyboardInterrupt:
pass
def versus(matches_to_run, bot1, bot2, pool):
"""Launches a multithreaded comparison between two robot files.
run_match() is run in separate processes, one for each CPU core by default, until 100
matches (default) are run. returns bot1 score - bot2 score"""
try:
#find out pool size
psize = len(pool._pool)
rem, mod = divmod(matches_to_run, psize)
matchnums = []
#create list that lists how many matches to run and which bots to run them with
for _ in xrange(psize):
if mod:
mod -= 1
matchnums.append((bot1, bot2, rem+1))
else:
matchnums.append((bot1,bot2, rem))
#sum up all the partial differences returned by the pool
#using imap on the list created earlier
return sum(pool.imap_unordered(run_match, matchnums))
except KeyboardInterrupt:
print('user did ctrl+c, ABORT EVERYTHING\n Removing files...')
for bot in filesRemaining:
os.remove(bot)
raise KeyboardInterrupt()
def run_tourney(matchNum,enemies, botfiles, pool):
"""Runs a tournament between all bot files in botfiles.
Returns the winner of the tournament."""
bestWin = ['', -5000]
scores = {}
for bot1 in botfiles:
filesRemaining.append(bot1)
scores[bot1] = 0
for enemy in enemies:
for bot1 in botfiles:
if bot1 in botScores[enemy] and botScores[enemy][bot1] != 0:
winScore = botScores[enemy][bot1]
print('ALREADY SCORED',str(bot1))
else:
winScore = versus(matchNum,bot1, enemy, pool)
botScores[enemy][bot1] = winScore
while winScore == 0:
print('VERSUS WAS A TIE. RETRYING...')
winScore = versus(matchNum,bot1, enemy, pool)
            print('Difference in score:', str(winScore))
scores[bot1] += winScore
print(scores)
for bot1 in botfiles:
for bot2 in botfiles:
if bot1 != bot2 and scores[bot1] == scores[bot2]:
print("Two bots have same score, finding the winner")
bestWin[1] = versus(matchNum,bot1, bot2, pool)
while bestWin[1] == 0:
print("Wow. Another Tie.")
bestWin[1] = versus(matchNum,bot1, bot2, pool)
if bestWin[1] < 0:
bestWin[0] = bot2
elif bestWin[1] > 0:
bestWin[0] = bot1
else:
print("WTF? Impossible Tie.")
elif scores[bot1] > bestWin[1]:
bestWin[1] = scores[bot1]
bestWin[0] = bot1
bestwin = bestWin[0]
for bf in botfiles:
if not bf == bestwin:
print('removing',bf)
os.remove(bf)
filesRemaining.remove(bf)
print('Best Score:',str(bestWin[1]))
return bestwin
def main():
parser = argparse.ArgumentParser(
description="Optimize constant values for robotgame.")
parser.add_argument(
"constant", type=str, help='The constant name to optimize.')
parser.add_argument(
"file", type=str, help='The file of the robot to optimize.')
parser.add_argument(
"enemies", type=str, help='A comma-separated list of the enemy files.')
parser.add_argument(
"-pr", "--precision",
default=8.0,
type=float, help='The precision to start adjusting values at')
parser.add_argument(
"-m", "--matches",
default=100,
type=int, help='The number of matches to run per tourney')
parser.add_argument(
"-p", "--processes",
default=multiprocessing.cpu_count(),
type=int, help='The number of processes to simulate in')
args = vars(parser.parse_args())
eList = args['enemies'].split(',')
for e in eList:
botScores[e] = {}
best_value = optimize_variable(args['precision'],args['matches'],eList,
args['constant'], args['file'], processes=args['processes'])
print(best_value)
if __name__ == '__main__':
main()
|
"""Fixed tracks only being able to have one artist
Revision ID: 2528a69ac8e8
Revises: 8bd75fcafb3f
Create Date: 2020-05-09 23:54:46.757844
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "2528a69ac8e8"
down_revision = "8bd75fcafb3f"
branch_labels = ()
depends_on = None
def upgrade() -> None:
op.create_table(
"track_artists",
sa.Column("track_id", sa.Integer(), nullable=False),
sa.Column("artist_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(["artist_id"], ["artist.id"],),
sa.ForeignKeyConstraint(["track_id"], ["track.id"],),
sa.PrimaryKeyConstraint("track_id", "artist_id"),
)
op.drop_constraint("track_artist_id_fkey", "track", type_="foreignkey")
op.drop_column("track", "artist_id")
def downgrade() -> None:
op.add_column(
"track",
sa.Column("artist_id", sa.INTEGER(), autoincrement=False, nullable=False),
)
op.create_foreign_key(
"track_artist_id_fkey", "track", "artist", ["artist_id"], ["id"]
)
op.drop_table("track_artists")
|
from sklearn.model_selection import train_test_split
def split(x, y):
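    """Split features and labels into an 80/20 train/test split with a fixed random seed."""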
    return train_test_split(x, y, test_size=0.2, random_state=0) |
#!/usr/bin/env python3
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import unittest
import jsonschema
class GoofyGhostSchemaTest(unittest.TestCase):
def loadJSON(self, name):
parent_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(parent_dir, name), 'r') as fin:
return json.load(fin)
def runTest(self):
schema = self.loadJSON('goofy_ghost.schema.json')
jsonschema.validate(self.loadJSON('goofy_ghost.json'), schema)
jsonschema.validate(self.loadJSON('goofy_ghost.sample.json'), schema)
if __name__ == '__main__':
unittest.main()
|
from .base import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
if config("DJANGO_SECRET_KEY", default=None) is None:
raise RuntimeError(
"To start django with production conf the environment variable DJANGO_SECRET_KEY must be set"
)
DEBUG = False
DEPLOY_URL = os.environ.get("DEPLOY_URL", "")
# For heroku deploy - according to
# https://help.heroku.com/J2R1S4T8/can-heroku-force-an-application-to-use-ssl-tls
# SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Security recommendations based on ./manage.py check --deploy
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 1
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
# Production ORIGIN
ALLOWED_HOSTS = [DEPLOY_URL]
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
CORS_ALLOWED_ORIGINS = [
f"https://{DEPLOY_URL.split('.')[:-1]}.com",
]
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = [
BASE_DIR / ".." / "static",
]
STATIC_ROOT = BASE_DIR / ".." / "staticfiles"
|
#!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import torch
import pytest
import poptorch
def test_loop_constant():
class Model(torch.nn.Module):
def forward(self, x):
def body(x):
return x * 2
return poptorch.for_loop(10, body, [x])[0]
inference_model = poptorch.inferenceModel(Model())
x = torch.tensor([1.])
assert inference_model(x) == pow(2, 10)
def test_loop_simple():
class Model(torch.nn.Module):
def forward(self, x, y):
def body(x):
return x * y
return poptorch.for_loop(10, body, [x])[0]
inference_model = poptorch.inferenceModel(Model())
x = torch.tensor([1.])
y = torch.tensor([2.])
assert inference_model(x, y) == pow(2, 10)
def test_loop_multiple_inputs():
class Model(torch.nn.Module):
def forward(self, x, y, z, w):
def body(x, y, z, w):
return x * y, y + z, x * w, w + 1
return poptorch.for_loop(10, body, [x, y, z, w])
inference_model = poptorch.inferenceModel(Model())
x = torch.tensor([0.1])
y = torch.tensor([0.2])
z = torch.tensor([0.3])
w = torch.tensor([0.4])
out = inference_model(x, y, z, w)
# Check by running equiv on host.
x = torch.tensor([0.1])
y = torch.tensor([0.2])
z = torch.tensor([0.3])
w = torch.tensor([0.4])
for _ in range(0, 10):
_z = x * w
x *= y
y += z
w = w + 1
z = _z
for host, ipu in zip([x, y, z, w], out):
assert host == ipu
def test_loop_non_tensor_in():
class Model(torch.nn.Module):
def forward(self, x, _):
def body(x, y):
return x * y, y + 1
return poptorch.for_loop(10, body, [x, 5])
inference_model = poptorch.inferenceModel(Model())
x = torch.tensor([1.])
y = torch.tensor([2.])
msg = "(Object contained in list at index 1 is not torch.tensor)"
with pytest.raises(ValueError, match=msg):
inference_model(x, y)
def test_loop_non_list_in():
class Model(torch.nn.Module):
def forward(self, x, y):
def body(x):
return x * y
return poptorch.for_loop(10, body, x)
inference_model = poptorch.inferenceModel(Model())
x = torch.tensor([1.])
y = torch.tensor([2.])
msg = "(Object is not list)"
with pytest.raises(ValueError, match=msg):
inference_model(x, y)
# TODO(T33273)
@pytest.mark.skip(reason="Popart doesn't support weights in loop oddly")
def test_loop_weights():
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.layer1 = torch.nn.Linear(1, 256)
self.layer2 = torch.nn.Conv2d(4, 1, [8, 8])
def forward(self, x):
def body(x):
act = self.layer1(x)
act = act.reshape([1, 4, 8, 8])
act = self.layer2(act)
return act.flatten()
return poptorch.for_loop(10, body, [x])[0]
inference_model = poptorch.inferenceModel(Model())
x = torch.tensor([1.])
inference_model(x)
|
input_ = '1321131112'
def number(nb, time):
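    """Apply the look-and-say transformation to the digit string nb `time` times and return the result."""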
a = 0
while a<time:
for i in range(len(nb)):
if i == 0:
w = ''
v = nb[i]
c = 0
if nb[i] == v:
c += 1
if nb[i] != v:
w += str(c) + v
v = nb[i]
c = 1
if i == len(nb)-1:
w += str(c) + v
nb = w
a += 1
return w
print(len(number(input_, 40))) |
from hash_power_client import HashPowerClient
if __name__ == "__main__":
slave = HashPowerClient(
server_address=("zjlab-1", 13105),
)
print(slave.get_system_info())
# print(slave.allocate_gpus(num_gpus=2, exclusive=False, mem_size=10e9))
# print(slave.allocate_gpus(num_gpus=2, exclusive=True))
print(slave.release_gpus(["025beaf24a6311eab886f40270a36b56", "031395104a6311eab886f40270a36b56"]))
|
"""
Chat urls module
"""
from django.urls import path
from django.contrib.auth.decorators import login_required
from .views import Conversations
urlpatterns = [
path(
'chats/',
login_required(Conversations.ChatList.as_view()),
name='chat-list'),
path(
'chat/create/',
login_required(Conversations.create_chat),
name='chat-create'),
path(
'chat/<int:room_id>/remove/',
login_required(Conversations.remove_chat),
name='chat-remove'),
path(
'chat/quit/<int:room_id>/',
login_required(Conversations.quit_room),
name='chat-quit'),
path(
'chat/<int:room_id>/add/<int:friend_id>/',
login_required(Conversations.add_to_chat),
name='chat-add-friend'),
path(
'chat/<int:room_id>/remove/<int:participant_id>/',
login_required(Conversations.remove_from_chat),
name='chat-remove-member'),
path(
'chat/<int:room_id>/makeadmin/<int:participant_id>/',
login_required(Conversations.make_admin),
name='chat-make-admin'),
path(
'chat/<int:room_id>/rmadmin/<int:participant_id>/',
login_required(Conversations.rm_admin),
name='chat-rm-admin'),
path(
'chat/change_avatar/<int:room_id>/',
login_required(Conversations.AvatarManaging.as_view()),
name='chat-avatar-change'),
path(
'chat/edit_chat_name/<int:room_id>/',
login_required(Conversations.edit_chat_name),
name='chat-edit-name'),
path(
'chats/<int:user_id>/<int:friend_id>/',
login_required(Conversations.chat_move),
name='chat-friend-redirect'),
path(
'chat/go_to_chat/<int:room_id>/',
login_required(Conversations.Room.as_view()),
name='chat-go-to-chat'),
path(
'chat/go_to_chat/<int:room_id>/get_messages/',
login_required(Conversations.get_messages),
name='chat-go-to-chat-get-message'),
path(
'chat/go_to_chat/<int:room_id>/send_files/',
login_required(Conversations.send_files),
name='chat-go-to-chat-send-files'),
]
|
from lxml import html
import re
import requests
from datetime import timedelta, datetime, date
class NrsrSpider:
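    """Scrapes debate speeches of a single MP from nrsr.sk in daily date windows,
    strips unwanted characters and numbering, and appends the text to an output file."""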
name = 'NRSRspider'
from_date = date(2016, 3, 23)
to_date = date(2016, 3, 27)
period = '7'
increment = timedelta(days=1)
selector_class = '.alpha.omega'
url = "https://www.nrsr.sk/web/Default.aspx?sid=schodze/rozprava/vyhladavanie&CisObdobia={}&CPT=&CisSchodze=0&PoslanecID=929&DatumOd={}%200:0:0&DatumDo={}%200:0:0&TypVystupenia="
data = []
output = 'tomtom.txt'
    pattern = re.compile(r"[^\w(/.) ]")
    pattern2 = re.compile(r"(\d+.)")
start_url = url.format(period, from_date.isoformat(), to_date.isoformat())
def start_requests(self):
print("Starting with {}".format(self.start_url))
self.get(self.start_url)
def get(self, address):
page = requests.get(address)
tree = html.fromstring(page.content)
print("{} downloaded, ready to parse".format(address))
self.parse(tree)
def parse(self, htmldata):
tomtom = htmldata.xpath("//div[contains(@class, 'alpha') and contains(@class, 'omega')]/span/text()")
for tom in tomtom:
tom = self.pattern.sub('', tom)
tom = self.pattern2.sub('', tom)
print(tom)
self.data.append(tom)
with open(self.output, "a") as output:
output.write(tom)
print("{} useful speeches added, going to get another ones".format(len(tomtom)))
self.next()
def next(self):
self.from_date = self.to_date + timedelta(days=1)
self.to_date = self.from_date + self.increment
if self.from_date < date.today():
self.get(self.url.format(self.period, self.from_date, self.to_date))
|
from robotpy_ext.autonomous import timed_state, StatefulAutonomous
class ThreeToteHot(StatefulAutonomous):
MODE_NAME = 'Three Totes'
DEFAULT = False
def initialize(self):
self.angle = 0
#self.register_sd_var('driveSpeed', .3)
#self.register_sd_var('rotateLeftTime', 5)
#self.register_sd_var('rotateRightTime', 5)
#self.register_sd_var('driveForwardTime', 5)
def on_enable(self):
StatefulAutonomous.on_enable(self)
def on_iteration(self, tm):
super().on_iteration(tm)
if tm<5:
self.angle = self.angle - 180/199
elif tm<10:
self.angle = self.angle + 180/199
@timed_state(duration=0, next_state = 'first_forward', first=True)
def raise_lift(self):
self.tote_forklift.raise_forklift()
self.next_state('first_forward')
@timed_state(duration=.25, next_state='drive_left')
def first_forward(self):
self.drive.move(-.35, 0,0)
@timed_state(duration=5, next_state = 'drive_right')
def drive_left(self):
self.drive.move(-.35,0,0)
self.drive.angle_rotation(self.angle)
        if not self.sensors.toteLimitL and not self.sensors.toteLimitR:
self.tote_forklift.set_pos_stack1()
@timed_state(duration=5)
def drive_right(self):
self.drive.move(-.35,0,0)
self.drive.angle_rotation(self.angle)
if not self.sensors.toteLimitL and not self.sensors.toteLimitR:
self.tote_forklift.set_pos_stack2()
@timed_state(duration=5)
def drive_forward(self):
self.drive.move(-.35,0,0)
self.drive.angle_rotation(0) |
import ctrnn
import min_cog_game
if __name__ == "__main__":
gtype = input()
num_input = 5
num_hidden = 2
num_output = 2
num_weights = num_hidden*(num_input + num_hidden) + num_output*(num_hidden + num_output)
num_biases = num_hidden + num_output
num_gains = num_hidden + num_output
num_taus = num_hidden + num_output
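    # Slice the flat genotype into consecutive segments: connection weights,
    # biases, gains and time constants for the CTRNN.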
i = 0
weight_list = gtype[i:i+num_weights]
i += num_weights
bias_list = gtype[i:i+num_biases]
i += num_biases
gain_list = gtype[i:i+num_gains]
i += num_gains
tau_list = gtype[i:i+num_taus]
agent = ctrnn.CTRNN(num_input, num_hidden, num_output,
weight_list, bias_list, gain_list, tau_list)
game = min_cog_game.Game()
game.play(agent, True)
|
"""Make predictions using encoder."""
# pylint: disable=no-name-in-module
from json import load
import numpy as np # type: ignore
from PIL import Image # type: ignore
from click import command, option, Path # type: ignore
from dataset import process_img
from model import HYP_FILE
from center import CENTERS_FILE
from evaluate import load_encoder, get_dense
# pylint: disable=no-value-for-parameter
@command()
@option('--img', '-i', type=Path(exists=True,
file_okay=True,
dir_okay=False),
required=True, help='Path to input image.')
def predict(img: Image.Image) -> int:
"""Predict a label for the given image."""
with open(HYP_FILE) as fin:
hyp: dict = load(fin)
width = hyp['img_width']
height = hyp['img_height']
blur_radius = hyp['blur_radius']
in_img = process_img(img, width, height, blur_radius)
encoder = load_encoder(in_img.shape)
dense = get_dense(in_img.shape)
centers = np.load(CENTERS_FILE)
rep = encoder(in_img[np.newaxis, ...])
outputs = dense(np.abs(centers - rep))
return np.argmax(outputs)
if __name__ == '__main__':
predict()
|
import numpy as np
import torch
from .normalize import normalize
from sklearn.neural_network import BernoulliRBM
import matplotlib.pyplot as plt
def generate_ensembles(attributions, methods, rbm_params, device="cpu"):
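    """Build one ensemble attribution per entry in `methods` from the stacked
    per-method attributions, returning a tensor of shape [len(methods), ...].
    NaNs in the input are zeroed; the rbm-based variants are handled specially."""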
size = [len(methods)] + list(attributions.shape)[1:]
e = torch.empty(size=size).to(device)
attributions[torch.isnan(attributions)] = 0
for i, m in enumerate(methods):
if "rbm" in m:
            # TODO temporary solution
if m == "flipped_rbm":
# add flipped rbm
j = methods.index("rbm")
e[i] = 1 - e[j]
elif m == "rbm_flip_detection":
# Requires all three calculated before
rbm_index = methods.index("rbm")
flipped_index = methods.index("flipped_rbm")
baseline_index = methods.index("mean")
e[i] = solve_flipping(e[rbm_index], e[flipped_index], e[baseline_index])
else:
ens = ensemble(m, attributions=attributions, param=rbm_params)
e[i] = ens
else:
ens = ensemble(m, attributions=attributions)
e[i] = ens
return e
def solve_flipping(rbm, rbm_flipped, baseline):
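    """Decide per sample whether the RBM attribution should be flipped by comparing
    the overlap of its most/least relevant pixels with the top/bottom pixels of a
    baseline (mean) attribution, and substitute the flipped version where needed."""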
# Define percentage of top baseline pixels that are compared
pct_pixel_to_check = 5
nr_pixels = rbm.shape[1] * rbm.shape[2]
barrier = int(pct_pixel_to_check / 100 * nr_pixels)
# Get most important pixel positions of baseline
compare_pixels = torch.argsort(baseline.reshape(-1, nr_pixels), dim=1)
# Sort rbm pixels by relevancy
rbm_rank = torch.argsort(rbm.reshape(-1, nr_pixels), dim=1)
# Compute how many of the top baseline pixels are
# most relevant / least relevant pixels for the rbm using the percentage of pixels
rbm_best1 = calc_count_intersects(
compare_pixels[:, -barrier:], rbm_rank[:, -barrier:]
)
rbm_worst1 = calc_count_intersects(
compare_pixels[:, -barrier:], rbm_rank[:, :barrier]
)
# Compute same for worst baseline pixels
rbm_worst2 = calc_count_intersects(
compare_pixels[:, :barrier], rbm_rank[:, -barrier:]
)
rbm_best2 = calc_count_intersects(
compare_pixels[:, :barrier], rbm_rank[:, :barrier]
)
    # Decide to flip if worst scores outweigh best scores
preference_score = (
np.asarray(rbm_best1)
+ np.asarray(rbm_best2)
- np.asarray(rbm_worst1)
- np.asarray(rbm_worst2)
)
replace_index = preference_score < 0
# Depending on above choice, replace by flipped version
solved_rbm = rbm.clone()
solved_rbm[replace_index] = rbm_flipped[replace_index]
return solved_rbm
def calc_count_intersects(t1, t2):
# Calculate the number of elements contained in set t1 and set t2
# Dimension 0 is the batch dimension
# Combine the tensors, so that later we can find duplicates
combined = torch.cat((t1, t2), dim=1)
# Identify the duplicates of the combined set
    # Unfortunately batches don't work with unique function
c = [combined[i].unique(return_counts=True)[1] for i in range(combined.shape[0])]
# Count the duplicates
count_intersect = [torch.sum(c[i] > 1).item() for i in range(len(c))]
return count_intersect
def ensemble(name, **kwargs):
if name == "mean":
return mean_ens(**kwargs)
if name == "variance":
return variance_ens(**kwargs)
if name == "rbm":
return rbm_ens(**kwargs)
def mean_ens(attributions):
return torch.mean(attributions, dim=0)
# return np.mean(attributions, axis=1)
def variance_ens(attributions):
# TODO epsilon should be mean over the whole dataset, not just the batch
epsilon = torch.mean(attributions) * 10
return torch.mean(attributions, dim=0) / (torch.std(attributions, dim=0) + epsilon)
def rbm_ens(attributions, param):
# TODO use parameters
rbms = [
BernoulliRBM(
n_components=1,
batch_size=param["batch_size"],
learning_rate=param["learning_rate"],
n_iter=param["n_iter"],
)
]
A = attributions.clone()
# change (attribution methods, batch) into (batch, attribution methods)
A = torch.transpose(A, 0, 1)
A = A.cpu().detach().numpy()
# make last two dimensions (width * height) into one
A = A.reshape(A.shape[0], A.shape[1], A.shape[2] * A.shape[3])
size = list(attributions.shape)[1:]
result = torch.empty(size=size)
for i, a in enumerate(A):
a = np.nan_to_num(a)
a = a.T
for r in rbms:
r.fit(a)
# sample output from current rbm as input for the next rbm
a = r.transform(a)
result[i] = torch.tensor(a.reshape(size[-1], size[-1]))
return result
|
#!/usr/bin/env python3
from collections import Counter
def isValid(s):
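    """Return 'YES' if every character of s can occur with the same frequency
    after removing at most one character, else 'NO'."""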
one_leeway = False
freqs = dict(Counter(s))
most_common_freq = Counter(freqs.values()).most_common(1)[0][0]
for k in freqs:
if freqs[k] != most_common_freq:
if not one_leeway and (abs(freqs[k] - most_common_freq) == 1 or freqs[k] == 1):
one_leeway = True
else:
return 'NO'
return 'YES'
print(isValid("abc"))
print(isValid("abccc"))
print(isValid("aabbcd"))
print(isValid("aabbccddeefghi"))
print(isValid("abcdefghhgfedecba"))
print(isValid("ibfdgaeadiaefgbhbdghhhbgdfgeiccbiehhfcggchgghadhdhagfbahhddgghbdehidbibaeaagaeeigffcebfbaieggabcfbiiedcabfihchdfabifahcbhagccbdfifhghcadfiadeeaheeddddiecaicbgigccageicehfdhdgafaddhffadigfhhcaedcedecafeacbdacgfgfeeibgaiffdehigebhhehiaahfidibccdcdagifgaihacihadecgifihbebffebdfbchbgigeccahgihbcbcaggebaaafgfedbfgagfediddghdgbgehhhifhgcedechahidcbchebheihaadbbbiaiccededchdagfhccfdefigfibifabeiaccghcegfbcghaefifbachebaacbhbfgfddeceababbacgffbagidebeadfihaefefegbghgddbbgddeehgfbhafbccidebgehifafgbghafacgfdccgifdcbbbidfifhdaibgigebigaedeaaiadegfefbhacgddhchgcbgcaeaieiegiffchbgbebgbehbbfcebciiagacaiechdigbgbghefcahgbhfibhedaeeiffebdiabcifgccdefabccdghehfibfiifdaicfedagahhdcbhbicdgibgcedieihcichadgchgbdcdagaihebbabhibcihicadgadfcihdheefbhffiageddhgahaidfdhhdbgciiaciegchiiebfbcbhaeagccfhbfhaddagnfieihghfbaggiffbbfbecgaiiidccdceadbbdfgigibgcgchafccdchgifdeieicbaididhfcfdedbhaadedfageigfdehgcdaecaebebebfcieaecfagfdieaefdiedbcadchabhebgehiidfcgahcdhcdhgchhiiheffiifeegcfdgbdeffhgeghdfhbfbifgidcafbfcd"))
|
import csv
from collections import Counter
from pathlib import Path
from random import choice
BATTLE_CARD_FILE = "battle-table.csv"
class BattleCard:
def __init__(self, mapping: dict = None):
self.mapping = mapping
@property
def attackers(self):
if self.mapping:
return [x for x in self.mapping]
return []
def from_csv(self, filename):
with Path(filename).open() as csvfile:
reader = csv.DictReader(csvfile)
battle_dict = {}
for row in reader:
attacker = row.pop("Attacker")
battle_dict[attacker] = row
self.mapping = battle_dict
class Player:
def __init__(self, name):
self.name = name
class Roll:
def __init__(self, name):
self.name = name
self.battle_card = None
def set_battle_card(self, battle_card):
self.battle_card = battle_card
def can_defeat(self, roll):
        # mapping[self.name] is a dict of {defender: outcome}; assumes the CSV
        # marks a winning matchup with the string "win"
        return self.battle_card.mapping[self.name][roll.name].strip().lower() == "win"
def is_tied_with(self, roll):
return self.name == roll.name
def get_players_name():
return input("player name: ").strip()
def get_players_roll(battlecard):
print(f"Choices: {', '.join(battlecard.attackers)}")
choice = input("\nroll: ").strip()
roll = Roll(choice)
roll.set_battle_card(battlecard)
return roll
def build_rolls(battle_card):
return [Roll(attacker) for attacker in battle_card.attackers]
def print_header():
print("-" * 50)
print("Rock, Paper, Scissors".center(50))
print("-" * 50)
def compute_winner(player1, player2, scores):
if scores[player1.name] == scores[player2.name]:
return "It's a tie"
elif scores[player1.name] > scores[player2.name]:
return f"{player1.name} wins!"
else:
return f"{player2.name} wins!"
def main():
print_header()
battle_card = BattleCard()
battle_card.from_csv(BATTLE_CARD_FILE)
rolls = build_rolls(battle_card)
name = get_players_name()
player1 = Player(name)
player2 = Player("computer")
game_loop(player1, player2, rolls, battle_card)
def game_loop(player1, player2, rolls, battle_card):
scores = Counter()
scores[player1.name] = 0
scores[player2.name] = 0
count = 1
while count < 3:
print(f"\nROUND {count}:")
p2_roll = choice(rolls)
p2_roll.set_battle_card(battle_card)
p1_roll = get_players_roll(battle_card)
# if both players roll the same, let's retry the round
if p1_roll.is_tied_with(p2_roll):
print(f"\nROUND {count} is a tie: {p1_roll.name}/{p2_roll.name}\n")
continue
player1_wins = p1_roll.can_defeat(p2_roll)
# display throws
print(f"{player1.name}: {p1_roll.name}\n" f"{player2.name}: {p2_roll.name}")
# display winner for this round
round_winner = player1.name if player1_wins else player2.name
print(f"=> round {count} winner: {round_winner}")
# record score
scores[round_winner] += 1
# increment round
count += 1
# Compute who won
result = compute_winner(player1, player2, scores)
print(
f"\nResult: {result} "
f"\n=> {player1.name}: {scores[player1.name]}"
f"\n=> {player2.name}: {scores[player2.name]}"
)
if __name__ == "__main__":
main()
|
import struct
import json
import binascii
import os
import argparse
from collections import OrderedDict
DEFAULT_FILE_VERSION = "v0.1"
APPEARED_TYPES = {}
def read_bytes_as_hex(f, length):
return str(binascii.hexlify(f.read(length)), encoding="ascii")
def write_hex_as_bytes(f, hex):
f.write(binascii.unhexlify(hex))
def write_ascii_string(f, string):
f.write(bytes(string, encoding="ascii"))
def read_multiparameters(f):
return [str(binascii.hexlify(f.read(0x14)), encoding="ascii"), str(binascii.hexlify(f.read(0x14)), encoding="ascii")]
def write_multiparameters(f, multiparameters):
multiparam1 = binascii.unhexlify(multiparameters[0])
multiparam2 = binascii.unhexlify(multiparameters[1])
assert len(multiparam1) == 0x14 and len(multiparam2) == 0x14
f.write(multiparam1)
f.write(multiparam2)
def read_id(f, reverse=True):
if reverse:
return str(f.read(4)[::-1], encoding="ascii")
else:
        return str(f.read(4), encoding="ascii")
def write_id(f, id, reverse=True):
if reverse:
f.write(bytes(id[::-1], encoding="ascii"))
else:
        f.write(bytes(id, encoding="ascii"))
def read_float_tripple(f):
floats = struct.unpack(">fff", f.read(3*4))
return " ".join(str(num) for num in floats)
def parse_float_tripple(tripple_string):
floats = tripple_string.split(" ")
return map(float, floats)
def read_parameters(f, asfloat=False):
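    """Read a parameter block: a sequence of records, each a 3-character tag plus a
    length byte (always 4) followed by a 4-byte big-endian value, terminated by
    FF FF FF FF. Values are decoded as floats when asfloat is True, otherwise as
    hex-formatted unsigned ints."""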
paramname = f.read(4)
params = OrderedDict()
while paramname != b"\xFF\xFF\xFF\xFF":
if paramname[3] != 4:
print(hex(f.tell()), paramname, paramname[3])
assert paramname[3] == 4
sane_param = str(paramname[0:3], encoding="ascii")
if not asfloat:
value = struct.unpack(">I", f.read(4))[0]
params[sane_param] = hex(value)
else:
value = struct.unpack(">f", f.read(4))[0]
params[sane_param] = value
paramname = f.read(4)
return params
def write_parameters(f, params, asFloat=False):
for param, value in params.items():
assert len(param) == 3
f.write(bytes(param, encoding="ascii"))
f.write(b"\x04") # size of following value, always 4 bytes
if asFloat:
f.write(struct.pack(">f", float(value)))
else:
f.write(struct.pack(">I", convert_hex(value)))
f.write(b"\xFF\xFF\xFF\xFF")
def convert_hex(hex):
if hex.startswith("0x"):
return int(hex, 16)
else:
return int(hex)
def write_int(f, val):
f.write(struct.pack(">I", val))
def write_generator(f, gen):
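    """Serialize a single generator entry (as produced by read_generator) back into
    the binary .gen layout, including its object, area and spawn type data."""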
write_id(f, gen["name"])
write_id(f, "v0.0")
f.write(struct.pack(">II", convert_hex(gen["unk1"]), convert_hex(gen["unk2"])))
write_hex_as_bytes(f, gen["metadata"])
f.write(struct.pack(">fff", *parse_float_tripple(gen["position"])))
f.write(struct.pack(">fff", *parse_float_tripple(gen["position offset"])))
if gen["object type"] == "NULL":
f.write(b"\x00\x00\x00\x00") # If object is null we will skip all object data and go to area/spawn data
else:
write_id(f, gen["object type"])
# Write the object version or in the case of boss and teki an integer
if gen["object type"] in ("boss", "teki"):
f.write(struct.pack("I", gen["object version"]))
else:
write_id(f, gen["object version"])
objectdata = gen["object data"]
if gen["object type"] in ("piki", "debg", "navi"):
pass
elif gen["object type"] in ("actr", "mobj", "plnt", "pelt"):
f.write(struct.pack(">I", objectdata["unk int"]))
elif gen["object type"] == "item":
write_int(f, len(objectdata["item name"]))
write_ascii_string(f, objectdata["item name"])
write_hex_as_bytes(f, objectdata["item data 1"])
write_hex_as_bytes(f, objectdata["item data 2"])
elif gen["object type"] == "work":
write_int(f, len(objectdata["string1"]))
write_ascii_string(f, objectdata["string1"])
write_int(f, len(objectdata["string2"]))
write_ascii_string(f, objectdata["string2"])
if objectdata["string1"].strip("\x00") == "move stone":
#f.write(struct.pack(">fff", *parse_float_tripple(objectdata["work XYZ?"])))
f.write(struct.pack(">fff", *objectdata["work XYZ?"]))
elif gen["object type"] == "mpar":
f.write(struct.pack(">III",
objectdata["unk int"],
objectdata["unk int 2"],
objectdata["more data"]))
if objectdata["more data"] == 1:
f.write(struct.pack(">ffffff", *objectdata["additional data"]))
elif gen["object type"] == "teki":
if gen["object version"] < 7:
f.write(struct.pack(">III", objectdata["unk int"], objectdata["unk int1"], objectdata["unk int2"]))
write_multiparameters(f, objectdata["multi parameters"])
elif gen["object version"] == 8:
f.write(struct.pack(">III", objectdata["unk int"], objectdata["unk int1"], objectdata["unk int2"]))
write_id(f, objectdata["identification"])
f.write(struct.pack(">IIIffff", *objectdata["personality data"]))
elif gen["object version"] == 9:
f.write(struct.pack(">BBB", objectdata["unk byte"], objectdata["unk byte1"], objectdata["unk byte2"]))
write_id(f, objectdata["identification"])
f.write(struct.pack(">IIIffff", *objectdata["personality data"]))
elif gen["object version"] >= 10:
f.write(struct.pack(">BBB", objectdata["unk byte"], objectdata["unk byte1"], objectdata["unk byte2"]))
write_id(f, objectdata["identification"])
write_multiparameters(f, objectdata["multi parameters"])
elif gen["object type"] == "boss":
if gen["object version"] >= 2:
write_int(f, objectdata["boss type?"])
elif gen["object version"] < 2:
write_int(f, objectdata["boss type?"])
write_id(f, objectdata["boss name"])
else:
raise RuntimeError("Unknown object type:", gen["object type"])
write_parameters(f, objectdata["obj parameters"])
# Write area data
write_id(f, gen["area data"][0]) # Area type: Can be pint or circ
write_id(f, "v0.0")
f.write(struct.pack(">fff", *parse_float_tripple(gen["area data"][1]))) # Area position info?
asFloat = gen["area data"][0] == "circ" #if circle area then we parse the parameter as a float
write_parameters(f, gen["area data"][2], asFloat=asFloat) # Area parameters
# Write spawn type data
write_id(f, gen["spawn type data"][0]) # Spawn type: Can be 1one, aton or irnd
write_id(f, "v0.0")
write_parameters(f, gen["spawn type data"][1]) # Spawn type parameters
def read_generator(f):
gen = OrderedDict()
#print(hex(f.tell()))
gen["name"] = read_id(f)
assert read_id(f) == "v0.0"
gen["unk1"], gen["unk2"] = (hex(x) for x in struct.unpack(">II", f.read(2*4)))
gen["metadata"] = read_bytes_as_hex(f, 32)
gen["position"] = read_float_tripple(f)
gen["position offset"] = read_float_tripple(f)
gen["object type"] = read_id(f) # reverse string
objtype = gen["object type"]
if objtype == "\x00\x00\x00\x00":
gen["object type"] = objtype = "NULL"
APPEARED_TYPES[objtype] = True
if objtype in ("boss, teki"):
gen["object version"] = struct.unpack("I", f.read(4))[0]
elif objtype == "NULL":
pass
else:
gen["object version"] = str(f.read(4)[::-1], encoding="ascii")
objectdata = {}
#print("doing object of type", gen["object type"])
if objtype in ("piki", "debg", "navi"):
pass
elif objtype in ("actr", "mobj", "plnt", "pelt"):
objectdata["unk int"] = struct.unpack(">I", f.read(4))[0]
elif objtype == "item":
stringlength = struct.unpack(">I", f.read(4))[0]
objectdata["item name"] = str(f.read(stringlength), encoding="ascii")
objectdata["item data 1"] = str(binascii.hexlify(f.read(32)), encoding="ascii")
objectdata["item data 2"] = str(binascii.hexlify(f.read(32)), encoding="ascii")
elif objtype == "work":
stringlength = struct.unpack(">I", f.read(4))[0]
objectdata["string1"] = str(f.read(stringlength), encoding="ascii")
stringlength = struct.unpack(">I", f.read(4))[0]
objectdata["string2"] = str(f.read(stringlength), encoding="ascii")
#print(objectdata["string1"], objectdata["string2"] )
#print(objectdata["string1"], type(objectdata["string1"]), len(objectdata["string1"].strip("\x00")))
#print(objectdata["string1"].strip() == "move stone")
if objectdata["string1"].strip("\x00") == "move stone":
objectdata["work XYZ?"] = struct.unpack(">fff", f.read(3*4))
elif objtype == "mpar":
objectdata["unk int"], objectdata["unk int 2"], objectdata["more data"] = struct.unpack(">III", f.read(3*4))
if objectdata["more data"] == 1:
objectdata["additional data"] = [x for x in struct.unpack(">ffffff", f.read(6*4))]
else:
objectdata["additional data"] = []
elif objtype == "teki":
if gen["object version"] < 7:
objectdata["unk int"], objectdata["unk int1"], objectdata["unk int2"] = struct.unpack(">III", f.read(3*4))
objectdata["multi parameters"] = read_multiparameters(f)
elif gen["object version"] == 8:
objectdata["unk int"], objectdata["unk int1"], objectdata["unk int2"] = struct.unpack(">III", f.read(3*4))
objectdata["identification"] = read_id(f)
objectdata["personality data"] = struct.unpack(">IIIffff", f.read(7*4))
elif gen["object version"] == 9:
objectdata["unk byte"], objectdata["unk byte1"], objectdata["unk byte2"] = struct.unpack(">BBB", f.read(3*1))
objectdata["identification"] = read_id(f)
objectdata["personality data"] = struct.unpack(">IIIffff", f.read(7*4))
elif gen["object version"] >= 10:
objectdata["unk byte"], objectdata["unk byte1"], objectdata["unk byte2"] = struct.unpack(">BBB", f.read(3*1))
objectdata["identification"] = read_id(f)
objectdata["multi parameters"] = read_multiparameters(f)
elif objtype == "boss":
if gen["object version"] >= 2:
objectdata["boss type?"] = struct.unpack(">I", f.read(4))[0]
elif gen["object version"] < 2:
objectdata["boss type?"] = struct.unpack(">I", f.read(4))[0]
objectdata["boss name"] = str(f.read(4), encoding="ascii")
elif objtype == "NULL":
pass
else:
raise RuntimeError("unknown type: {}".format(objtype))
gen["object data"] = objectdata
if objtype != "NULL":
objectdata["obj parameters"] = read_parameters(f)
areatype = read_id(f)
assert read_id(f) == "v0.0"
areaxyz = read_float_tripple(f)
areaparams = read_parameters(f, asfloat=True)
gen["area data"] = [areatype, areaxyz, areaparams]
spawntype = read_id(f)
assert read_id(f) == "v0.0"
spawnparams = read_parameters(f)
gen["spawn type data"] = [spawntype, spawnparams]
return gen
def read_gen_file(f):
assert read_id(f) == "v0.1" # file version
position = read_float_tripple(f)
rotation, generator_count = struct.unpack(">fI", f.read(2*4))
header = {"position": position, "rotation": rotation}
generators = ["Header", header]
for i in range(generator_count):
generator = read_generator(f)
generators.append("Object type: {0}".format(generator["object type"]))
generators.append(generator)
more = f.read(1)
if len(more) > 0:
print("Warning: There is still data after the generators. File offset:", hex(f.tell()-1))
return generators
def write_gen_file(inputjson, output):
# Filter everything that is not a dict. This gets rid of the
# description strings added by read_gen_file
filtered = [obj for obj in inputjson if isinstance(obj, dict)]
# First item is header, all other items are generators
header = filtered[0]
x,y,z = parse_float_tripple(header["position"])
generator_count = len(filtered) - 1
write_id(output, DEFAULT_FILE_VERSION)
output.write(struct.pack(">ffffI", x, y, z, float(header["rotation"]), generator_count))
if len(filtered) > 1:
for generator in filtered[1:]:
write_generator(output, generator)
if __name__ == "__main__":
GEN2TXT = 1
TXT2GEN = 2
parser = argparse.ArgumentParser()
parser.add_argument("input",
help="Filepath to the file that should be converted")
parser.add_argument("--gen2txt", action="store_true", default=False,
help="If set, converts a .gen file to a json text file.")
parser.add_argument("--txt2gen", action="store_true", default=False,
help="If set, converts a json text file to .gen")
parser.add_argument("output", default=None, nargs = '?',
help="Filepath to which the result of the conversion will be written")
args = parser.parse_args()
mode = 0
if not args.gen2txt and not args.txt2gen:
print("Conversion mode not set. Trying to detect by file ending...")
if args.input.endswith(".gen"):
print("Detected gen2txt")
mode = GEN2TXT
elif args.output.endswith(".gen"):
print("Detected txt2gen")
mode = TXT2GEN
else:
raise RuntimeError("Couldn't detect conversion mode. You need to set either --gen2txt or --txt2gen!")
if args.gen2txt and args.txt2gen:
raise RuntimeError("You cannot use both conversion modes at the same time!")
if args.gen2txt:
mode = GEN2TXT
elif args.txt2gen:
mode = TXT2GEN
if mode == 0:
raise RuntimeError("Conversion mode undefined. Did you set a conversion mode? (--gen2txt or --txt2gen)")
if mode == GEN2TXT:
print("Converting gen file to text...")
print("Reading", args.input)
with open(args.input, "rb") as f:
data = read_gen_file(f)
print("Gen file read, now writing to", args.output)
with open(args.output, "w") as f:
json.dump(data, f, indent=" "*4)
print("Done")
elif mode == TXT2GEN:
print("Converting text file to gen...")
print("Reading", args.input)
with open(args.input, "r") as f:
data = json.load(f, object_pairs_hook=OrderedDict)
print("Text file loaded, now converting and writing to", args.output)
with open(args.output, "wb") as f:
write_gen_file(data, f)
print("Done")
# Regression test assuming a folder "stages" in the same path as the tool itself
"""if True:
for dirpath, drinames, filenames in os.walk("stages"):
for filename in filenames:
if ".gen" in filename: #filename.endswith(".gen"):
path = os.path.join(dirpath, filename)
print("reading", path)
with open(path, "rb") as f:
control_data = f.read()
f.seek(0)
data = read_gen_file(f)
with open("testgen.json", "w") as f:
json.dump(data, f, indent=" "*4)
with open("testgen.json", "r") as f:
newdata = json.load(f, object_pairs_hook=OrderedDict)
with open("testgen.gen", "wb") as f:
write_gen_file(newdata, f)
with open("testgen.gen", "rb") as f:
checkagainst = f.read()
assert control_data == checkagainst"""
#print(APPEARED_TYPES)
"""genfile = os.path.join(gendir, "stage3", "default.gen")
with open(genfile, "rb") as f:
data = read_gen_file(f)
with open("testgen.json", "w") as f:
json.dump(data, f, indent=" "*4)
with open("testgen.json", "r") as f:
data = json.load(f, object_pairs_hook=OrderedDict)
with open("newbingen.gen", "wb") as f:
write_gen_file(data, f)""" |
from ..proxy import ObjectProxy
from ..specification import FirstCall, AlteredCall, Call
from .story import Story
from .given import Given
from .manipulation import Manipulator, Append, Remove, Update, \
CompositeManipulatorInitializer
story = ObjectProxy(Given.get_current)
response = ObjectProxy(lambda: story.response)
status = ObjectProxy(lambda: response.status)
given = CompositeManipulatorInitializer()
def when(*args, **kwargs):
return story.when(*args, **kwargs)
|
# Copyright (C) 2015 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rest_framework import serializers
from django.conf import settings
from adjutant.common import user_store
role_options = settings.DEFAULT_ACTION_SETTINGS.get("NewUserAction", {}).get(
"allowed_roles", [])
def get_region_choices():
id_manager = user_store.IdentityManager()
return (region.id for region in id_manager.list_regions())
class BaseUserNameSerializer(serializers.Serializer):
"""
A serializer where the user is identified by username/email.
"""
domain_id = serializers.CharField(max_length=64, default='default')
username = serializers.CharField(max_length=255)
email = serializers.EmailField()
def __init__(self, *args, **kwargs):
super(BaseUserNameSerializer, self).__init__(*args, **kwargs)
if settings.USERNAME_IS_EMAIL:
self.fields.pop('username')
class BaseUserIdSerializer(serializers.Serializer):
user_id = serializers.CharField(max_length=64)
class NewUserSerializer(BaseUserNameSerializer):
roles = serializers.MultipleChoiceField(
choices=role_options, default=set)
inherited_roles = serializers.MultipleChoiceField(
choices=role_options, default=set)
project_id = serializers.CharField(max_length=64)
def validate(self, data):
if not data['roles'] and not data['inherited_roles']:
raise serializers.ValidationError(
"Must supply either 'roles' or 'inherited_roles', or both.")
return data
class NewProjectSerializer(serializers.Serializer):
parent_id = serializers.CharField(
max_length=64, default=None, allow_null=True)
project_name = serializers.CharField(max_length=64)
domain_id = serializers.CharField(max_length=64, default='default')
description = serializers.CharField(default="", allow_blank=True)
class NewProjectWithUserSerializer(BaseUserNameSerializer):
parent_id = serializers.CharField(
max_length=64, default=None, allow_null=True)
project_name = serializers.CharField(max_length=64)
class ResetUserSerializer(BaseUserNameSerializer):
domain_name = serializers.CharField(max_length=64, default='Default')
# override domain_id so serializer doesn't set it up.
domain_id = None
class EditUserRolesSerializer(BaseUserIdSerializer):
roles = serializers.MultipleChoiceField(
choices=role_options, default=set)
inherited_roles = serializers.MultipleChoiceField(
choices=role_options, default=set)
remove = serializers.BooleanField(default=False)
project_id = serializers.CharField(max_length=64)
domain_id = serializers.CharField(max_length=64, default='default')
def validate(self, data):
if not data['roles'] and not data['inherited_roles']:
raise serializers.ValidationError(
"Must supply either 'roles' or 'inherited_roles', or both.")
return data
class NewDefaultNetworkSerializer(serializers.Serializer):
setup_network = serializers.BooleanField(default=True)
project_id = serializers.CharField(max_length=64)
region = serializers.CharField(max_length=100)
class NewProjectDefaultNetworkSerializer(serializers.Serializer):
setup_network = serializers.BooleanField(default=False)
region = serializers.CharField(max_length=100)
class AddDefaultUsersToProjectSerializer(serializers.Serializer):
domain_id = serializers.CharField(max_length=64, default='default')
class SetProjectQuotaSerializer(serializers.Serializer):
pass
class SendAdditionalEmailSerializer(serializers.Serializer):
pass
class UpdateUserEmailSerializer(BaseUserIdSerializer):
new_email = serializers.EmailField()
class UpdateProjectQuotasSerializer(serializers.Serializer):
project_id = serializers.CharField(max_length=64)
size = serializers.CharField(max_length=64)
def __init__(self, *args, **kwargs):
super(UpdateProjectQuotasSerializer, self).__init__(*args, **kwargs)
        # NOTE(amelia): This override is mostly in use so that it can be tested
# However it does take into account the improbable edge case that the
# regions have changed since the server was last started
self.fields['regions'] = serializers.MultipleChoiceField(
choices=get_region_choices())
def validate_size(self, value):
"""
Check that the size exists in the conf.
"""
size_list = settings.PROJECT_QUOTA_SIZES.keys()
if value not in size_list:
raise serializers.ValidationError("Quota size: %s is not valid"
% value)
return value
|
import sys
import os
import yaml #PyYAML must be installed
import languageSwitcher
import CPlusPlusLanguageSwitcher
import CLanguageSwitcher
import JavaLanguageSwitcher
import PythonLanguageSwitcher
from UnsupportedLanguageException import *
sys.path.append("../util")
from Util import supportedLanguages
class LanguageSwitcherFactory:
extMap = {}
@staticmethod
def loadLanguageMap(langFile = "../../Resources/languages.yml"):
with open(langFile, 'r') as f:
LanguageSwitcherFactory.extMap = yaml.safe_load(f)
#Create a new language switcher of the correct type.
@staticmethod
def createLS(language):
if(LanguageSwitcherFactory.extMap == {}):
LanguageSwitcherFactory.loadLanguageMap("../../Resources/languages.yml")
return LanguageSwitcherFactory.determineLanguage(language)
#String -> String
#Given either a language name or a file extension for a language, return a normalized language string
#to use
@staticmethod
def determineLanguage(language): #Replace these with tokens?
language = language.strip()
#Check for names
if(language.lower() == "c++" or language.lower() in LanguageSwitcherFactory.extMap["C++"]["extensions"]):
return CPlusPlusLanguageSwitcher.CPlusPlusLanguageSwitcher(set(LanguageSwitcherFactory.extMap["C++"]["extensions"]))
elif(language.lower() == "c" or language.lower() in LanguageSwitcherFactory.extMap["C"]["extensions"]):
return CLanguageSwitcher.CLanguageSwitcher(set(LanguageSwitcherFactory.extMap["C"]["extensions"]))
elif(language.lower() == "java" or language.lower() in LanguageSwitcherFactory.extMap["Java"]["extensions"]):
return JavaLanguageSwitcher.JavaLanguageSwitcher(set(LanguageSwitcherFactory.extMap["Java"]["extensions"]))
elif(language.lower() == "python" or language.lower() in LanguageSwitcherFactory.extMap["Python"]["extensions"]):
return PythonLanguageSwitcher.PythonLanguageSwitcher(set(LanguageSwitcherFactory.extMap["Python"]["extensions"]))
else:
print((LanguageSwitcherFactory.extMap["C"]["extensions"]))
raise UnsupportedLanguageException(language + " not yet supported.")
@staticmethod
def getExtensions(languages):
'''
Given some languages, return the set of extensions associated with them. If no languages
are given or none in the set are recognized, return the extensions for all recognized languages.
If only a portion are recognized, return the set of extensions for just these languages.
'''
extensions = set()
for l in languages:
try:
extensions.update(LanguageSwitcherFactory.createLS(l).getExtensions())
except UnsupportedLanguageException: #skip unrecognized languages
pass
if (len(extensions) == 0):
            return LanguageSwitcherFactory.getExtensions(supportedLanguages)
else:
return extensions
|
"""djangoProject1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from project_first_app import views
from project_first_app.views import *
urlpatterns = [
path('admin/', admin.site.urls),
path('Car_Owner/<id>', views.detail),
path('Car_Ownerlist/', views.detail1),
path('Carlist/', ExamplelistCar.as_view()),
path('car/<int:pk>/', CarRetrieveView.as_view()),
path('car/list/', CarListView.as_view()),
path('owner_create', create_view),
path('car/create/', CarCreateView.as_view()),
path('car/<int:pk>/update/', CarUpdateView.as_view()),
path('car/<int:pk>/delete/',CarDeleteView.as_view()),
]
|
import cv2
from frame_info import FrameInfo
class ImageProcessor:
def __init__(self, camera, threshold, perspective_transform, lane, lane_validator, lane_mask_factory, logger):
self.camera = camera
self.threshold = threshold
self.perspective_transform = perspective_transform
self.lane = lane
self.lane_validator = lane_validator
self.lane_mask_factory = lane_mask_factory
self.logger = logger
self.mask_color = (0, 255, 0)
def process_frame(self, bgr_frame):
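        """Run the lane-detection pipeline on one BGR frame: undistort, threshold,
        warp to a top-down view, update and validate the lane model, then either
        log the failure or overlay the detected lane mask on the undistorted frame."""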
undistorted_image = self.camera.undistort(bgr_frame)
bw_image_filtered = self.threshold.execute(undistorted_image)
bw_bird_view = self.perspective_transform.execute(bw_image_filtered, transform_to="top_down_view")
self.lane.update(bw_bird_view)
validation_result = self.lane_validator.validate(self.lane)
texts = FrameInfo.create(self.lane, validation_result, self.perspective_transform.vanishing_point_distance)
if validation_result.lane_is_lost:
self.lane.reset()
self.logger.info(validation_result, bgr_frame, bw_bird_view, texts)
result_image = undistorted_image
else:
lane_mask_bird_view = self.lane_mask_factory.create(self.lane, validation_result)
lane_mask = self.perspective_transform.execute(lane_mask_bird_view, transform_to="front_view")
result_image = cv2.addWeighted(lane_mask, 0.9, undistorted_image, 1, 0)
perspective_distance_adjust_direction = self.lane.top_width - self.lane.width
self.perspective_transform.adjust_vanishing_point_distance(perspective_distance_adjust_direction)
if not validation_result.car_is_on_lane:
self.lane.reset()
FrameInfo.print(result_image, texts)
return result_image
|
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The experiment runner module."""
from __future__ import print_function
import getpass
import os
import shutil
import time
import afe_lock_machine
import test_flag
from cros_utils import command_executer
from cros_utils import logger
from cros_utils.email_sender import EmailSender
from cros_utils.file_utils import FileUtils
import config
from experiment_status import ExperimentStatus
from results_cache import CacheConditions
from results_cache import ResultsCache
from results_report import HTMLResultsReport
from results_report import TextResultsReport
from results_report import JSONResultsReport
from schedv2 import Schedv2
def _WriteJSONReportToFile(experiment, results_dir, json_report):
"""Writes a JSON report to a file in results_dir."""
has_llvm = any('llvm' in l.compiler for l in experiment.labels)
compiler_string = 'llvm' if has_llvm else 'gcc'
board = experiment.labels[0].board
filename = 'report_%s_%s_%s.%s.json' % (
board, json_report.date, json_report.time.replace(':', '.'),
compiler_string)
fullname = os.path.join(results_dir, filename)
report_text = json_report.GetReport()
with open(fullname, 'w') as out_file:
out_file.write(report_text)
class ExperimentRunner(object):
"""ExperimentRunner Class."""
STATUS_TIME_DELAY = 30
THREAD_MONITOR_DELAY = 2
def __init__(self,
experiment,
json_report,
using_schedv2=False,
log=None,
cmd_exec=None):
self._experiment = experiment
self.l = log or logger.GetLogger(experiment.log_dir)
self._ce = cmd_exec or command_executer.GetCommandExecuter(self.l)
self._terminated = False
self.json_report = json_report
self.locked_machines = []
if experiment.log_level != 'verbose':
self.STATUS_TIME_DELAY = 10
# Setting this to True will use crosperf sched v2 (feature in progress).
self._using_schedv2 = using_schedv2
def _GetMachineList(self):
"""Return a list of all requested machines.
Create a list of all the requested machines, both global requests and
label-specific requests, and return the list.
"""
machines = self._experiment.remote
# All Label.remote is a sublist of experiment.remote.
for l in self._experiment.labels:
for r in l.remote:
assert r in machines
return machines
def _UpdateMachineList(self, locked_machines):
"""Update machines lists to contain only locked machines.
Go through all the lists of requested machines, both global and
label-specific requests, and remove any machine that we were not
able to lock.
Args:
locked_machines: A list of the machines we successfully locked.
"""
for m in self._experiment.remote:
if m not in locked_machines:
self._experiment.remote.remove(m)
for l in self._experiment.labels:
for m in l.remote:
if m not in locked_machines:
l.remote.remove(m)
def _LockAllMachines(self, experiment):
"""Attempt to globally lock all of the machines requested for run.
This method will use the AFE server to globally lock all of the machines
requested for this crosperf run, to prevent any other crosperf runs from
being able to update/use the machines while this experiment is running.
"""
if test_flag.GetTestMode():
self.locked_machines = self._GetMachineList()
self._experiment.locked_machines = self.locked_machines
else:
lock_mgr = afe_lock_machine.AFELockManager(
self._GetMachineList(),
'',
experiment.labels[0].chromeos_root,
None,
log=self.l,)
for m in lock_mgr.machines:
if not lock_mgr.MachineIsKnown(m):
lock_mgr.AddLocalMachine(m)
machine_states = lock_mgr.GetMachineStates('lock')
lock_mgr.CheckMachineLocks(machine_states, 'lock')
self.locked_machines = lock_mgr.UpdateMachines(True)
self._experiment.locked_machines = self.locked_machines
self._UpdateMachineList(self.locked_machines)
self._experiment.machine_manager.RemoveNonLockedMachines(
self.locked_machines)
if len(self.locked_machines) == 0:
raise RuntimeError('Unable to lock any machines.')
def _UnlockAllMachines(self, experiment):
"""Attempt to globally unlock all of the machines requested for run.
The method will use the AFE server to globally unlock all of the machines
requested for this crosperf run.
"""
if not self.locked_machines or test_flag.GetTestMode():
return
lock_mgr = afe_lock_machine.AFELockManager(
self.locked_machines,
'',
experiment.labels[0].chromeos_root,
None,
log=self.l,)
machine_states = lock_mgr.GetMachineStates('unlock')
lock_mgr.CheckMachineLocks(machine_states, 'unlock')
lock_mgr.UpdateMachines(False)
def _ClearCacheEntries(self, experiment):
for br in experiment.benchmark_runs:
cache = ResultsCache()
cache.Init(br.label.chromeos_image, br.label.chromeos_root,
br.benchmark.test_name, br.iteration, br.test_args,
br.profiler_args, br.machine_manager, br.machine,
br.label.board, br.cache_conditions, br._logger, br.log_level,
br.label, br.share_cache, br.benchmark.suite,
br.benchmark.show_all_results, br.benchmark.run_local)
cache_dir = cache.GetCacheDirForWrite()
if os.path.exists(cache_dir):
self.l.LogOutput('Removing cache dir: %s' % cache_dir)
shutil.rmtree(cache_dir)
def _Run(self, experiment):
try:
if not experiment.locks_dir:
self._LockAllMachines(experiment)
if self._using_schedv2:
schedv2 = Schedv2(experiment)
experiment.set_schedv2(schedv2)
if CacheConditions.FALSE in experiment.cache_conditions:
self._ClearCacheEntries(experiment)
status = ExperimentStatus(experiment)
experiment.Run()
last_status_time = 0
last_status_string = ''
try:
if experiment.log_level != 'verbose':
self.l.LogStartDots()
while not experiment.IsComplete():
if last_status_time + self.STATUS_TIME_DELAY < time.time():
last_status_time = time.time()
border = '=============================='
if experiment.log_level == 'verbose':
self.l.LogOutput(border)
self.l.LogOutput(status.GetProgressString())
self.l.LogOutput(status.GetStatusString())
self.l.LogOutput(border)
else:
current_status_string = status.GetStatusString()
if current_status_string != last_status_string:
self.l.LogEndDots()
self.l.LogOutput(border)
self.l.LogOutput(current_status_string)
self.l.LogOutput(border)
last_status_string = current_status_string
else:
self.l.LogAppendDot()
time.sleep(self.THREAD_MONITOR_DELAY)
except KeyboardInterrupt:
self._terminated = True
self.l.LogError('Ctrl-c pressed. Cleaning up...')
experiment.Terminate()
raise
except SystemExit:
self._terminated = True
self.l.LogError('Unexpected exit. Cleaning up...')
experiment.Terminate()
raise
finally:
if not experiment.locks_dir:
self._UnlockAllMachines(experiment)
def _PrintTable(self, experiment):
self.l.LogOutput(TextResultsReport.FromExperiment(experiment).GetReport())
def _Email(self, experiment):
# Only email by default if a new run was completed.
send_mail = False
for benchmark_run in experiment.benchmark_runs:
if not benchmark_run.cache_hit:
send_mail = True
break
if (not send_mail and not experiment.email_to or
config.GetConfig('no_email')):
return
label_names = []
for label in experiment.labels:
label_names.append(label.name)
subject = '%s: %s' % (experiment.name, ' vs. '.join(label_names))
text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
text_report = "<pre style='font-size: 13px'>%s</pre>" % text_report
html_report = HTMLResultsReport.FromExperiment(experiment).GetReport()
attachment = EmailSender.Attachment('report.html', html_report)
email_to = experiment.email_to or []
email_to.append(getpass.getuser())
EmailSender().SendEmail(email_to,
subject,
text_report,
attachments=[attachment],
msg_type='html')
def _StoreResults(self, experiment):
if self._terminated:
return
results_directory = experiment.results_directory
FileUtils().RmDir(results_directory)
FileUtils().MkDirP(results_directory)
self.l.LogOutput('Storing experiment file in %s.' % results_directory)
experiment_file_path = os.path.join(results_directory, 'experiment.exp')
FileUtils().WriteFile(experiment_file_path, experiment.experiment_file)
self.l.LogOutput('Storing results report in %s.' % results_directory)
results_table_path = os.path.join(results_directory, 'results.html')
report = HTMLResultsReport.FromExperiment(experiment).GetReport()
if self.json_report:
json_report = JSONResultsReport.FromExperiment(experiment,
json_args={'indent': 2})
_WriteJSONReportToFile(experiment, results_directory, json_report)
FileUtils().WriteFile(results_table_path, report)
self.l.LogOutput('Storing email message body in %s.' % results_directory)
msg_file_path = os.path.join(results_directory, 'msg_body.html')
text_report = TextResultsReport.FromExperiment(experiment, True).GetReport()
text_report += ('\nResults are stored in %s.\n' %
experiment.results_directory)
msg_body = "<pre style='font-size: 13px'>%s</pre>" % text_report
FileUtils().WriteFile(msg_file_path, msg_body)
self.l.LogOutput('Storing results of each benchmark run.')
for benchmark_run in experiment.benchmark_runs:
if benchmark_run.result:
benchmark_run_name = filter(str.isalnum, benchmark_run.name)
benchmark_run_path = os.path.join(results_directory, benchmark_run_name)
benchmark_run.result.CopyResultsTo(benchmark_run_path)
benchmark_run.result.CleanUp(benchmark_run.benchmark.rm_chroot_tmp)
def Run(self):
try:
self._Run(self._experiment)
finally:
# Always print the report at the end of the run.
self._PrintTable(self._experiment)
if not self._terminated:
self._StoreResults(self._experiment)
self._Email(self._experiment)
class MockExperimentRunner(ExperimentRunner):
"""Mocked ExperimentRunner for testing."""
def __init__(self, experiment, json_report):
super(MockExperimentRunner, self).__init__(experiment, json_report)
def _Run(self, experiment):
self.l.LogOutput("Would run the following experiment: '%s'." %
experiment.name)
def _PrintTable(self, experiment):
self.l.LogOutput('Would print the experiment table.')
def _Email(self, experiment):
self.l.LogOutput('Would send result email.')
def _StoreResults(self, experiment):
self.l.LogOutput('Would store the results.')
|
from ..utils import Object
class GetUser(Object):
"""
Returns information about a user by their identifier. This is an offline request if the current user is not a bot
Attributes:
ID (:obj:`str`): ``GetUser``
Args:
user_id (:obj:`int`):
User identifier
Returns:
User
Raises:
:class:`telegram.Error`
"""
ID = "getUser"
def __init__(self, user_id, extra=None, **kwargs):
self.extra = extra
self.user_id = user_id # int
@staticmethod
def read(q: dict, *args) -> "GetUser":
user_id = q.get('user_id')
return GetUser(user_id)
|