#!/usr/bin/env nix-shell
#!nix-shell --pure -i python3 -p "python3.withPackages (ps: with ps; [ requests ])"
import json
import re
import requests
import sys
releases = ["openjdk11"]
oses = ["mac", "linux"]
types = ["jre", "jdk"]
impls = ["hotspot", "openj9"]
arch_to_nixos = {
"x64": "x86_64",
"aarch64": "aarch64",
}
def get_sha256(url):
resp = requests.get(url)
if resp.status_code != 200:
print("error: could not fetch checksum from url {}: code {}".format(url, resp.code), file=sys.stderr)
sys.exit(1)
return resp.text.strip().split(" ")[0]
RE_RELEASE_NAME = re.compile(r'[^-]+-([0-9.]+)\+([0-9]+)') # example release name: jdk-11.0.1+13
def generate_sources(release, assets):
out = {}
for asset in assets:
if asset["os"] not in oses: continue
if asset["binary_type"] not in types: continue
if asset["openjdk_impl"] not in impls: continue
if asset["heap_size"] != "normal": continue
if asset["architecture"] not in arch_to_nixos: continue
version, build = RE_RELEASE_NAME.match(asset["release_name"]).groups()
type_map = out.setdefault(asset["os"], {})
impl_map = type_map.setdefault(asset["binary_type"], {})
arch_map = impl_map.setdefault(asset["openjdk_impl"], {
"packageType": asset["binary_type"],
"vmType": asset["openjdk_impl"],
})
arch_map[arch_to_nixos[asset["architecture"]]] = {
"url": asset["binary_link"],
"sha256": get_sha256(asset["checksum_link"]),
"version": version,
"build": build,
}
return out
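
# Shape of the mapping produced above (per release), derived from the loop:
#   out[os][binary_type][openjdk_impl] = {
#     "packageType": ..., "vmType": ...,
#     "<nixos_arch>": {"url": ..., "sha256": ..., "version": ..., "build": ...},
#   }
# e.g. for a release named "jdk-11.0.1+13", version is "11.0.1" and build is "13".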
out = {}
for release in releases:
resp = requests.get("https://api.adoptopenjdk.net/v2/latestAssets/releases/" + release)
if resp.status_code != 200:
print("error: could not fetch data for release {} (code {})".format(release, resp.code), file=sys.stderr)
sys.exit(1)
out[release] = generate_sources(release, resp.json())
with open("sources.json", "w") as f:
json.dump(out, f, indent=2, sort_keys=True)
# ---- SymbiFlow/nixpkgs | pkgs/development/compilers/adoptopenjdk-bin/generate-sources.py | Python | mit | 2,081 bytes ----
"""
<Program>
mocklib.py
<Author>
Justin Samuel
<Date Started>
Aug 10, 2009
<Purpose>
This provides functions that take care of mocking out (read: monkey
patching) various parts of seattlegeni's internal api, including calls to:
nodemanager.get_node_info
lockserver.create_lockserver_handle
lockserver.destroy_lockserver_handle
lockserver._perform_lock_request
backend.acquire_vessel
backend.generate_key
keygen.generate_keypair
The idea is to use the functions provided in this module to keep test scripts
clean and not repeat code in each one doing the same or similar monkey
patching with mock versions of these function calls.
"""
from seattlegeni.common.api import backend
from seattlegeni.common.api import keygen
from seattlegeni.common.api import lockserver
from seattlegeni.common.api import nodemanager
from seattlegeni.common.exceptions import *
_mock_nodemanager_get_node_info_args = None
def _mock_get_node_info(ip, port):
(nodeid_key, version, vessels_dict) = _mock_nodemanager_get_node_info_args
nodeinfo = {"version" : version,
"nodename" : "",
"nodekey" : nodeid_key,
"vessels" : {}}
nodeinfo["vessels"] = vessels_dict
return nodeinfo
def mock_nodemanager_get_node_info(nodeid_key, version, vessels_dict):
global _mock_nodemanager_get_node_info_args
_mock_nodemanager_get_node_info_args = (nodeid_key, version, vessels_dict)
nodemanager.get_node_info = _mock_get_node_info
def _mock_create_lockserver_handle(lockserver_url=None):
pass
def _mock_destroy_lockserver_handle(lockserver_handle):
pass
def _mock_perform_lock_request(request_type, lockserver_handle, user_list=None, node_list=None):
pass
def mock_lockserver_calls():
lockserver.create_lockserver_handle = _mock_create_lockserver_handle
lockserver.destroy_lockserver_handle = _mock_destroy_lockserver_handle
lockserver._perform_lock_request = _mock_perform_lock_request
_mock_acquire_vessel_result_list = None
def _mock_acquire_vessel(geniuser, vessel):
result_list = _mock_acquire_vessel_result_list
if len(result_list) == 0:
raise Exception("_mock_acquire_vessel ran out results. " +
"Either you need to provide more results in the result_list, " +
"or this is a legitimate test failure.")
  if not result_list.pop(0):
    raise UnableToAcquireResourcesError
def mock_backend_acquire_vessel(result_list):
"""
Provide a list of boolean values that the mock'd out backend.acquire_vessels
will use to decide whether to return without an exception (True) or to raise
an UnableToAcquireResourcesError (False). The list will be used in order and
an exception will be raised if the mock backend.acquire_vessel() function is
called more times than there are items in the list.
"""
global _mock_acquire_vessel_result_list
_mock_acquire_vessel_result_list = result_list
backend.acquire_vessel = _mock_acquire_vessel
_mock_generate_key_keylist = None
def _mock_generate_key(keydescription):
keylist = _mock_generate_key_keylist
if len(keylist) == 0:
raise Exception("_mock_generate_key ran out of keys. " +
"Either you need to provide more keys, or this is a legitimate test failure.")
return keylist.pop(0)
def mock_backend_generate_key(keylist):
global _mock_generate_key_keylist
_mock_generate_key_keylist = keylist
backend.generate_key = _mock_generate_key
_mock_generate_keypair_keypairlist = None
def _mock_generate_keypair():
keypairlist = _mock_generate_keypair_keypairlist
if len(keypairlist) == 0:
raise Exception("_mock_generate_keypair ran out of keypairs. " +
"Either you need to provide more keypairs, or this is a legitimate test failure.")
return keypairlist.pop(0)
def mock_keygen_generate_keypair(keypairlist):
global _mock_generate_keypair_keypairlist
_mock_generate_keypair_keypairlist = keypairlist
keygen.generate_keypair = _mock_generate_keypair
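
# A minimal usage sketch (hypothetical test code, not part of this module):
#
#   import mocklib
#   mocklib.mock_lockserver_calls()
#   mocklib.mock_backend_generate_key(["key1", "key2"])
#   mocklib.mock_backend_acquire_vessel([True, True, False])
#   # ...exercise seattlegeni code under test; the third call to
#   # backend.acquire_vessel() will raise UnableToAcquireResourcesError.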
# ---- SensibilityTestbed/clearinghouse | tests/mocklib.py | Python | mit | 4,074 bytes ----
from actstream.models import Follow, Like
from django.template import Library
register = Library()
def is_following(user, actor):
"""
    returns True if the user is following the actor
::
{% if request.user|is_following:another_user %}
You are already following {{ another_user }}
{% endif %}
"""
return Follow.objects.is_following(user, actor)
def likes(user, actor):
    """
    returns True if the user likes the actor
    """
    return Like.objects.is_like(user, actor)
register.filter(is_following)
register.filter(likes)
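
# Template usage sketch for the "likes" filter (illustrative, mirroring the
# is_following example above):
#
#   {% if request.user|likes:another_user %}
#     You already like {{ another_user }}
#   {% endif %}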
# ---- LagunaJS/Midori | Feed/templatetags/activity_tags.py | Python | mit | 517 bytes ----
import argparse
import collections
import inspect
import json
import logging
import multiprocessing as mp
import numpy as np
import re
import sys
import zipfile
from datetime import datetime, timedelta
from os import path, listdir, environ, getpid
from textwrap import wrap
PARALLEL_PROCESS_NUM = mp.cpu_count()
TIMESTAMP_REGEX = r'(\d{4}_\d{2}_\d{2}_\d{6})'
SPEC_PATH = path.join(path.dirname(__file__), 'spec')
COMPONENT_LOCKS = json.loads(
open(path.join(SPEC_PATH, 'component_locks.json')).read())
LOCK_HEAD_REST_SIG = {
# signature list of [head, rest] in component lock
'mutex': [[0, 0], [1, 1]],
'subset': [[0, 0], [1, 0], [1, 1]],
}
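# Illustration: for a 'mutex' lock the valid signatures [bin_head, bin_rest]
# are [0, 0] and [1, 1], i.e. the head and the rest components are either all
# inside the locked sets or all outside; 'subset' additionally allows [1, 0].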
# parse_args to add flag
parser = argparse.ArgumentParser(description='Set flags for functions')
parser.add_argument("-b", "--blind",
help="dont render graphics",
action="store_const",
dest="render",
const=False,
default=True)
parser.add_argument("-d", "--debug",
help="activate debug log",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.INFO)
parser.add_argument("-e", "--experiment",
help="specify experiment to run",
action="store",
type=str,
nargs='?',
dest="experiment",
default="dev_dqn")
parser.add_argument("-p", "--param_selection",
help="run parameter selection if present",
action="store_true",
dest="param_selection",
default=False)
parser.add_argument("-q", "--quiet",
help="change log to warning level",
action="store_const",
dest="loglevel",
const=logging.WARNING,
default=logging.INFO)
parser.add_argument("-t", "--times",
help="number of times session is run",
action="store",
nargs='?',
type=int,
dest="times",
default=1)
parser.add_argument("-x", "--max_episodes",
help="manually set environment max episodes",
action="store",
nargs='?',
type=int,
dest="max_epis",
default=-1)
args = parser.parse_args([]) if environ.get('CI') else parser.parse_args()
# Goddam python logger
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s'))
logger.setLevel(args.loglevel)
logger.addHandler(handler)
logger.propagate = False
environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # mute tf warnings on optimized setup
def check_equal(iterator):
'''check if list contains all the same elements'''
iterator = iter(iterator)
try:
first = next(iterator)
except StopIteration:
return True
return all(first == rest for rest in iterator)
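# e.g. check_equal([1, 1, 1]) is True, check_equal([1, 2]) is False,
# and check_equal([]) is True (vacuously).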
def check_lock(lock_name, lock, experiment_spec):
'''
refer to rl/spec/component_locks.json
check a spec's component lock using binary signatures
e.g. head = problem (discrete)
rest = [Agent, Policy] (to be discrete too)
first check if rest all has the same signature, i.e. same set
then check pair [bin_head, bin_rest] in valid_lock_sig_list
as specified by the lock's type
'''
lock_type = lock['type']
valid_lock_sig_list = LOCK_HEAD_REST_SIG[lock_type]
lock_head = lock['head']
bin_head = (experiment_spec[lock_head] in lock[lock_head])
bin_rest_list = []
for k, v_list in lock.items():
if k in experiment_spec and k != lock_head:
bin_rest_list.append(experiment_spec[k] in v_list)
# rest must all have the same signature
rest_equal = check_equal(bin_rest_list)
if not rest_equal:
        logger.warning(
'All components need to be of the same set, '
'check component lock "{}" and your spec "{}"'.format(
lock_name, experiment_spec['experiment_name']))
bin_rest = bin_rest_list[0]
lock_sig = [bin_head, bin_rest]
lock_valid = lock_sig in valid_lock_sig_list
if not lock_valid:
        logger.warning(
'Component lock violated: "{}", spec: "{}"'.format(
lock_name, experiment_spec['experiment_name']))
return lock_valid
def check_component_locks(experiment_spec):
'''
check the spec components for all locks
to ensure no lock is violated
refer to rl/spec/component_locks.json
'''
for lock_name, lock in COMPONENT_LOCKS.items():
check_lock(lock_name, lock, experiment_spec)
return
# import and safeguard the PROBLEMS, EXPERIMENT_SPECS with checks
def import_guard_asset():
PROBLEMS = json.loads(open(path.join(SPEC_PATH, 'problems.json')).read())
EXPERIMENT_SPECS = {}
spec_files = [spec_json for spec_json in listdir(
SPEC_PATH) if spec_json.endswith('experiment_specs.json')]
for filename in spec_files:
specs = json.loads(open(path.join(SPEC_PATH, filename)).read())
EXPERIMENT_SPECS.update(specs)
REQUIRED_PROBLEM_KEYS = [
'GYM_ENV_NAME', 'SOLVED_MEAN_REWARD',
'MAX_EPISODES', 'REWARD_MEAN_LEN']
REQUIRED_SPEC_KEYS = [
'problem', 'Agent', 'HyperOptimizer',
'Memory', 'Optimizer', 'Policy', 'PreProcessor', 'param']
for problem_name, problem in PROBLEMS.items():
assert all(k in problem for k in REQUIRED_PROBLEM_KEYS), \
'{} needs all REQUIRED_PROBLEM_KEYS'.format(
problem_name)
for experiment_name, spec in EXPERIMENT_SPECS.items():
assert all(k in spec for k in REQUIRED_SPEC_KEYS), \
'{} needs all REQUIRED_SPEC_KEYS'.format(experiment_name)
EXPERIMENT_SPECS[experiment_name]['experiment_name'] = experiment_name
check_component_locks(spec) # check component_locks.json
if 'param_range' not in EXPERIMENT_SPECS[experiment_name]:
continue
param_range = EXPERIMENT_SPECS[experiment_name]['param_range']
for param_key, param_val in param_range.items():
if isinstance(param_val, list):
param_range[param_key] = sorted(param_val)
elif isinstance(param_val, dict):
pass
else:
assert False, \
'param_range value must be list or dict: {}.{}:{}'.format(
experiment_name, param_key, param_val)
EXPERIMENT_SPECS[experiment_name]['param_range'] = param_range
return PROBLEMS, EXPERIMENT_SPECS
PROBLEMS, EXPERIMENT_SPECS = import_guard_asset()
def log_self(subject):
max_info_len = 300
info = '{}, param: {}'.format(
subject.__class__.__name__,
to_json(subject.__dict__))
trunc_info = (
info[:max_info_len] + '...' if len(info) > max_info_len else info)
logger.debug(trunc_info)
def wrap_text(text):
return '\n'.join(wrap(text, 60))
def make_line(line='-'):
if environ.get('CI'):
return
columns = 80
line_str = line*int(columns)
return line_str
def log_delimiter(msg, line='-'):
delim_msg = '''\n{0}\n{1}\n{0}\n\n'''.format(
make_line(line), msg)
logger.info(delim_msg)
def log_trial_delimiter(trial, action):
log_delimiter('{} Trial #{}/{} on PID {}:\n{}'.format(
action, trial.trial_num, trial.num_of_trials,
getpid(), trial.trial_id), '=')
def log_session_delimiter(sess, action):
log_delimiter(
'{} Session #{}/{} of Trial #{}/{} on PID {}:\n{}'.format(
action, sess.session_num, sess.num_of_sessions,
sess.trial.trial_num, sess.trial.num_of_trials,
getpid(), sess.session_id))
def timestamp():
'''timestamp used for filename'''
timestamp_str = '{:%Y_%m_%d_%H%M%S}'.format(datetime.now())
assert re.search(TIMESTAMP_REGEX, timestamp_str)
return timestamp_str
def timestamp_elapse(s1, s2):
'''calculate the time elapsed between timestamps from s1 to s2'''
FMT = '%Y_%m_%d_%H%M%S'
delta_t = datetime.strptime(s2, FMT) - datetime.strptime(s1, FMT)
return str(delta_t)
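# e.g. timestamp_elapse('2017_01_01_000000', '2017_01_01_010230') == '1:02:30'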
def timestamp_elapse_to_seconds(s1):
a = datetime.strptime(s1, '%H:%M:%S')
secs = timedelta(hours=a.hour, minutes=a.minute, seconds=a.second).seconds
return secs
# own custom sorted json serializer, cuz python
def to_json(o, level=0):
INDENT = 2
SPACE = " "
NEWLINE = "\n"
ret = ""
if isinstance(o, dict):
ret += "{" + NEWLINE
comma = ""
for k in sorted(o.keys()):
v = o[k]
ret += comma
comma = ",\n"
ret += SPACE * INDENT * (level+1)
ret += '"' + str(k) + '":' + SPACE
ret += to_json(v, level + 1)
ret += NEWLINE + SPACE * INDENT * level + "}"
elif isinstance(o, str):
ret += '"' + o + '"'
elif isinstance(o, list) or isinstance(o, tuple):
ret += "[" + ",".join([to_json(e, level+1) for e in o]) + "]"
elif isinstance(o, bool):
ret += "true" if o else "false"
elif isinstance(o, int):
ret += str(o)
elif isinstance(o, float):
ret += '%.7g' % o
elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.integer):
ret += "[" + ','.join(map(str, o.flatten().tolist())) + "]"
elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.inexact):
ret += "[" + \
','.join(map(lambda x: '%.7g' % x, o.flatten().tolist())) + "]"
elif o is None:
ret += 'null'
elif hasattr(o, '__class__'):
ret += '"' + o.__class__.__name__ + '"'
else:
raise TypeError(
"Unknown type '%s' for json serialization" % str(type(o)))
return ret
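# Example output (computed from the rules above):
#   to_json({'b': 1, 'a': [True, None]}) ==
#   '{\n  "a": [true,null],\n  "b": 1\n}'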
# format object and its properties into printable dict
def format_obj_dict(obj, keys):
if isinstance(obj, dict):
return to_json(
{k: obj.get(k) for k in keys if obj.get(k) is not None})
else:
return to_json(
{k: getattr(obj, k, None) for k in keys
if getattr(obj, k, None) is not None})
# cast dict to have flat values (int, float, str)
def flat_cast_dict(d):
for k in d:
v = d[k]
if not isinstance(v, (int, float)):
d[k] = str(v)
return d
def flatten_dict(d, parent_key='', sep='_'):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
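# e.g. flatten_dict({'a': {'b': 1}, 'c': 2}) == {'a_b': 1, 'c': 2}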
def get_module(GREF, dot_path):
# get module from globals() by string dot_path
path_arr = dot_path.split('.')
# base level from globals
mod = GREF.get(path_arr.pop(0))
for deeper_path in path_arr:
mod = getattr(mod, deeper_path)
return mod
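# e.g. get_module(globals(), 'np.random.rand') returns the function
# numpy.random.rand, resolved attribute-by-attribute from globals().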
def import_package_files(globals_, locals_, __file__):
'''
Dynamically import all the public attributes of the python modules in this
file's directory (the package directory) and return a list of their names.
'''
exports = []
# globals_, locals_ = globals(), locals()
package_path = path.dirname(__file__)
package_name = path.basename(package_path)
for filename in listdir(package_path):
modulename, ext = path.splitext(filename)
if modulename[0] != '_' and ext in ('.py', '.pyw'):
subpackage = '{}.{}'.format(
package_name, modulename) # pkg relative
module = __import__(subpackage, globals_, locals_, [modulename])
modict = module.__dict__
names = (modict['__all__'] if '__all__' in modict else
[name for name in
modict if inspect.isclass(modict[name])]) # all public
exports.extend(names)
globals_.update((name, modict[name]) for name in names)
return exports
def clean_id_str(id_str):
return id_str.split('/').pop().split('.').pop(0)
def parse_trial_id(id_str):
c_id_str = clean_id_str(id_str)
if re.search(TIMESTAMP_REGEX, c_id_str):
name_time_trial = re.split(TIMESTAMP_REGEX, c_id_str)
if len(name_time_trial) == 3:
return c_id_str
else:
return None
else:
return None
def parse_experiment_id(id_str):
c_id_str = clean_id_str(id_str)
if re.search(TIMESTAMP_REGEX, c_id_str):
name_time_trial = re.split(TIMESTAMP_REGEX, c_id_str)
name_time_trial.pop()
experiment_id = ''.join(name_time_trial)
return experiment_id
else:
return None
def parse_experiment_name(id_str):
c_id_str = clean_id_str(id_str)
experiment_id = parse_experiment_id(c_id_str)
if experiment_id is None:
experiment_name = c_id_str
else:
experiment_name = re.sub(TIMESTAMP_REGEX, '', experiment_id).strip('-')
assert experiment_name in EXPERIMENT_SPECS, \
'{} not in EXPERIMENT_SPECS'.format(experiment_name)
return experiment_name
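# Illustration of the id parsers above (assuming 'dev_dqn' is a defined spec):
#   parse_trial_id('dev_dqn-2017_01_01_000000_t0')       -> the same string
#   parse_experiment_id('dev_dqn-2017_01_01_000000_t0')  -> 'dev_dqn-2017_01_01_000000'
#   parse_experiment_name('dev_dqn-2017_01_01_000000_t0') -> 'dev_dqn'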
def load_data_from_trial_id(id_str):
experiment_id = parse_experiment_id(id_str)
trial_id = parse_trial_id(id_str)
data_filename = './data/{}/{}.json'.format(experiment_id, trial_id)
try:
data = json.loads(open(data_filename).read())
except (FileNotFoundError, json.JSONDecodeError):
data = None
return data
def load_data_array_from_experiment_id(id_str):
# to load all ./data files for a series of trials
experiment_id = parse_experiment_id(id_str)
data_path = './data/{}'.format(experiment_id)
trial_id_array = [
f for f in listdir(data_path)
if (path.isfile(path.join(data_path, f)) and
f.startswith(experiment_id) and
f.endswith('.json'))
]
return list(filter(None, [load_data_from_trial_id(trial_id)
for trial_id in trial_id_array]))
def save_experiment_data(data_df, trial_id):
experiment_id = parse_experiment_id(trial_id)
filedir = './data/{0}'.format(experiment_id)
filename = '{0}_analysis_data.csv'.format(experiment_id)
filepath = '{}/{}'.format(filedir, filename)
data_df.round(6).to_csv(filepath, index=False)
    # zip the csv and best trial json for upload to PR
    with zipfile.ZipFile(filepath+'.zip', mode='w') as zf:
        zf.write(filepath, arcname=filename)
    trial_filename = data_df.loc[0, 'trial_id'] + '.json'
    trial_filepath = '{}/{}'.format(filedir, trial_filename)
    with zipfile.ZipFile(trial_filepath+'.zip', mode='w') as zf:
        zf.write(trial_filepath, arcname=trial_filename)
logger.info(
'experiment data saved to {}'.format(filepath))
def configure_hardware(RAND_SEED):
'''configure rand seed, GPU'''
from keras import backend as K
if K.backend() == 'tensorflow':
K.tf.set_random_seed(RAND_SEED)
else:
K.theano.tensor.shared_randomstreams.RandomStreams(seed=RAND_SEED)
if K.backend() != 'tensorflow':
# GPU config for tf only
return
process_num = PARALLEL_PROCESS_NUM if args.param_selection else 1
tf = K.tf
gpu_options = tf.GPUOptions(
allow_growth=True,
per_process_gpu_memory_fraction=1./float(process_num))
config = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True)
sess = tf.Session(config=config)
K.set_session(sess)
return sess
def debug_mem_usage():
import psutil
from mem_top import mem_top
pid = getpid()
logger.debug(
'MEM USAGE for PID {}, MEM_INFO: {}\n{}'.format(
pid, psutil.Process().memory_info(), mem_top()))
def del_self_attr(subject):
self_attrs = list(subject.__dict__.keys())
for attr in self_attrs:
delattr(subject, attr)
import gc
gc.collect()
# clone a keras model without file I/O
def clone_model(model, custom_objects=None):
from keras.models import model_from_config
custom_objects = custom_objects or {}
config = {
'class_name': model.__class__.__name__,
'config': model.get_config(),
}
clone = model_from_config(config, custom_objects=custom_objects)
clone.set_weights(model.get_weights())
return clone
# clone a keras optimizer without file I/O
def clone_optimizer(optimizer):
    from keras.optimizers import optimizer_from_config, get
    if isinstance(optimizer, str):
        return get(optimizer)
    params = dict(optimizer.get_config())
config = {
'class_name': optimizer.__class__.__name__,
'config': params,
}
clone = optimizer_from_config(config)
return clone
# ---- kengz/openai_lab | rl/util.py | Python | mit | 16,973 bytes ----
def subleq(a, b, c):
    return "%s %s %s" % (a, b, c)

def next_subleq(a, b):
    return subleq(a, b, "NEXT")

def clear(a):
    return subleq(a, a, "NEXT")

# ---- purisc-group/purisc | compiler/class_def/conversions/helpers.py | Python | gpl-2.0 | 168 bytes ----
'''<b>Conserve Memory</b> speeds up CellProfiler by removing images from memory.
<hr>
This module removes images from memory, which can speed up processing and
prevent out-of-memory errors.
<i>Note:</i> CellProfiler 1.0's <b>SpeedUpCellProfiler</b> had an option that let you
choose how often the output file of measurements was saved. This option is no longer necessary since
the output file is automatically updated with each image set.
'''
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import gc
import numpy as np
import cellprofiler.cpmodule as cpm
import cellprofiler.settings as cps
C_REMOVE_OLD = "Remove"
C_REMOVE = "Images to remove"
C_KEEP_OLD = "Keep"
C_KEEP = "Images to keep"
'''# of settings in a module independent of the image settings'''
S_NUMBER_OF_PER_MODULE_SETTINGS = 1
'''# of settings per image in the pipeline'''
S_NUMBER_OF_SETTINGS_PER_IMAGE = 1
class ConserveMemory(cpm.CPModule):
module_name = "ConserveMemory"
category = 'Other'
variable_revision_number = 1
def create_settings(self):
self.how_to_remove = cps.Choice(
"Specify which images?",
[C_REMOVE, C_KEEP], doc="""
You can select from the following options:
<ul>
<li><i>%(C_REMOVE)s:</i> Remove some images from memory and keep the rest.</li>
<li><i>%(C_KEEP)s:</i> Keep some images and remove the rest.</li>
</ul>"""%globals())
self.spacer_top = cps.Divider(line=False)
self.image_names = []
self.add_image(can_remove = False)
self.spacer_bottom = cps.Divider(line=False)
self.add_image_button = cps.DoSomething("", "Add another image",
self.add_image)
def query(self):
if self.how_to_remove == C_REMOVE:
return "Select image to remove"
else:
return "Select image to keep"
def add_image(self, can_remove = True):
'''Add an image to the list of image names
can_remove - set this to False to keep from showing the "remove"
button for images that must be present.
'''
group = cps.SettingsGroup()
if can_remove:
group.append("divider", cps.Divider(line=False))
group.append("image_name", cps.ImageNameSubscriber(self.query(), cps.NONE))
if can_remove:
group.append("remover", cps.RemoveSettingButton("",
"Remove this image",
self.image_names,
group))
self.image_names.append(group)
def settings(self):
return [self.how_to_remove] + [im.image_name for im in self.image_names]
def prepare_settings(self, setting_values):
image_count = ((len(setting_values) - S_NUMBER_OF_PER_MODULE_SETTINGS) /
S_NUMBER_OF_SETTINGS_PER_IMAGE)
del self.image_names[image_count:]
while image_count > len(self.image_names):
self.add_image()
def visible_settings(self):
for image_setting in self.image_names:
image_setting.image_name.text = self.query()
result = [self.how_to_remove, self.spacer_top]
for image_setting in self.image_names:
result += image_setting.visible_settings()
result += [self.add_image_button]
return result
def run(self, workspace):
image_set = workspace.image_set
image_names = [x.image_name.value for x in self.image_names]
workspace.display_data.statistics = []
if self.how_to_remove == C_KEEP:
all_names = [x.name for x in image_set.providers]
for name in set(all_names) - set(image_names):
image_set.clear_image(name)
for name in image_names:
workspace.display_data.statistics.append(["Kept %s"%name])
else:
for name in image_names:
image_set.clear_image(name)
workspace.display_data.statistics.append(["Removed %s"%name])
gc.collect()
def display(self, workspace, figure):
figure.set_subplots((1, 1))
figure.subplot_table(0, 0, workspace.display_data.statistics)
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
if from_matlab and variable_revision_number == 5:
new_setting_values = [ C_REMOVE ]
for image_name in setting_values[2:]:
if image_name.lower() != cps.DO_NOT_USE.lower():
new_setting_values.append(image_name)
setting_values = new_setting_values
variable_revision_number = 1
from_matlab = False
if (not from_matlab) and variable_revision_number == 1:
# There was some skew in the capitalization of the first
# setting. We rewrite it, but we leave the revision
# number at 1.
remap = {'remove' : C_REMOVE,
C_REMOVE_OLD: C_REMOVE,
'keep' : C_KEEP,
C_KEEP_OLD: C_KEEP
}
if setting_values[0] in remap:
setting_values[0] = remap[setting_values[0]]
return setting_values, variable_revision_number, from_matlab
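
# Backward-compatibility alias: pipelines saved with the CellProfiler 1.0
# module name "SpeedUpCellProfiler" resolve to ConserveMemory (see the
# Matlab-era upgrade path in upgrade_settings above).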
SpeedUpCellProfiler = ConserveMemory
# ---- LeeKamentsky/CellProfiler | cellprofiler/modules/conservememory.py | Python | gpl-2.0 | 5,810 bytes ----
from run import run_base
class run_names(run_base):
# Verify behavior when multiple --name options passed
def init_subargs(self):
cont = self.sub_stuff["cont"]
name_base = cont.get_unique_name()
names = []
for number in xrange(self.config['names_count']):
name = ('%s_%d' % (name_base, number))
names.append(name)
self.sub_stuff['containers'].append(name) # just in case
self.sub_stuff['subargs'] += ["--name %s" % name for name in names]
if self.config['last_name_sticks']:
self.sub_stuff['expected_name'] = names[-1]
else:
self.sub_stuff['expected_name'] = names[0]
super(run_names, self).init_subargs()
def run_once(self):
super(run_names, self).run_once()
cid = self.sub_stuff['cid'] = self.sub_stuff['dkrcmd'].stdout.strip()
self.sub_stuff['containers'].append(cid)
try:
self.sub_stuff["cont"].wait_by_long_id(cid)
except ValueError:
pass # container already finished and exited
def postprocess(self):
super(run_names, self).postprocess()
cont = self.sub_stuff["cont"]
json = cont.json_by_long_id(self.sub_stuff['cid'])
self.failif(len(json) == 0)
# docker sticks a "/" prefix on name (documented?)
actual_name = str(json[0]['Name'][1:])
self.failif_ne(actual_name, self.sub_stuff['expected_name'], "Name")
# ---- afomm/autotest-docker | subtests/docker_cli/run/run_names.py | Python | gpl-2.0 | 1,479 bytes ----
import os
from unittest import TestCase, skipIf
from zabbix.api import ZabbixAPI
from pyzabbix import ZabbixAPIException
@skipIf('TRAVIS' not in os.environ.keys(), "Travis CI test")
class FunctionalAPI(TestCase):
def test_LoginToServer(self):
try:
ZabbixAPI(url='http://127.0.0.1',
user='Admin',
password='zabbix')
except ZabbixAPIException:
self.fail('Can\'t login to Zabbix')
# ---- blacked/py-zabbix | tests/test_Functional_API_Old.py | Python | gpl-2.0 | 469 bytes ----
from enigma import getPrevAsciiCode
from Tools.NumericalTextInput import NumericalTextInput
from Tools.Directories import resolveFilename, SCOPE_CONFIG, fileExists
from Components.Harddisk import harddiskmanager
from copy import copy as copy_copy
from os import path as os_path
from time import localtime, strftime
# ConfigElement, the base class of all ConfigElements.
# it stores:
# value the current value, usefully encoded.
# usually a property which retrieves _value,
# and maybe does some reformatting
# _value the value as it's going to be saved in the configfile,
# though still in non-string form.
# this is the object which is actually worked on.
# default the initial value. If _value is equal to default,
# it will not be stored in the config file
# saved_value is a text representation of _value, stored in the config file
#
# and has (at least) the following methods:
# save() stores _value into saved_value,
# (or stores 'None' if it should not be stored)
# load() loads _value from saved_value, or loads
# the default if saved_value is 'None' (default)
# or invalid.
#
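# A minimal lifecycle sketch (illustrative, using ConfigYesNo defined below):
#
#   c = ConfigYesNo(default=False)
#   c.value = True
#   c.save()     # saved_value becomes "true"
#   c.cancel()   # load() restores value from saved_value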
class ConfigElement(object):
def __init__(self):
self.extra_args = {}
self.saved_value = None
self.save_forced = False
self.last_value = None
self.save_disabled = False
self.__notifiers = { }
self.__notifiers_final = { }
self.enabled = True
self.callNotifiersOnSaveAndCancel = False
def getNotifiers(self):
return [func for (func, val, call_on_save_and_cancel) in self.__notifiers.itervalues()]
def setNotifiers(self, val):
print "just readonly access to notifiers is allowed! append/remove doesnt work anymore! please use addNotifier, removeNotifier, clearNotifiers"
notifiers = property(getNotifiers, setNotifiers)
def getNotifiersFinal(self):
return [func for (func, val, call_on_save_and_cancel) in self.__notifiers_final.itervalues()]
def setNotifiersFinal(self, val):
print "just readonly access to notifiers_final is allowed! append/remove doesnt work anymore! please use addNotifier, removeNotifier, clearNotifiers"
notifiers_final = property(getNotifiersFinal, setNotifiersFinal)
# you need to override this to do input validation
def setValue(self, value):
self._value = value
self.changed()
def getValue(self):
return self._value
value = property(getValue, setValue)
# you need to override this if self.value is not a string
def fromstring(self, value):
return value
# you can overide this for fancy default handling
def load(self):
sv = self.saved_value
if sv is None:
self.value = self.default
else:
self.value = self.fromstring(sv)
def tostring(self, value):
return str(value)
# you need to override this if str(self.value) doesn't work
def save(self):
if self.save_disabled or (self.value == self.default and not self.save_forced):
self.saved_value = None
else:
self.saved_value = self.tostring(self.value)
if self.callNotifiersOnSaveAndCancel:
self.changed()
def cancel(self):
self.load()
if self.callNotifiersOnSaveAndCancel:
self.changed()
def isChanged(self):
sv = self.saved_value
if sv is None and self.value == self.default:
return False
return self.tostring(self.value) != sv
def changed(self):
if self.__notifiers:
for x in self.notifiers:
try:
if self.extra_args and self.extra_args[x]:
x(self, self.extra_args[x])
else:
x(self)
except:
x(self)
def changedFinal(self):
if self.__notifiers_final:
for x in self.notifiers_final:
try:
if self.extra_args and self.extra_args[x]:
x(self, self.extra_args[x])
else:
x(self)
except:
x(self)
# immediate_feedback = True means call notifier on every value CHANGE
# immediate_feedback = False means call notifier on leave the config element (up/down) when value have CHANGED
# call_on_save_or_cancel = True means call notifier always on save/cancel.. even when value have not changed
def addNotifier(self, notifier, initial_call = True, immediate_feedback = True, call_on_save_or_cancel = False, extra_args=None):
if not extra_args: extra_args = []
assert callable(notifier), "notifiers must be callable"
try:
self.extra_args[notifier] = extra_args
except: pass
if immediate_feedback:
self.__notifiers[str(notifier)] = (notifier, self.value, call_on_save_or_cancel)
else:
self.__notifiers_final[str(notifier)] = (notifier, self.value, call_on_save_or_cancel)
# CHECKME:
# do we want to call the notifier
# - at all when adding it? (yes, though optional)
# - when the default is active? (yes)
# - when no value *yet* has been set,
# because no config has ever been read (currently yes)
# (though that's not so easy to detect.
# the entry could just be new.)
if initial_call:
if extra_args:
notifier(self,extra_args)
else:
notifier(self)
def removeNotifier(self, notifier):
try:
del self.__notifiers[str(notifier)]
except:
try:
del self.__notifiers_final[str(notifier)]
except:
pass
def clearNotifiers(self):
self.__notifiers = { }
self.__notifiers_final = { }
def disableSave(self):
self.save_disabled = True
def __call__(self, selected):
return self.getMulti(selected)
def onSelect(self, session):
pass
def onDeselect(self, session):
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
KEY_LEFT = 0
KEY_RIGHT = 1
KEY_OK = 2
KEY_DELETE = 3
KEY_BACKSPACE = 4
KEY_HOME = 5
KEY_END = 6
KEY_TOGGLEOW = 7
KEY_ASCII = 8
KEY_TIMEOUT = 9
KEY_NUMBERS = range(12, 12+10)
KEY_0 = 12
KEY_9 = 12+9
def getKeyNumber(key):
assert key in KEY_NUMBERS
return key - KEY_0
class choicesList(object): # XXX: we might want a better name for this
LIST_TYPE_LIST = 1
LIST_TYPE_DICT = 2
def __init__(self, choices, type = None):
self.choices = choices
if type is None:
if isinstance(choices, list):
self.type = choicesList.LIST_TYPE_LIST
elif isinstance(choices, dict):
self.type = choicesList.LIST_TYPE_DICT
else:
assert False, "choices must be dict or list!"
else:
self.type = type
def __list__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
else:
ret = self.choices.keys()
return ret or [""]
def __iter__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[0] for x in self.choices]
else:
ret = self.choices
return iter(ret or [""])
def __len__(self):
return len(self.choices) or 1
def updateItemDescription(self, index, descr):
if self.type == choicesList.LIST_TYPE_LIST:
orig = self.choices[index]
assert isinstance(orig, tuple)
self.choices[index] = (orig[0], descr)
else:
key = self.choices.keys()[index]
self.choices[key] = descr
def __getitem__(self, index):
if self.type == choicesList.LIST_TYPE_LIST:
ret = self.choices[index]
if isinstance(ret, tuple):
ret = ret[0]
return ret
return self.choices.keys()[index]
def index(self, value):
try:
return self.__list__().index(value)
except (ValueError, IndexError):
# occurs e.g. when default is not in list
return 0
def __setitem__(self, index, value):
if self.type == choicesList.LIST_TYPE_LIST:
orig = self.choices[index]
if isinstance(orig, tuple):
self.choices[index] = (value, orig[1])
else:
self.choices[index] = value
else:
key = self.choices.keys()[index]
orig = self.choices[key]
del self.choices[key]
self.choices[value] = orig
def default(self):
choices = self.choices
if not choices:
return ""
if self.type is choicesList.LIST_TYPE_LIST:
default = choices[0]
if isinstance(default, tuple):
default = default[0]
else:
default = choices.keys()[0]
return default
class descriptionList(choicesList): # XXX: we might want a better name for this
def __list__(self):
if self.type == choicesList.LIST_TYPE_LIST:
ret = [not isinstance(x, tuple) and x or x[1] for x in self.choices]
else:
ret = self.choices.values()
return ret or [""]
def __iter__(self):
return iter(self.__list__())
def __getitem__(self, index):
if self.type == choicesList.LIST_TYPE_LIST:
for x in self.choices:
if isinstance(x, tuple):
if x[0] == index:
return str(x[1])
elif x == index:
return str(x)
return str(index) # Fallback!
else:
return str(self.choices.get(index, ""))
def __setitem__(self, index, value):
if self.type == choicesList.LIST_TYPE_LIST:
i = self.index(index)
orig = self.choices[i]
if isinstance(orig, tuple):
self.choices[i] = (orig[0], value)
else:
self.choices[i] = value
else:
self.choices[index] = value
#
# ConfigSelection is a "one of.."-type.
# it has the "choices", usually a list, which contains
# (id, desc)-tuples (or just only the ids, in case the id
# will be used as description)
#
# all ids MUST be plain strings.
#
class ConfigSelection(ConfigElement):
def __init__(self, choices, default = None):
ConfigElement.__init__(self)
self.choices = choicesList(choices)
if default is None:
default = self.choices.default()
self._descr = None
self.default = self._value = self.last_value = default
def setChoices(self, choices, default = None):
self.choices = choicesList(choices)
if default is None:
default = self.choices.default()
self.default = default
if self.value not in self.choices:
self.value = default
def setValue(self, value):
if value in self.choices:
self._value = value
else:
self._value = self.default
self._descr = None
self.changed()
def tostring(self, val):
return val
def getValue(self):
return self._value
def setCurrentText(self, text):
i = self.choices.index(self.value)
self.choices[i] = text
self._descr = self.description[text] = text
self._value = text
value = property(getValue, setValue)
def getIndex(self):
return self.choices.index(self.value)
index = property(getIndex)
# GUI
def handleKey(self, key):
nchoices = len(self.choices)
if nchoices > 1:
i = self.choices.index(self.value)
if key == KEY_LEFT:
self.value = self.choices[(i + nchoices - 1) % nchoices]
elif key == KEY_RIGHT:
self.value = self.choices[(i + 1) % nchoices]
elif key == KEY_HOME:
self.value = self.choices[0]
elif key == KEY_END:
self.value = self.choices[nchoices - 1]
def selectNext(self):
nchoices = len(self.choices)
i = self.choices.index(self.value)
self.value = self.choices[(i + 1) % nchoices]
def getText(self):
if self._descr is None:
self._descr = self.description[self.value]
return self._descr
def getMulti(self, selected):
if self._descr is None:
self._descr = self.description[self.value]
return ("text", self._descr)
# HTML
def getHTML(self, id):
res = ""
for v in self.choices:
descr = self.description[v]
if self.value == v:
checked = 'checked="checked" '
else:
checked = ''
res += '<input type="radio" name="' + id + '" ' + checked + 'value="' + v + '">' + descr + "</input></br>\n"
return res
def unsafeAssign(self, value):
# setValue does check if value is in choices. This is safe enough.
self.value = value
description = property(lambda self: descriptionList(self.choices.choices, self.choices.type))
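
# Usage sketch (illustrative): choices may be plain ids or (id, description)
# tuples; all ids must be strings:
#
#   ConfigSelection(choices=[("1", _("one")), ("2", _("two"))], default="2")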
# a binary decision.
#
# several customized versions exist for different
# descriptions.
#
class ConfigBoolean(ConfigElement):
def __init__(self, default = False, descriptions = {False: _("false"), True: _("true")}):
ConfigElement.__init__(self)
self.descriptions = descriptions
self.value = self.last_value = self.default = default
def handleKey(self, key):
if key in (KEY_LEFT, KEY_RIGHT):
self.value = not self.value
elif key == KEY_HOME:
self.value = False
elif key == KEY_END:
self.value = True
def getText(self):
return self.descriptions[self.value]
def getMulti(self, selected):
return ("text", self.descriptions[self.value])
def tostring(self, value):
if not value:
return "false"
else:
return "true"
def fromstring(self, val):
if val == "true":
return True
else:
return False
def getHTML(self, id):
if self.value:
checked = ' checked="checked"'
else:
checked = ''
return '<input type="checkbox" name="' + id + '" value="1" ' + checked + " />"
# this is FLAWED. and must be fixed.
def unsafeAssign(self, value):
if value == "1":
self.value = True
else:
self.value = False
def onDeselect(self, session):
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
class ConfigYesNo(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = {False: _("no"), True: _("yes")})
class ConfigOnOff(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = {False: _("off"), True: _("on")})
class ConfigEnableDisable(ConfigBoolean):
def __init__(self, default = False):
ConfigBoolean.__init__(self, default = default, descriptions = {False: _("disable"), True: _("enable")})
class ConfigDateTime(ConfigElement):
def __init__(self, default, formatstring, increment = 86400):
ConfigElement.__init__(self)
self.increment = increment
self.formatstring = formatstring
self.value = self.last_value = self.default = int(default)
def handleKey(self, key):
if key == KEY_LEFT:
self.value -= self.increment
elif key == KEY_RIGHT:
self.value += self.increment
elif key == KEY_HOME or key == KEY_END:
self.value = self.default
def getText(self):
return strftime(self.formatstring, localtime(self.value))
def getMulti(self, selected):
return "text", strftime(self.formatstring, localtime(self.value))
def fromstring(self, val):
return int(val)
# *THE* mighty config element class
#
# allows you to store/edit a sequence of values.
# can be used for IP-addresses, dates, plain integers, ...
# several helper exist to ease this up a bit.
#
class ConfigSequence(ConfigElement):
def __init__(self, seperator, limits, default, censor_char = ""):
ConfigElement.__init__(self)
assert isinstance(limits, list) and len(limits[0]) == 2, "limits must be [(min, max),...]-tuple-list"
assert censor_char == "" or len(censor_char) == 1, "censor char must be a single char (or \"\")"
#assert isinstance(default, list), "default must be a list"
#assert isinstance(default[0], int), "list must contain numbers"
#assert len(default) == len(limits), "length must match"
self.marked_pos = 0
self.seperator = seperator
self.limits = limits
self.censor_char = censor_char
self.last_value = self.default = default
self.value = copy_copy(default)
self.endNotifier = None
def validate(self):
max_pos = 0
num = 0
for i in self._value:
max_pos += len(str(self.limits[num][1]))
if self._value[num] < self.limits[num][0]:
self._value[num] = self.limits[num][0]
if self._value[num] > self.limits[num][1]:
self._value[num] = self.limits[num][1]
num += 1
if self.marked_pos >= max_pos:
if self.endNotifier:
for x in self.endNotifier:
x(self)
self.marked_pos = max_pos - 1
if self.marked_pos < 0:
self.marked_pos = 0
def validatePos(self):
if self.marked_pos < 0:
self.marked_pos = 0
total_len = sum([len(str(x[1])) for x in self.limits])
if self.marked_pos >= total_len:
self.marked_pos = total_len - 1
def addEndNotifier(self, notifier):
if self.endNotifier is None:
self.endNotifier = []
self.endNotifier.append(notifier)
def handleKey(self, key):
if key == KEY_LEFT:
self.marked_pos -= 1
self.validatePos()
elif key == KEY_RIGHT:
self.marked_pos += 1
self.validatePos()
elif key == KEY_HOME:
self.marked_pos = 0
self.validatePos()
elif key == KEY_END:
max_pos = 0
num = 0
for i in self._value:
max_pos += len(str(self.limits[num][1]))
num += 1
self.marked_pos = max_pos - 1
self.validatePos()
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
block_len = [len(str(x[1])) for x in self.limits]
total_len = sum(block_len)
pos = 0
blocknumber = 0
block_len_total = [0, ]
for x in block_len:
pos += block_len[blocknumber]
block_len_total.append(pos)
if pos - 1 >= self.marked_pos:
pass
else:
blocknumber += 1
# length of numberblock
number_len = len(str(self.limits[blocknumber][1]))
# position in the block
posinblock = self.marked_pos - block_len_total[blocknumber]
oldvalue = self._value[blocknumber]
olddec = oldvalue % 10 ** (number_len - posinblock) - (oldvalue % 10 ** (number_len - posinblock - 1))
newvalue = oldvalue - olddec + (10 ** (number_len - posinblock - 1) * number)
self._value[blocknumber] = newvalue
self.marked_pos += 1
self.validate()
self.changed()
def genText(self):
value = ""
mPos = self.marked_pos
num = 0
for i in self._value:
if value: #fixme no heading separator possible
value += self.seperator
if mPos >= len(value) - 1:
mPos += 1
if self.censor_char == "":
value += ("%0" + str(len(str(self.limits[num][1]))) + "d") % i
else:
value += (self.censor_char * len(str(self.limits[num][1])))
num += 1
return value, mPos
def getText(self):
(value, mPos) = self.genText()
return value
def getMulti(self, selected):
(value, mPos) = self.genText()
# only mark cursor when we are selected
# (this code is heavily ink optimized!)
if self.enabled:
return "mtext"[1-selected:], value, [mPos]
else:
return "text", value
def tostring(self, val):
return self.seperator.join([self.saveSingle(x) for x in val])
def saveSingle(self, v):
return str(v)
def fromstring(self, value):
try:
return [int(x) for x in value.split(self.seperator)]
except:
return self.default
def onDeselect(self, session):
if self.last_value != self._value:
self.changedFinal()
self.last_value = copy_copy(self._value)
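
# Usage sketch (illustrative): a two-field sequence rendered as "MM:SS",
# each field clamped to 0..59 (note the class spells its kwarg "seperator"):
#
#   ConfigSequence(seperator=":", limits=[(0, 59), (0, 59)], default=[0, 0])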
ip_limits = [(0,255),(0,255),(0,255),(0,255)]
class ConfigIP(ConfigSequence):
def __init__(self, default, auto_jump = False):
ConfigSequence.__init__(self, seperator = ".", limits = ip_limits, default = default)
self.block_len = [len(str(x[1])) for x in self.limits]
self.marked_block = 0
self.overwrite = True
self.auto_jump = auto_jump
def handleKey(self, key):
if key == KEY_LEFT:
if self.marked_block > 0:
self.marked_block -= 1
self.overwrite = True
elif key == KEY_RIGHT:
if self.marked_block < len(self.limits)-1:
self.marked_block += 1
self.overwrite = True
elif key == KEY_HOME:
self.marked_block = 0
self.overwrite = True
elif key == KEY_END:
self.marked_block = len(self.limits)-1
self.overwrite = True
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
oldvalue = self._value[self.marked_block]
if self.overwrite:
self._value[self.marked_block] = number
self.overwrite = False
else:
oldvalue *= 10
newvalue = oldvalue + number
if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
self.handleKey(KEY_RIGHT)
self.handleKey(key)
return
else:
self._value[self.marked_block] = newvalue
if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
self.handleKey(KEY_RIGHT)
self.validate()
self.changed()
def genText(self):
value = ""
block_strlen = []
for i in self._value:
block_strlen.append(len(str(i)))
if value:
value += self.seperator
value += str(i)
leftPos = sum(block_strlen[:self.marked_block])+self.marked_block
rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
mBlock = range(leftPos, rightPos)
return value, mBlock
def getMulti(self, selected):
(value, mBlock) = self.genText()
if self.enabled:
return "mtext"[1-selected:], value, mBlock
else:
return "text", value
def getHTML(self, id):
# we definitely don't want leading zeros
return '.'.join(["%d" % d for d in self.value])
mac_limits = [(1,255),(1,255),(1,255),(1,255),(1,255),(1,255)]
class ConfigMAC(ConfigSequence):
def __init__(self, default):
ConfigSequence.__init__(self, seperator = ":", limits = mac_limits, default = default)
class ConfigMacText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = 17
self.visible_width = visible_width
self.offset = 0
self.overwrite = 17
self.help_window = None
self.value = self.last_value = self.default = default
self.useableChars = '0123456789ABCDEF'
def validateMarker(self):
textlen = len(self.text)
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
elif self.marked_pos < 0:
self.marked_pos = 0
def insertChar(self, ch, pos, owr):
if self.text[pos] == ':':
pos += 1
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def handleKey(self, key):
if key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
if self.text[self.marked_pos-1] == ':':
self.marked_pos -= 2
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
if self.marked_pos < (len(self.text)-1):
if self.text[self.marked_pos+1] == ':':
self.marked_pos += 2
else:
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
if self.text[self.marked_pos] == ':':
self.marked_pos += 1
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
try:
return self.text.encode("utf-8")
except UnicodeDecodeError:
print "Broken UTF8!"
return self.text
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return "mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return "mtext"[1-selected:], self.text.encode("utf-8")+" ", mark
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.setAnimationMode(0)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
class ConfigPosition(ConfigSequence):
def __init__(self, default, args):
ConfigSequence.__init__(self, seperator = ",", limits = [(0,args[0]),(0,args[1]),(0,args[2]),(0,args[3])], default = default)
clock_limits = [(0,23),(0,59)]
class ConfigClock(ConfigSequence):
def __init__(self, default):
t = localtime(default)
ConfigSequence.__init__(self, seperator = ":", limits = clock_limits, default = [t.tm_hour, t.tm_min])
def increment(self):
# Check if Minutes maxed out
if self._value[1] == 59:
# Increment Hour, reset Minutes
if self._value[0] < 23:
self._value[0] += 1
else:
self._value[0] = 0
self._value[1] = 0
else:
# Increment Minutes
self._value[1] += 1
# Trigger change
self.changed()
def decrement(self):
# Check if Minutes is minimum
if self._value[1] == 0:
# Decrement Hour, set Minutes to 59
if self._value[0] > 0:
self._value[0] -= 1
else:
self._value[0] = 23
self._value[1] = 59
else:
# Decrement Minutes
self._value[1] -= 1
# Trigger change
self.changed()
integer_limits = (0, 9999999999)
class ConfigInteger(ConfigSequence):
def __init__(self, default, limits = integer_limits):
ConfigSequence.__init__(self, seperator = ":", limits = [limits], default = default)
# you need to override this to do input validation
def setValue(self, value):
self._value = [value]
self.changed()
def getValue(self):
return self._value[0]
value = property(getValue, setValue)
def fromstring(self, value):
return int(value)
def tostring(self, value):
return str(value)
class ConfigPIN(ConfigInteger):
def __init__(self, default, len = 4, censor = ""):
assert isinstance(default, int), "ConfigPIN default must be an integer"
ConfigSequence.__init__(self, seperator = ":", limits = [(0, (10**len)-1)], censor_char = censor, default = default)
self.len = len
def getLength(self):
return self.len
class ConfigFloat(ConfigSequence):
def __init__(self, default, limits):
ConfigSequence.__init__(self, seperator = ".", limits = limits, default = default)
def getFloat(self):
return float(self.value[1] / float(self.limits[1][1] + 1) + self.value[0])
float = property(getFloat)
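
# e.g. with limits=[(0, 9), (0, 99)] and value=[1, 50], the float property
# yields 1 + 50/100.0 == 1.5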
# an editable text...
class ConfigText(ConfigElement, NumericalTextInput):
def __init__(self, default = "", fixed_size = True, visible_width = False):
ConfigElement.__init__(self)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False)
self.marked_pos = 0
self.allmarked = (default != "")
self.fixed_size = fixed_size
self.visible_width = visible_width
self.offset = 0
self.overwrite = fixed_size
self.help_window = None
self.value = self.last_value = self.default = default
def validateMarker(self):
textlen = len(self.text)
if self.fixed_size:
if self.marked_pos > textlen-1:
self.marked_pos = textlen-1
else:
if self.marked_pos > textlen:
self.marked_pos = textlen
if self.marked_pos < 0:
self.marked_pos = 0
if self.visible_width:
if self.marked_pos < self.offset:
self.offset = self.marked_pos
if self.marked_pos >= self.offset + self.visible_width:
if self.marked_pos == textlen:
self.offset = self.marked_pos - self.visible_width
else:
self.offset = self.marked_pos - self.visible_width + 1
if self.offset > 0 and self.offset + self.visible_width > textlen:
				self.offset = max(0, textlen - self.visible_width)
def insertChar(self, ch, pos, owr):
if owr or self.overwrite:
self.text = self.text[0:pos] + ch + self.text[pos + 1:]
elif self.fixed_size:
self.text = self.text[0:pos] + ch + self.text[pos:-1]
else:
self.text = self.text[0:pos] + ch + self.text[pos:]
def deleteChar(self, pos):
if not self.fixed_size:
self.text = self.text[0:pos] + self.text[pos + 1:]
elif self.overwrite:
self.text = self.text[0:pos] + " " + self.text[pos + 1:]
else:
self.text = self.text[0:pos] + self.text[pos + 1:] + " "
def deleteAllChars(self):
if self.fixed_size:
self.text = " " * len(self.text)
else:
self.text = ""
self.marked_pos = 0
def handleKey(self, key):
# this will no change anything on the value itself
# so we can handle it here in gui element
if key == KEY_DELETE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
else:
self.deleteChar(self.marked_pos)
if self.fixed_size and self.overwrite:
self.marked_pos += 1
elif key == KEY_BACKSPACE:
self.timeout()
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
elif self.marked_pos > 0:
self.deleteChar(self.marked_pos-1)
if not self.fixed_size and self.offset > 0:
self.offset -= 1
self.marked_pos -= 1
elif key == KEY_LEFT:
self.timeout()
if self.allmarked:
self.marked_pos = len(self.text)
self.allmarked = False
else:
self.marked_pos -= 1
elif key == KEY_RIGHT:
self.timeout()
if self.allmarked:
self.marked_pos = 0
self.allmarked = False
else:
self.marked_pos += 1
elif key == KEY_HOME:
self.timeout()
self.allmarked = False
self.marked_pos = 0
elif key == KEY_END:
self.timeout()
self.allmarked = False
self.marked_pos = len(self.text)
elif key == KEY_TOGGLEOW:
self.timeout()
self.overwrite = not self.overwrite
elif key == KEY_ASCII:
self.timeout()
newChar = unichr(getPrevAsciiCode())
if not self.useableChars or newChar in self.useableChars:
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
elif key in KEY_NUMBERS:
owr = self.lastKey == getKeyNumber(key)
newChar = self.getKey(getKeyNumber(key))
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, owr)
elif key == KEY_TIMEOUT:
self.timeout()
if self.help_window:
self.help_window.update(self)
return
if self.help_window:
self.help_window.update(self)
self.validateMarker()
self.changed()
def nextFunc(self):
self.marked_pos += 1
self.validateMarker()
self.changed()
def getValue(self):
try:
return self.text.encode("utf-8")
except UnicodeDecodeError:
print "Broken UTF8!"
return self.text
def setValue(self, val):
try:
self.text = val.decode("utf-8")
except UnicodeDecodeError:
self.text = val.decode("utf-8", "ignore")
print "Broken UTF8!"
value = property(getValue, setValue)
_value = property(getValue, setValue)
def getText(self):
return self.text.encode("utf-8")
def getMulti(self, selected):
if self.visible_width:
if self.allmarked:
mark = range(0, min(self.visible_width, len(self.text)))
else:
mark = [self.marked_pos-self.offset]
return "mtext"[1-selected:], self.text[self.offset:self.offset+self.visible_width].encode("utf-8")+" ", mark
else:
if self.allmarked:
mark = range(0, len(self.text))
else:
mark = [self.marked_pos]
return "mtext"[1-selected:], self.text.encode("utf-8")+" ", mark
def onSelect(self, session):
self.allmarked = (self.value != "")
if session is not None:
from Screens.NumericalTextInputHelpDialog import NumericalTextInputHelpDialog
self.help_window = session.instantiateDialog(NumericalTextInputHelpDialog, self)
self.help_window.setAnimationMode(0)
self.help_window.show()
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if self.help_window:
session.deleteDialog(self.help_window)
self.help_window = None
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
def getHTML(self, id):
return '<input type="text" name="' + id + '" value="' + self.value + '" /><br>\n'
def unsafeAssign(self, value):
self.value = str(value)
class ConfigPassword(ConfigText):
def __init__(self, default = "", fixed_size = False, visible_width = False, censor = "*"):
ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
self.censor_char = censor
self.hidden = True
def getMulti(self, selected):
mtext, text, mark = ConfigText.getMulti(self, selected)
if self.hidden:
text = len(text) * self.censor_char
return mtext, text, mark
def onSelect(self, session):
ConfigText.onSelect(self, session)
self.hidden = False
def onDeselect(self, session):
ConfigText.onDeselect(self, session)
self.hidden = True
# lets the user select between [min, min+stepwidth, min+(stepwidth*2)..., maxval] with maxval <= max depending
# on the stepwidth
# min, max, stepwidth, default are int values
# wraparound: pressing RIGHT key at max value brings you to min value and vice versa if set to True
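# Illustrative usage (names assumed, not from this file): a 0-100 selector
# stepping by 5 that wraps from 100 back to 0:
#   config.osd.alpha = ConfigSelectionNumber(0, 100, 5, default=50, wraparound=True)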
class ConfigSelectionNumber(ConfigSelection):
def __init__(self, min, max, stepwidth, default = None, wraparound = False):
self.wraparound = wraparound
if default is None:
default = min
default = str(default)
choices = []
step = min
while step <= max:
choices.append(str(step))
step += stepwidth
ConfigSelection.__init__(self, choices, default)
def getValue(self):
return int(ConfigSelection.getValue(self))
def setValue(self, val):
ConfigSelection.setValue(self, str(val))
value = property(getValue, setValue)
def getIndex(self):
return self.choices.index(self.value)
index = property(getIndex)
def isChanged(self):
sv = self.saved_value
strv = str(self.tostring(self.value))
if sv is None and strv == str(self.default):
return False
return strv != str(sv)
def handleKey(self, key):
if not self.wraparound:
if key == KEY_RIGHT:
if len(self.choices) == (self.choices.index(str(self.value)) + 1):
return
if key == KEY_LEFT:
if self.choices.index(str(self.value)) == 0:
return
nchoices = len(self.choices)
if nchoices > 1:
i = self.choices.index(str(self.value))
if key == KEY_LEFT:
self.value = self.choices[(i + nchoices - 1) % nchoices]
elif key == KEY_RIGHT:
self.value = self.choices[(i + 1) % nchoices]
elif key == KEY_HOME:
self.value = self.choices[0]
elif key == KEY_END:
self.value = self.choices[nchoices - 1]
class ConfigNumber(ConfigText):
def __init__(self, default = 0):
ConfigText.__init__(self, str(default), fixed_size = False)
def getValue(self):
try:
return int(self.text)
except ValueError:
if self.text == "true":
self.text = "1"
else:
				self.text = str(self.default)
return int(self.text)
def setValue(self, val):
self.text = str(val)
value = property(getValue, setValue)
_value = property(getValue, setValue)
def isChanged(self):
sv = self.saved_value
strv = self.tostring(self.value)
if sv is None and strv == self.default:
return False
return strv != sv
def conform(self):
pos = len(self.text) - self.marked_pos
self.text = self.text.lstrip("0")
if self.text == "":
self.text = "0"
if pos > len(self.text):
self.marked_pos = 0
else:
self.marked_pos = len(self.text) - pos
def handleKey(self, key):
if key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
ascii = getPrevAsciiCode()
if not (48 <= ascii <= 57):
return
else:
ascii = getKeyNumber(key) + 48
newChar = unichr(ascii)
if self.allmarked:
self.deleteAllChars()
self.allmarked = False
self.insertChar(newChar, self.marked_pos, False)
self.marked_pos += 1
else:
ConfigText.handleKey(self, key)
self.conform()
def onSelect(self, session):
self.allmarked = (self.value != "")
def onDeselect(self, session):
self.marked_pos = 0
self.offset = 0
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value
class ConfigSearchText(ConfigText):
def __init__(self, default = "", fixed_size = False, visible_width = False):
ConfigText.__init__(self, default = default, fixed_size = fixed_size, visible_width = visible_width)
NumericalTextInput.__init__(self, nextFunc = self.nextFunc, handleTimeout = False, search = True)
class ConfigDirectory(ConfigText):
def __init__(self, default="", visible_width=60):
ConfigText.__init__(self, default, fixed_size = True, visible_width = visible_width)
def handleKey(self, key):
pass
def getValue(self):
if self.text == "":
return None
else:
return ConfigText.getValue(self)
def setValue(self, val):
if val is None:
val = ""
ConfigText.setValue(self, val)
def getMulti(self, selected):
if self.text == "":
return "mtext"[1-selected:], _("List of storage devices"), range(0)
else:
return ConfigText.getMulti(self, selected)
def onSelect(self, session):
self.allmarked = (self.value != "")
# a slider.
class ConfigSlider(ConfigElement):
def __init__(self, default = 0, increment = 1, limits = (0, 100)):
ConfigElement.__init__(self)
self.value = self.last_value = self.default = default
self.min = limits[0]
self.max = limits[1]
self.increment = increment
def checkValues(self, value = None):
if value is None:
value = self.value
if value < self.min:
value = self.min
elif value > self.max:
value = self.max
if self.value != value: #avoid call of setter if value not changed
self.value = value
def handleKey(self, key):
if key == KEY_LEFT:
tmp = self.value - self.increment
elif key == KEY_RIGHT:
tmp = self.value + self.increment
elif key == KEY_HOME:
self.value = self.min
return
elif key == KEY_END:
self.value = self.max
return
else:
return
self.checkValues(tmp)
def getText(self):
return "%d / %d" % (self.value, self.max)
def getMulti(self, selected):
self.checkValues()
return "slider", self.value, self.max
def fromstring(self, value):
return int(value)
# a satlist. in fact, it's a ConfigSelection.
class ConfigSatlist(ConfigSelection):
def __init__(self, list, default = None):
if default is not None:
default = str(default)
ConfigSelection.__init__(self, choices = [(str(orbpos), desc) for (orbpos, desc, flags) in list], default = default)
def getOrbitalPosition(self):
if self.value == "":
return None
return int(self.value)
orbital_position = property(getOrbitalPosition)
class ConfigSet(ConfigElement):
def __init__(self, choices, default=None):
if not default: default = []
ConfigElement.__init__(self)
if isinstance(choices, list):
choices.sort()
self.choices = choicesList(choices, choicesList.LIST_TYPE_LIST)
else:
assert False, "ConfigSet choices must be a list!"
if default is None:
default = []
self.pos = -1
default.sort()
self.last_value = self.default = default
self.value = default[:]
def toggleChoice(self, choice):
value = self.value
if choice in value:
value.remove(choice)
else:
value.append(choice)
value.sort()
self.changed()
def handleKey(self, key):
if key in KEY_NUMBERS + [KEY_DELETE, KEY_BACKSPACE]:
if self.pos != -1:
self.toggleChoice(self.choices[self.pos])
elif key == KEY_LEFT:
if self.pos < 0:
self.pos = len(self.choices)-1
else:
self.pos -= 1
elif key == KEY_RIGHT:
if self.pos >= len(self.choices)-1:
self.pos = -1
else:
self.pos += 1
elif key in (KEY_HOME, KEY_END):
self.pos = -1
def genString(self, lst):
res = ""
for x in lst:
res += self.description[x]+" "
return res
def getText(self):
return self.genString(self.value)
def getMulti(self, selected):
if not selected or self.pos == -1:
return "text", self.genString(self.value)
else:
tmp = self.value[:]
ch = self.choices[self.pos]
mem = ch in self.value
if not mem:
tmp.append(ch)
tmp.sort()
ind = tmp.index(ch)
val1 = self.genString(tmp[:ind])
val2 = " "+self.genString(tmp[ind+1:])
if mem:
chstr = " "+self.description[ch]+" "
else:
chstr = "("+self.description[ch]+")"
len_val1 = len(val1)
return "mtext", val1+chstr+val2, range(len_val1, len_val1 + len(chstr))
def onDeselect(self, session):
self.pos = -1
if not self.last_value == self.value:
self.changedFinal()
self.last_value = self.value[:]
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
description = property(lambda self: descriptionList(self.choices.choices, choicesList.LIST_TYPE_LIST))
class ConfigDictionarySet(ConfigElement):
def __init__(self, default = {}):
ConfigElement.__init__(self)
self.default = default
self.dirs = {}
self.value = self.default
def getKeys(self):
		return self.dirs.keys()
def setValue(self, value):
if isinstance(value, dict):
self.dirs = value
self.changed()
def getValue(self):
return self.dirs
value = property(getValue, setValue)
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
def load(self):
sv = self.saved_value
if sv is None:
tmp = self.default
else:
tmp = self.fromstring(sv)
self.dirs = tmp
def changeConfigValue(self, value, config_key, config_value):
if isinstance(value, str) and isinstance(config_key, str):
if value in self.dirs:
self.dirs[value][config_key] = config_value
else:
self.dirs[value] = {config_key : config_value}
self.changed()
def getConfigValue(self, value, config_key):
if isinstance(value, str) and isinstance(config_key, str):
if value in self.dirs and config_key in self.dirs[value]:
return self.dirs[value][config_key]
return None
def removeConfigValue(self, value, config_key):
if isinstance(value, str) and isinstance(config_key, str):
if value in self.dirs and config_key in self.dirs[value]:
try:
del self.dirs[value][config_key]
except KeyError:
pass
self.changed()
def save(self):
del_keys = []
for key in self.dirs:
if not len(self.dirs[key]):
del_keys.append(key)
for del_key in del_keys:
try:
del self.dirs[del_key]
except KeyError:
pass
self.changed()
self.saved_value = self.tostring(self.dirs)
class ConfigLocations(ConfigElement):
def __init__(self, default=None, visible_width=False):
if not default: default = []
ConfigElement.__init__(self)
self.visible_width = visible_width
self.pos = -1
self.default = default
self.locations = []
self.mountpoints = []
self.value = default[:]
def setValue(self, value):
locations = self.locations
loc = [x[0] for x in locations if x[3]]
add = [x for x in value if not x in loc]
diff = add + [x for x in loc if not x in value]
locations = [x for x in locations if not x[0] in diff] + [[x, self.getMountpoint(x), True, True] for x in add]
#locations.sort(key = lambda x: x[0])
self.locations = locations
self.changed()
def getValue(self):
self.checkChangedMountpoints()
locations = self.locations
for x in locations:
x[3] = x[2]
return [x[0] for x in locations if x[3]]
value = property(getValue, setValue)
def tostring(self, value):
return str(value)
def fromstring(self, val):
return eval(val)
def load(self):
sv = self.saved_value
if sv is None:
tmp = self.default
else:
tmp = self.fromstring(sv)
locations = [[x, None, False, False] for x in tmp]
self.refreshMountpoints()
for x in locations:
if fileExists(x[0]):
x[1] = self.getMountpoint(x[0])
x[2] = True
self.locations = locations
def save(self):
locations = self.locations
if self.save_disabled or not locations:
self.saved_value = None
else:
self.saved_value = self.tostring([x[0] for x in locations])
def isChanged(self):
sv = self.saved_value
locations = self.locations
		if sv is None and not locations:
return False
return self.tostring([x[0] for x in locations]) != sv
def addedMount(self, mp):
for x in self.locations:
if x[1] == mp:
x[2] = True
elif x[1] is None and fileExists(x[0]):
x[1] = self.getMountpoint(x[0])
x[2] = True
def removedMount(self, mp):
for x in self.locations:
if x[1] == mp:
x[2] = False
def refreshMountpoints(self):
self.mountpoints = [p.mountpoint for p in harddiskmanager.getMountedPartitions() if p.mountpoint != "/"]
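		# Longest mountpoints first, so getMountpoint() always matches the
		# most specific mount for a given path.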
self.mountpoints.sort(key = lambda x: -len(x))
def checkChangedMountpoints(self):
oldmounts = self.mountpoints
self.refreshMountpoints()
newmounts = self.mountpoints
if oldmounts == newmounts:
return
for x in oldmounts:
if not x in newmounts:
self.removedMount(x)
for x in newmounts:
if not x in oldmounts:
self.addedMount(x)
def getMountpoint(self, file):
file = os_path.realpath(file)+"/"
for m in self.mountpoints:
if file.startswith(m):
return m
return None
def handleKey(self, key):
if key == KEY_LEFT:
self.pos -= 1
if self.pos < -1:
self.pos = len(self.value)-1
elif key == KEY_RIGHT:
self.pos += 1
if self.pos >= len(self.value):
self.pos = -1
elif key in (KEY_HOME, KEY_END):
self.pos = -1
def getText(self):
return " ".join(self.value)
def getMulti(self, selected):
if not selected:
valstr = " ".join(self.value)
if self.visible_width and len(valstr) > self.visible_width:
return "text", valstr[0:self.visible_width]
else:
return "text", valstr
else:
i = 0
valstr = ""
ind1 = 0
ind2 = 0
for val in self.value:
if i == self.pos:
ind1 = len(valstr)
valstr += str(val)+" "
if i == self.pos:
ind2 = len(valstr)
i += 1
if self.visible_width and len(valstr) > self.visible_width:
if ind1+1 < self.visible_width/2:
off = 0
else:
off = min(ind1+1-self.visible_width/2, len(valstr)-self.visible_width)
return "mtext", valstr[off:off+self.visible_width], range(ind1-off,ind2-off)
else:
return "mtext", valstr, range(ind1,ind2)
def onDeselect(self, session):
self.pos = -1
# nothing.
class ConfigNothing(ConfigSelection):
def __init__(self):
ConfigSelection.__init__(self, choices = [("","")])
# until here, 'saved_value' always had to be a *string*.
# now, in ConfigSubsection, and only there, saved_value
# is a dict, essentially forming a tree.
#
# config.foo.bar=True
# config.foobar=False
#
# turns into:
# config.saved_value == {"foo": {"bar": "True"}, "foobar": "False"}
#
class ConfigSubsectionContent(object):
pass
# we store a backup of the loaded configuration
# data in self.stored_values, to be able to deploy
# them when a new config element will be added,
# so non-default values are instantly available
# A list, for example:
# config.dipswitches = ConfigSubList()
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
# config.dipswitches.append(ConfigYesNo())
class ConfigSubList(list, object):
def __init__(self):
list.__init__(self)
self.stored_values = {}
def save(self):
for x in self:
x.save()
def load(self):
for x in self:
x.load()
def getSavedValue(self):
res = { }
for i, val in enumerate(self):
sv = val.saved_value
if sv is not None:
res[str(i)] = sv
return res
def setSavedValue(self, values):
self.stored_values = dict(values)
for (key, val) in self.stored_values.items():
if int(key) < len(self):
self[int(key)].saved_value = val
saved_value = property(getSavedValue, setSavedValue)
def append(self, item):
i = str(len(self))
list.append(self, item)
if i in self.stored_values:
item.saved_value = self.stored_values[i]
item.load()
def dict(self):
return dict([(str(index), value) for index, value in enumerate(self)])
# same as ConfigSubList, just as a dictionary.
# care must be taken that the 'key' has a proper
# str() method, because it will be used in the config
# file.
class ConfigSubDict(dict, object):
def __init__(self):
dict.__init__(self)
self.stored_values = {}
def save(self):
for x in self.values():
x.save()
def load(self):
for x in self.values():
x.load()
def getSavedValue(self):
res = {}
for (key, val) in self.items():
sv = val.saved_value
if sv is not None:
res[str(key)] = sv
return res
def setSavedValue(self, values):
self.stored_values = dict(values)
for (key, val) in self.items():
if str(key) in self.stored_values:
val.saved_value = self.stored_values[str(key)]
saved_value = property(getSavedValue, setSavedValue)
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
if str(key) in self.stored_values:
item.saved_value = self.stored_values[str(key)]
item.load()
def dict(self):
return self
# Like the classes above, just with a more "native"
# syntax.
#
# some evil stuff must be done to allow instant
# loading of added elements. this is why this class
# is so complex.
#
# we need the 'content' because we overwrite
# __setattr__.
# If you don't understand this, try adding
# __setattr__ to a usual existing class and you will.
class ConfigSubsection(object):
def __init__(self):
self.__dict__["content"] = ConfigSubsectionContent()
self.content.items = { }
self.content.stored_values = { }
def __setattr__(self, name, value):
if name == "saved_value":
return self.setSavedValue(value)
assert isinstance(value, (ConfigSubsection, ConfigElement, ConfigSubList, ConfigSubDict)), "ConfigSubsections can only store ConfigSubsections, ConfigSubLists, ConfigSubDicts or ConfigElements"
content = self.content
content.items[name] = value
x = content.stored_values.get(name, None)
if x is not None:
#print "ok, now we have a new item,", name, "and have the following value for it:", x
value.saved_value = x
value.load()
def __getattr__(self, name):
return self.content.items[name]
def getSavedValue(self):
res = self.content.stored_values
for (key, val) in self.content.items.items():
sv = val.saved_value
if sv is not None:
res[key] = sv
elif key in res:
del res[key]
return res
def setSavedValue(self, values):
values = dict(values)
self.content.stored_values = values
for (key, val) in self.content.items.items():
value = values.get(key, None)
if value is not None:
val.saved_value = value
saved_value = property(getSavedValue, setSavedValue)
def save(self):
for x in self.content.items.values():
x.save()
def load(self):
for x in self.content.items.values():
x.load()
def dict(self):
return self.content.items
# the root config object, which also can "pickle" (=serialize)
# down the whole config tree.
#
# we try to keep non-existing config entries, to apply them whenever
# a new config entry is added to a subsection
# also, non-existing config entries will be saved, so they won't be
# lost when a config entry disappears.
class Config(ConfigSubsection):
def __init__(self):
ConfigSubsection.__init__(self)
def pickle_this(self, prefix, topickle, result):
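		# Sort numeric keys (ConfigSubList indices) numerically and all
		# other keys case-insensitively.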
for (key, val) in sorted(topickle.items(), key=lambda x: int(x[0]) if x[0].isdigit() else x[0].lower()):
name = '.'.join((prefix, key))
if isinstance(val, dict):
self.pickle_this(name, val, result)
elif isinstance(val, tuple):
result += [name, '=', str(val[0]), '\n']
else:
result += [name, '=', str(val), '\n']
def pickle(self):
result = []
self.pickle_this("config", self.saved_value, result)
return ''.join(result)
def unpickle(self, lines, base_file=True):
tree = { }
configbase = tree.setdefault("config", {})
for l in lines:
if not l or l[0] == '#':
continue
result = l.split('=', 1)
if len(result) != 2:
continue
(name, val) = result
val = val.strip()
#convert old settings
if l.startswith("config.Nims."):
tmp = name.split('.')
if tmp[3] == "cable":
tmp[3] = "dvbc"
elif tmp[3].startswith ("cable"):
tmp[3] = "dvbc." + tmp[3]
elif tmp[3].startswith("terrestrial"):
tmp[3] = "dvbt." + tmp[3]
else:
if tmp[3] not in ('dvbs', 'dvbc', 'dvbt', 'multiType'):
tmp[3] = "dvbs." + tmp[3]
name =".".join(tmp)
names = name.split('.')
base = configbase
for n in names[1:-1]:
base = base.setdefault(n, {})
base[names[-1]] = val
if not base_file: # not the initial config file..
#update config.x.y.value when exist
try:
configEntry = eval(name)
if configEntry is not None:
configEntry.value = val
except (SyntaxError, KeyError):
pass
# we inherit from ConfigSubsection, so ...
#object.__setattr__(self, "saved_value", tree["config"])
if "config" in tree:
self.setSavedValue(tree["config"])
def saveToFile(self, filename):
text = self.pickle()
try:
import os
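			# Write to a scratch file, flush and fsync it, then atomically
			# rename it over the real settings file so a crash cannot leave
			# a truncated config.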
f = open(filename + ".writing", "w")
f.write(text)
f.flush()
os.fsync(f.fileno())
f.close()
os.rename(filename + ".writing", filename)
except IOError:
print "Config: Couldn't write %s" % filename
def loadFromFile(self, filename, base_file=True):
self.unpickle(open(filename, "r"), base_file)
config = Config()
config.misc = ConfigSubsection()
class ConfigFile:
def __init__(self):
pass
CONFIG_FILE = resolveFilename(SCOPE_CONFIG, "settings")
def load(self):
try:
config.loadFromFile(self.CONFIG_FILE, True)
except IOError, e:
print "unable to load config (%s), assuming defaults..." % str(e)
def save(self):
# config.save()
config.saveToFile(self.CONFIG_FILE)
def __resolveValue(self, pickles, cmap):
key = pickles[0]
if cmap.has_key(key):
if len(pickles) > 1:
return self.__resolveValue(pickles[1:], cmap[key].dict())
else:
return str(cmap[key].value)
return None
def getResolvedKey(self, key):
names = key.split('.')
if len(names) > 1:
if names[0] == "config":
ret=self.__resolveValue(names[1:], config.content.items)
if ret and len(ret):
return ret
print "getResolvedKey", key, "failed !! (Typo??)"
return ""
def NoSave(element):
element.disableSave()
return element
configfile = ConfigFile()
configfile.load()
def getConfigListEntry(*args):
assert len(args) > 1, "getConfigListEntry needs a minimum of two arguments (descr, configElement)"
return args
def updateConfigElement(element, newelement):
newelement.value = element.value
return newelement
#def _(x):
# return x
#
#config.bla = ConfigSubsection()
#config.bla.test = ConfigYesNo()
#config.nim = ConfigSubList()
#config.nim.append(ConfigSubsection())
#config.nim[0].bla = ConfigYesNo()
#config.nim.append(ConfigSubsection())
#config.nim[1].bla = ConfigYesNo()
#config.nim[1].blub = ConfigYesNo()
#config.arg = ConfigSubDict()
#config.arg["Hello"] = ConfigYesNo()
#
#config.arg["Hello"].handleKey(KEY_RIGHT)
#config.arg["Hello"].handleKey(KEY_RIGHT)
#
##config.saved_value
#
##configfile.save()
#config.save()
#print config.pickle()
cec_limits = [(0,15),(0,15),(0,15),(0,15)]
class ConfigCECAddress(ConfigSequence):
def __init__(self, default, auto_jump = False):
ConfigSequence.__init__(self, seperator = ".", limits = cec_limits, default = default)
self.block_len = [len(str(x[1])) for x in self.limits]
self.marked_block = 0
self.overwrite = True
self.auto_jump = auto_jump
def handleKey(self, key):
if key == KEY_LEFT:
if self.marked_block > 0:
self.marked_block -= 1
self.overwrite = True
elif key == KEY_RIGHT:
if self.marked_block < len(self.limits)-1:
self.marked_block += 1
self.overwrite = True
elif key == KEY_HOME:
self.marked_block = 0
self.overwrite = True
elif key == KEY_END:
self.marked_block = len(self.limits)-1
self.overwrite = True
elif key in KEY_NUMBERS or key == KEY_ASCII:
if key == KEY_ASCII:
code = getPrevAsciiCode()
if code < 48 or code > 57:
return
number = code - 48
else:
number = getKeyNumber(key)
oldvalue = self._value[self.marked_block]
if self.overwrite:
self._value[self.marked_block] = number
self.overwrite = False
else:
oldvalue *= 10
newvalue = oldvalue + number
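			# If the new digit would overflow this block, auto_jump moves
			# the cursor right and re-sends the key so the digit lands in
			# the next block instead.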
if self.auto_jump and newvalue > self.limits[self.marked_block][1] and self.marked_block < len(self.limits)-1:
self.handleKey(KEY_RIGHT)
self.handleKey(key)
return
else:
self._value[self.marked_block] = newvalue
if len(str(self._value[self.marked_block])) >= self.block_len[self.marked_block]:
self.handleKey(KEY_RIGHT)
self.validate()
self.changed()
def genText(self):
value = ""
block_strlen = []
for i in self._value:
block_strlen.append(len(str(i)))
if value:
value += self.seperator
value += str(i)
leftPos = sum(block_strlen[:self.marked_block])+self.marked_block
rightPos = sum(block_strlen[:(self.marked_block+1)])+self.marked_block
mBlock = range(leftPos, rightPos)
return value, mBlock
def getMulti(self, selected):
(value, mBlock) = self.genText()
if self.enabled:
return "mtext"[1-selected:], value, mBlock
else:
return "text", value
def getHTML(self, id):
# we definitely don't want leading zeros
return '.'.join(["%d" % d for d in self.value])
class ConfigAction(ConfigElement):
def __init__(self, action, *args):
ConfigElement.__init__(self)
self.value = "(OK)"
self.action = action
self.actionargs = args
def handleKey(self, key):
if (key == KEY_OK):
self.action(*self.actionargs)
def getMulti(self, dummy):
pass
| Open-Plus/opgui | lib/python/Components/config.py | Python | gpl-2.0 | 57,531 |
# -*- coding: utf-8 -*-
"""
Calculate a position from an IP or Address and save it into a file.
Usage:
geolocation.py [(-v | --verbose)] [--no-wait] (-m | --me)
geolocation.py [(-v | --verbose)] (-a | --address) [(-r | --remove)] [(-p | --previous-city)] <address>
geolocation.py [(-v | --verbose)] (-s | --symlinks)
geolocation.py [(-v | --verbose)] --activate
geolocation.py [(-v | --verbose)] --deactivate
geolocation.py (-h | --help)
geolocation.py --version
Options:
<address> Address to be calculated.
-r --remove Remove this point from the json file.
-p --previous-city Save the calculated point into the previous cities file.
-s --symlinks Create symlinks in output directory to upload on deploy.
-h --help Show this screen.
-v --verbose Show the log in the standard output.
--version Show version.
"""
import collections
import json
import logging
import os
import time
import webbrowser
from logging.handlers import RotatingFileHandler
import configobj # fades
import geocoder # fades
from docopt import docopt # fades
logger = logging.getLogger('geolocation')
DIRNAME = os.path.dirname(os.path.abspath(__file__))
SYMLINKS_DIR = os.path.join(DIRNAME, 'output/assets/data')
GPX_FILES = [
os.path.join(DIRNAME, 'geodata/0-etapa.gpx'),
os.path.join(DIRNAME, 'geodata/primera-etapa.gpx'),
os.path.join(DIRNAME, 'geodata/segunda-etapa.gpx'),
os.path.join(DIRNAME, 'geodata/tercera-etapa.gpx'),
]
CITIES_FILENAME = os.path.join(DIRNAME, 'geodata/cities.json')
MY_POSITION_FILENAME = os.path.join(DIRNAME, 'geodata/my-position.json')
SYMLINK_FILES = [
CITIES_FILENAME,
MY_POSITION_FILENAME,
] + GPX_FILES
WAIT_BEFORE_QUERY = 5
MAP_ZOOM = 14
CONF_FILE = os.path.expanduser(os.path.join('~', '.geolocation.ini'))
config = configobj.ConfigObj(
infile=CONF_FILE,
encoding='utf-8',
)
def setup_logging(verbose):
logfile = os.path.join(DIRNAME, 'geodata/geolocation.log')
handler = RotatingFileHandler(logfile, maxBytes=1e6, backupCount=10, encoding='utf-8')
logger.addHandler(handler)
formatter = logging.Formatter("%(asctime)s %(name)-10s "
"%(levelname)-8s %(message)s")
handler.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
if verbose:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
def save_json(data, output):
logger.info('Saving file...')
with open(output, 'w') as fh:
data = json.dumps(
data,
indent=4,
sort_keys=True
)
fh.write(data)
def load_json(output):
data = open(output, 'r').read()
cities = json.loads(
data,
object_pairs_hook=collections.OrderedDict
)
return cities
def setup_output(output):
dirname = os.path.dirname(output)
if not os.path.exists(dirname):
os.makedirs(dirname)
if not os.path.exists(output):
# touch file with an empty dict
init = {
'next': [],
'previous': [],
}
# TODO: add 'etapa' info so we can use the same data in
# "Argentina en Python" page
# {
# 'first': [],
# 'second': [],
# 'third': [],
# },
# }
save_json(init, output)
def osmurl_invalid():
logger.info('The URL is not correct. Quitting...')
def is_osmurl_valid(response):
url = 'http://www.openstreetmap.org/#map={zoom}/{lat}/{lng}'.format(
zoom=MAP_ZOOM,
lat=response.lat,
lng=response.lng,
)
logger.info('OSMUrl: %s', url)
answer = None
webbrowser.open_new_tab(url)
while answer not in ('y', 'yes', 'n', 'no'):
answer = input('Is this URL correct?\n {}\n[y/n]: '.format(url))
if answer in ('y', 'yes'):
return True
return False
def create_symlinks(dirname=SYMLINKS_DIR):
if not os.path.exists(dirname):
logger.info('Creating directory: %s', dirname)
os.makedirs(dirname)
def get_output_path(filename):
return os.path.join(
dirname,
os.path.basename(filename)
)
def get_abs_path(filename):
return os.path.abspath(filename)
for filename in SYMLINK_FILES:
destination = get_output_path(filename)
if not os.path.exists(destination):
source = get_abs_path(filename)
logger.info('Creating symlink: %s', destination)
os.symlink(source, destination)
def calc_my_position_ip(output=MY_POSITION_FILENAME):
setup_output(output)
logger.info('Waiting %s seconds...', WAIT_BEFORE_QUERY)
time.sleep(WAIT_BEFORE_QUERY)
logger.info('Querying the server about my ip...')
response = geocoder.ip('me')
logger.info('LatLng: %s', response.latlng)
logger.info('Place: %s', response.address)
return calc_my_position_address(response.address, output)
def calc_my_position_address(address, output, upload=True):
logger.info('Querying the server about "%s"...', address)
response = geocoder.osm(address)
logger.info('LatLng: %s', response.latlng)
logger.info('Place: %s', response.address)
if upload:
save_json(response.latlng, output)
upload_my_position()
return response
def upload_my_position():
command = ' '.join([
'runuser',
'-l',
'humitos',
'-c',
'"scp',
MY_POSITION_FILENAME,
'elblogdehumitos.com:~/apps/argentinaenpython.com.ar/assets/data/"',
])
logger.debug(command)
logger.info('Uploading new "my-position.json" file...')
os.system(command)
logger.info('Upload Finished!')
def calc_address(address, when, output=CITIES_FILENAME):
setup_output(output)
logger.info('Querying the server for: "%s" ...', address)
response = geocoder.osm(address)
logger.info('Got an answer!')
if not is_osmurl_valid(response):
osmurl_invalid()
return
logger.info('Loading old cities from: %s', output)
cities = load_json(output)
osm_id = response.json['osm_id']
last_osm_id = None
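    # Only consecutive duplicates are skipped: the new city is compared
    # against the most recently stored entry for this list.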
if cities[when]:
last_osm_id = cities[when][-1]['osm_id']
if not cities[when] or osm_id != last_osm_id:
logger.info('Adding new city: "%s"', response.address)
cities[when].append(response.json)
save_json(cities, output)
else:
logger.info('This city is already saved. Excluding...')
return response
def remove_address(address, output=CITIES_FILENAME):
logger.info('Querying the server for: "%s" ...', address)
response = geocoder.osm(address)
logger.info('Got an answer!')
logger.info('Place: %s', response.address)
logger.info('Loading old cities from: %s', output)
cities = load_json(output)
removed = False
next_cities = cities['next']
for city in next_cities:
if city['osm_id'] == response.json['osm_id']:
logger.info('City: %s removed!', city['address'])
next_cities.remove(city)
removed = True
break
if removed:
save_json(cities, output)
else:
logger.info('City not found!')
return response
def activate(activate=True):
config['activated'] = activate
config.write()
def deactivate():
activate(False)
if __name__ == '__main__':
arguments = docopt(__doc__, version='Geolocation 0.1')
setup_logging(arguments['--verbose'])
if arguments['--no-wait']:
WAIT_BEFORE_QUERY = 0
if arguments['--activate']:
activate()
elif arguments['--deactivate']:
deactivate()
if arguments['-a'] or arguments['--address']:
q = arguments['<address>'] # .decode('utf8')
if arguments['--remove']:
response = remove_address(q)
else:
if arguments['--previous-city']:
when = 'previous'
else:
when = 'next'
calc_address(q, when)
if arguments['-m'] or arguments['--me']:
calc_my_position_ip()
if arguments['--symlinks']:
create_symlinks()
create_symlinks()
logger.info('Finished!')
| humitos/argentinaenpython.com | web/geolocation.py | Python | gpl-2.0 | 8,271 |
#!/usr/bin/env python3.7
import asyncio
import iterm2
# This script was created with the "basic" environment which does not support adding dependencies
# with pip.
async def main(connection):
# This is an example of using an asyncio context manager to register a custom control
# sequence. You can send a custom control sequence by issuing this command in a
# terminal session in iTerm2 while this script is running:
#
# printf "\033]1337;Custom=id=%s:%s\a" "shared-secret" "create-window"
async with iterm2.CustomControlSequenceMonitor(connection, "shared-secret", r'^create-window$') as mon:
while True:
match = await mon.async_get()
await iterm2.Window.async_create(connection)
# This instructs the script to run the "main" coroutine and to keep running even after it returns.
iterm2.run_forever(main)
| lgouger/iTerm2 | sources/template_basic_daemon.py | Python | gpl-2.0 | 864 |
#!/bin/python
import xbmcgui,xbmc,os,sys
xbmc.executebuiltin('XBMC.RunPlugin(plugin://plugin.video.iptvxtra-de/?zapsidemenu)')
| noba3/KoTos | addons/plugin.video.streamnetwork/resources/lib/zapsidemenu.py | Python | gpl-2.0 | 126
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from buildbot.status.web.base import ActionResource
from buildbot.status.web.base import HtmlResource
from buildbot.status.web.base import path_to_authfail
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implements
from buildbot.process.users import users
class IAuth(Interface):
"""
Represent an authentication method.
Note that each IAuth instance contains a link to the BuildMaster that
will be set once the IAuth instance is initialized.
"""
master = Attribute('master', "Link to BuildMaster, set when initialized")
def authenticate(self, user, passwd):
"""Check whether C{user} / C{passwd} are valid."""
def getUserInfo(self, user):
"""return dict with user info.
dict( fullName="", email="", groups=[])
"""
def errmsg(self):
"""Get the reason authentication failed."""
class AuthBase:
master = None # set in status.web.baseweb
err = ""
def errmsg(self):
return self.err
def getUserInfo(self, user):
"""default dummy impl"""
return dict(userName=user, fullName=user, email=user + "@localhost", groups=[user])
class BasicAuth(AuthBase):
implements(IAuth)
"""Implement basic authentication against a list of user/passwd."""
userpass = []
"""List of user/pass tuples."""
def __init__(self, userpass):
"""C{userpass} is a list of (user, passwd)."""
for item in userpass:
assert isinstance(item, tuple) or isinstance(item, list)
u, p = item
assert isinstance(u, str)
assert isinstance(p, str)
self.userpass = userpass
def authenticate(self, user, passwd):
"""Check that C{user}/C{passwd} is a valid user/pass tuple."""
if not self.userpass:
self.err = "Bad self.userpass data"
return False
for u, p in self.userpass:
if user == u and passwd == p:
self.err = ""
return True
self.err = "Invalid username or password"
return False
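# Illustrative master.cfg sketch (assumed usage, not part of this module):
#   from buildbot.status.web.auth import BasicAuth
#   authz = Authz(auth=BasicAuth([('admin', 'secret')]), forceBuild='auth')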
class HTPasswdAuth(AuthBase):
implements(IAuth)
"""Implement authentication against an .htpasswd file."""
file = ""
"""Path to the .htpasswd file to use."""
def __init__(self, file):
"""C{file} is a path to an .htpasswd file."""
assert os.path.exists(file)
self.file = file
def authenticate(self, user, passwd):
"""Authenticate C{user} and C{passwd} against an .htpasswd file"""
if not os.path.exists(self.file):
self.err = "No such file: " + self.file
return False
# Fetch each line from the .htpasswd file and split it into a
# [user, passwd] array.
lines = [l.rstrip().split(':', 1)
for l in file(self.file).readlines()]
# Keep only the line for this login
lines = [l for l in lines if l[0] == user]
if not lines:
self.err = "Invalid user/passwd"
return False
hash = lines[0][1]
res = self.validatePassword(passwd, hash)
if res:
self.err = ""
else:
self.err = "Invalid user/passwd"
return res
def validatePassword(self, passwd, hash):
# This is the DES-hash of the password. The first two characters are
# the salt used to introduce disorder in the DES algorithm.
from crypt import crypt # @UnresolvedImport
return hash == crypt(passwd, hash[0:2])
class HTPasswdAprAuth(HTPasswdAuth):
implements(IAuth)
"""Implement authentication against an .htpasswd file based on libaprutil"""
file = ""
"""Path to the .htpasswd file to use."""
def __init__(self, file):
HTPasswdAuth.__init__(self, file)
# Try to load libaprutil throug ctypes
self.apr = None
try:
from ctypes import CDLL
from ctypes.util import find_library
lib = find_library("aprutil-1")
if lib:
self.apr = CDLL(lib)
except:
self.apr = None
def validatePassword(self, passwd, hash):
# Use apr_password_validate from libaprutil if libaprutil is available.
# Fallback to DES only checking from HTPasswdAuth
if self.apr:
return self.apr.apr_password_validate(passwd, hash) == 0
else:
return HTPasswdAuth.validatePassword(self, passwd, hash)
class UsersAuth(AuthBase):
"""Implement authentication against users in database"""
implements(IAuth)
def authenticate(self, user, passwd):
"""
It checks for a matching uid in the database for the credentials
and return True if a match is found, False otherwise.
@param user: username portion of user credentials
@type user: string
@param passwd: password portion of user credentials
@type passwd: string
@returns: boolean via deferred.
"""
d = self.master.db.users.getUserByUsername(user)
def check_creds(user):
if user:
if users.check_passwd(passwd, user['bb_password']):
return True
self.err = "no user found with those credentials"
return False
d.addCallback(check_creds)
return d
class AuthFailResource(HtmlResource):
pageTitle = "Authentication Failed"
def content(self, request, cxt):
templates = request.site.buildbot_service.templates
template = templates.get_template("authfail.html")
return template.render(**cxt)
class AuthzFailResource(HtmlResource):
pageTitle = "Authorization Failed"
def content(self, request, cxt):
templates = request.site.buildbot_service.templates
template = templates.get_template("authzfail.html")
return template.render(**cxt)
class LoginResource(ActionResource):
def performAction(self, request):
authz = self.getAuthz(request)
d = authz.login(request)
def on_login(res):
if res:
status = request.site.buildbot_service.master.status
root = status.getBuildbotURL()
return request.requestHeaders.getRawHeaders('referer',
[root])[0]
else:
return path_to_authfail(request)
d.addBoth(on_login)
return d
class LogoutResource(ActionResource):
def performAction(self, request):
authz = self.getAuthz(request)
authz.logout(request)
status = request.site.buildbot_service.master.status
root = status.getBuildbotURL()
return request.requestHeaders.getRawHeaders('referer', [root])[0]
| mitya57/debian-buildbot | buildbot/status/web/auth.py | Python | gpl-2.0 | 7,563 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Community Module Forms"""
from __future__ import absolute_import
from invenio.base.i18n import _
from invenio.utils.forms import InvenioForm as Form, InvenioBaseForm
from wtforms import TextField, \
TextAreaField, \
HiddenField, \
validators
from .models import Community
#
# Form
#
class SearchForm(Form):
"""
Search Form
"""
p = TextField(
validators=[validators.required()]
)
class CommunityForm(Form):
"""
Community form.
"""
field_sets = [
('Information', [
'identifier', 'title', 'description', 'curation_policy',
'page'
], {'classes': 'in'}),
]
field_placeholders = {
}
field_state_mapping = {
}
#
# Methods
#
def get_field_icon(self, name):
return self.field_icons.get(name, '')
def get_field_by_name(self, name):
try:
return self._fields[name]
except KeyError:
return None
def get_field_placeholder(self, name):
return self.field_placeholders.get(name, "")
def get_field_state_mapping(self, field):
try:
return self.field_state_mapping[field.short_name]
except KeyError:
return None
def has_field_state_mapping(self, field):
return field.short_name in self.field_state_mapping
def has_autocomplete(self, field):
return hasattr(field, 'autocomplete')
#
# Fields
#
identifier = TextField(
label=_('Identifier'),
description='Required. Only letters, numbers and dash are allowed. The identifier is used in the URL for the community collection, and cannot be modified later.',
validators=[validators.required(), validators.length(max=100, message="The identifier must be less than 100 characters long."), validators.regexp(u'^[-\w]+$', message='Only letters, numbers and dash are allowed')]
)
title = TextField(
description='Required.',
validators=[validators.required()]
)
description = TextAreaField(
description='Optional. A short description of the community collection, which will be displayed on the index page of the community.',
)
curation_policy = TextAreaField(
        description='Optional. Please describe, briefly and precisely, the policy by which you accept or reject new uploads in this community.',
)
page = TextAreaField(
description='Optional. A long description of the community collection, which will be displayed on a separate page linked from the index page.',
)
field_icons = {
'identifier': 'barcode',
'title': 'file-alt',
'description': 'pencil',
'curation_policy': 'check',
}
#
# Validation
#
def validate_identifier(self, field):
if field.data:
field.data = field.data.lower()
if Community.query.filter_by(id=field.data).first():
raise validators.ValidationError("The identifier already exists. Please choose a different one.")
class EditCommunityForm(CommunityForm):
"""
Same as collection form, except identifier is removed.
"""
identifier = None
class DeleteCommunityForm(InvenioBaseForm):
"""
Form to confirm deletion of a collection:
"""
delete = HiddenField(default='yes', validators=[validators.required()])
| PXke/invenio | invenio/modules/communities/forms.py | Python | gpl-2.0 | 4,175 |
# Base folder support
# Copyright (C) 2002 John Goerzen
# <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from threading import *
from offlineimap import threadutil
from offlineimap.threadutil import InstanceLimitedThread
from offlineimap.ui import UIBase
import os.path, re

# "NotImplementedException" is not a Python builtin; alias it so the raise
# statements below fail with the intended exception type instead of a
# NameError.
NotImplementedException = NotImplementedError
class BaseFolder:
def __init__(self):
self.uidlock = Lock()
def getname(self):
"""Returns name"""
return self.name
def suggeststhreads(self):
"""Returns true if this folder suggests using threads for actions;
false otherwise. Probably only IMAP will return true."""
return 0
def waitforthread(self):
"""For threading folders, waits until there is a resource available
before firing off a thread. For all others, returns immediately."""
pass
def getcopyinstancelimit(self):
"""For threading folders, returns the instancelimitname for
InstanceLimitedThreads."""
raise NotImplementedException
def storesmessages(self):
"""Should be true for any backend that actually saves message bodies.
(Almost all of them). False for the LocalStatus backend. Saves
us from having to slurp up messages just for localstatus purposes."""
return 1
def getvisiblename(self):
return self.name
def getrepository(self):
"""Returns the repository object that this folder is within."""
return self.repository
def getroot(self):
"""Returns the root of the folder, in a folder-specific fashion."""
return self.root
def getsep(self):
"""Returns the separator for this folder type."""
return self.sep
def getfullname(self):
if self.getroot():
return self.getroot() + self.getsep() + self.getname()
else:
return self.getname()
def getfolderbasename(self):
foldername = self.getname()
foldername = foldername.replace(self.repository.getsep(), '.')
foldername = re.sub('/\.$', '/dot', foldername)
foldername = re.sub('^\.$', 'dot', foldername)
return foldername
def isuidvalidityok(self):
if self.getsaveduidvalidity() != None:
return self.getsaveduidvalidity() == self.getuidvalidity()
else:
self.saveuidvalidity()
return 1
def _getuidfilename(self):
return os.path.join(self.repository.getuiddir(),
self.getfolderbasename())
def getsaveduidvalidity(self):
if hasattr(self, '_base_saved_uidvalidity'):
return self._base_saved_uidvalidity
uidfilename = self._getuidfilename()
if not os.path.exists(uidfilename):
self._base_saved_uidvalidity = None
else:
file = open(uidfilename, "rt")
self._base_saved_uidvalidity = long(file.readline().strip())
file.close()
return self._base_saved_uidvalidity
def saveuidvalidity(self):
newval = self.getuidvalidity()
uidfilename = self._getuidfilename()
self.uidlock.acquire()
try:
file = open(uidfilename + ".tmp", "wt")
file.write("%d\n" % newval)
file.close()
os.rename(uidfilename + ".tmp", uidfilename)
self._base_saved_uidvalidity = newval
finally:
self.uidlock.release()
def getuidvalidity(self):
raise NotImplementedException
def cachemessagelist(self):
"""Reads the message list from disk or network and stores it in
memory for later use. This list will not be re-read from disk or
memory unless this function is called again."""
raise NotImplementedException
def getmessagelist(self):
"""Gets the current message list.
You must call cachemessagelist() before calling this function!"""
raise NotImplementedException
def getmessage(self, uid):
"""Returns the content of the specified message."""
raise NotImplementedException
def savemessage(self, uid, content, flags, rtime):
"""Writes a new message, with the specified uid.
If the uid is < 0, the backend should assign a new uid and return it.
If the backend cannot assign a new uid, it returns the uid passed in
WITHOUT saving the message.
If the backend CAN assign a new uid, but cannot find out what this UID
is (as is the case with many IMAP servers), it returns 0 but DOES save
the message.
IMAP backend should be the only one that can assign a new uid.
If the uid is > 0, the backend should set the uid to this, if it can.
If it cannot set the uid to that, it will save it anyway.
It will return the uid assigned in any case.
"""
raise NotImplementedException
def getmessagetime(self, uid):
"""Return the received time for the specified message."""
raise NotImplementedException
def getmessageflags(self, uid):
"""Returns the flags for the specified message."""
raise NotImplementedException
def savemessageflags(self, uid, flags):
"""Sets the specified message's flags to the given set."""
raise NotImplementedException
def addmessageflags(self, uid, flags):
"""Adds the specified flags to the message's flag set. If a given
flag is already present, it will not be duplicated."""
newflags = self.getmessageflags(uid)
for flag in flags:
if not flag in newflags:
newflags.append(flag)
newflags.sort()
self.savemessageflags(uid, newflags)
def addmessagesflags(self, uidlist, flags):
for uid in uidlist:
self.addmessageflags(uid, flags)
def deletemessageflags(self, uid, flags):
"""Removes each flag given from the message's flag set. If a given
flag is already removed, no action will be taken for that flag."""
newflags = self.getmessageflags(uid)
for flag in flags:
if flag in newflags:
newflags.remove(flag)
newflags.sort()
self.savemessageflags(uid, newflags)
def deletemessagesflags(self, uidlist, flags):
for uid in uidlist:
self.deletemessageflags(uid, flags)
def deletemessage(self, uid):
raise NotImplementedException
def deletemessages(self, uidlist):
for uid in uidlist:
self.deletemessage(uid)
def syncmessagesto_neguid_msg(self, uid, dest, applyto, register = 1):
if register:
UIBase.getglobalui().registerthread(self.getaccountname())
UIBase.getglobalui().copyingmessage(uid, self, applyto)
successobject = None
successuid = None
message = self.getmessage(uid)
flags = self.getmessageflags(uid)
rtime = self.getmessagetime(uid)
for tryappend in applyto:
successuid = tryappend.savemessage(uid, message, flags, rtime)
if successuid >= 0:
successobject = tryappend
break
# Did we succeed?
if successobject != None:
if successuid: # Only if IMAP actually assigned a UID
# Copy the message to the other remote servers.
for appendserver in \
[x for x in applyto if x != successobject]:
appendserver.savemessage(successuid, message, flags, rtime)
# Copy to its new name on the local server and delete
# the one without a UID.
self.savemessage(successuid, message, flags, rtime)
self.deletemessage(uid) # It'll be re-downloaded.
else:
# Did not find any server to take this message. Ignore.
pass
def syncmessagesto_neguid(self, dest, applyto):
"""Pass 1 of folder synchronization.
Look for messages in self with a negative uid. These are messages in
Maildirs that were not added by us. Try to add them to the dests,
and once that succeeds, get the UID, add it to the others for real,
add it to local for real, and delete the fake one."""
uidlist = [uid for uid in self.getmessagelist().keys() if uid < 0]
threads = []
usethread = None
if applyto != None:
usethread = applyto[0]
for uid in uidlist:
if usethread and usethread.suggeststhreads():
usethread.waitforthread()
thread = InstanceLimitedThread(\
usethread.getcopyinstancelimit(),
target = self.syncmessagesto_neguid_msg,
name = "New msg sync from %s" % self.getvisiblename(),
args = (uid, dest, applyto))
thread.setDaemon(1)
thread.start()
threads.append(thread)
else:
self.syncmessagesto_neguid_msg(uid, dest, applyto, register = 0)
for thread in threads:
thread.join()
def copymessageto(self, uid, applyto, register = 1):
# Sometimes, it could be the case that if a sync takes awhile,
# a message might be deleted from the maildir before it can be
# synced to the status cache. This is only a problem with
# self.getmessage(). So, don't call self.getmessage unless
# really needed.
if register:
UIBase.getglobalui().registerthread(self.getaccountname())
UIBase.getglobalui().copyingmessage(uid, self, applyto)
message = ''
# If any of the destinations actually stores the message body,
# load it up.
for object in applyto:
if object.storesmessages():
message = self.getmessage(uid)
break
flags = self.getmessageflags(uid)
rtime = self.getmessagetime(uid)
for object in applyto:
newuid = object.savemessage(uid, message, flags, rtime)
if newuid > 0 and newuid != uid:
# Change the local uid.
self.savemessage(newuid, message, flags, rtime)
self.deletemessage(uid)
uid = newuid
def syncmessagesto_copy(self, dest, applyto):
"""Pass 2 of folder synchronization.
Look for messages present in self but not in dest. If any, add
them to dest."""
threads = []
dest_messagelist = dest.getmessagelist()
for uid in self.getmessagelist().keys():
if uid < 0: # Ignore messages that pass 1 missed.
continue
if not uid in dest_messagelist:
if self.suggeststhreads():
self.waitforthread()
thread = InstanceLimitedThread(\
self.getcopyinstancelimit(),
target = self.copymessageto,
name = "Copy message %d from %s" % (uid,
self.getvisiblename()),
args = (uid, applyto))
thread.setDaemon(1)
thread.start()
threads.append(thread)
else:
self.copymessageto(uid, applyto, register = 0)
for thread in threads:
thread.join()
def syncmessagesto_delete(self, dest, applyto):
"""Pass 3 of folder synchronization.
Look for message present in dest but not in self.
If any, delete them."""
deletelist = []
self_messagelist = self.getmessagelist()
for uid in dest.getmessagelist().keys():
if uid < 0:
continue
if not uid in self_messagelist:
deletelist.append(uid)
if len(deletelist):
UIBase.getglobalui().deletingmessages(deletelist, applyto)
for object in applyto:
object.deletemessages(deletelist)
def syncmessagesto_flags(self, dest, applyto):
"""Pass 4 of folder synchronization.
Look for any flag matching issues -- set dest message to have the
same flags that we have."""
# As an optimization over previous versions, we store up which flags
# are being used for an add or a delete. For each flag, we store
# a list of uids to which it should be added. Then, we can call
# addmessagesflags() to apply them in bulk, rather than one
# call per message as before. This should result in some significant
# performance improvements.
addflaglist = {}
delflaglist = {}
for uid in self.getmessagelist().keys():
if uid < 0: # Ignore messages missed by pass 1
continue
selfflags = self.getmessageflags(uid)
destflags = dest.getmessageflags(uid)
addflags = [x for x in selfflags if x not in destflags]
for flag in addflags:
if not flag in addflaglist:
addflaglist[flag] = []
addflaglist[flag].append(uid)
delflags = [x for x in destflags if x not in selfflags]
for flag in delflags:
if not flag in delflaglist:
delflaglist[flag] = []
delflaglist[flag].append(uid)
for object in applyto:
for flag in addflaglist.keys():
UIBase.getglobalui().addingflags(addflaglist[flag], flag, [object])
object.addmessagesflags(addflaglist[flag], [flag])
for flag in delflaglist.keys():
UIBase.getglobalui().deletingflags(delflaglist[flag], flag, [object])
object.deletemessagesflags(delflaglist[flag], [flag])
def syncmessagesto(self, dest, applyto = None):
"""Syncs messages in this folder to the destination.
If applyto is specified, it should be a list of folders (don't forget
to include dest!) to which all write actions should be applied.
It defaults to [dest] if not specified. It is important that
the UID generator be listed first in applyto; that is, the other
applyto ones should be the ones that "copy" the main action."""
if applyto == None:
applyto = [dest]
self.syncmessagesto_neguid(dest, applyto)
self.syncmessagesto_copy(dest, applyto)
self.syncmessagesto_delete(dest, applyto)
# Now, the message lists should be identical wrt the uids present.
# (except for potential negative uids that couldn't be placed
# anywhere)
self.syncmessagesto_flags(dest, applyto)
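        # Illustrative call (assumed objects): a driver would typically sync
        # a remote folder to the local one while mirroring all writes into
        # the status cache, e.g.:
        #   remote.syncmessagesto(local, applyto=[local, statusfolder])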
| brong/brong-offlineimap | offlineimap/folder/Base.py | Python | gpl-2.0 | 15,637 |
#!/bin/python
# -*- coding: utf-8 -*-
# Author: Pavel Studenik
# Email: [email protected]
# Date: 24.9.2013
import json
import logging
import os
import re
from django.utils import timezone
from datetime import datetime, timedelta
from urlparse import urlparse
from xml.dom.minidom import parseString
import git
from dateutil.parser import parse
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.template import Context, Template
from django.utils.translation import ugettext_lazy as _
from elasticsearch import Elasticsearch
from taggit.managers import TaggableManager
from apps.core.signals import recipe_changed, recipe_finished
from apps.taskomatic.models import TaskPeriod, TaskPeriodSchedule
from validators import validator_dir_exists
from apps.core.utils.date_helpers import toUTC
logger = logging.getLogger("main")
UNKNOW = 0
ABOART = 1
WAIT = 2
WARN = 3
FAIL = 4
PASS = 5
NEW = 6
CANCEL = 7
SCHEDULED = 8
PANIC = 9
FAILINSTALL = 10
SKIP = 11
RESULT_CHOICES = (
(UNKNOW, "unknow"),
(ABOART, "aborted"),
(CANCEL, "cancelled"),
(WAIT, "waiting"),
(SCHEDULED, "scheduled"),
(NEW, "new"),
(WARN, "warn"),
(WARN, "warning"),
(FAIL, "fail"),
(PASS, "pass"),
(PANIC, "panic"),
(FAILINSTALL, "failinstall"),
(SKIP, "skip"),
)
NONE = 0
WAIVED = 11
USERSTATUS_CHOICES = (
(NONE, u"none"),
(WAIVED, u"waived"),
)
RETURN = 0
RETURNWHENGREEN = 1
RESERVED = 2
EVENT_FINISH_ENUM = (
(RETURN, "return"),
(RETURNWHENGREEN, "return when ok"),
(RESERVED, "reserve system")
)
def render_label(data, rule):
rule = "{%% load core_extras %%}%s" % rule
template = Template(rule)
context = Context(data)
# print "%s - %s" % ( data, rule)
return template.render(context)
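# Illustrative call (assumed data): render_label({"name": "kernel"},
# "{{ name }}-daily") returns "kernel-daily"; the core_extras tag library
# is loaded before the rule is rendered.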
class EnumResult:
UNKNOW = 0
ABOART = 1
WAIT = 2
WARN = 3
FAIL = 4
PASS = 5
NEW = 6
CANCEL = 7
SCHEDULED = 8
PANIC = 9
FAILINSTALL = 10
SKIP = 11
def __init__(self):
self.enums = dict(RESULT_CHOICES)
def _get(self, value):
if isinstance(value, int):
return self.enums.get(value)
elif isinstance(value, str):
return [it[0] for it in self.enums.items() if it[1] == value.lower()][0]
@staticmethod
def choices():
er = EnumResult()
return er.enums.items()
@staticmethod
def get(value):
er = EnumResult()
return er._get(value)
@staticmethod
def broken_list():
return [EnumResult.FAIL, EnumResult.FAILINSTALL, EnumResult.ABOART, EnumResult.PANIC, EnumResult.WARN]
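# Illustrative mapping (derived from RESULT_CHOICES): EnumResult.get(5)
# returns "pass", and EnumResult.get("pass") returns 5.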
class Arch(models.Model):
name = models.CharField(max_length=32, unique=True)
def __unicode__(self):
return self.name
class Distro(models.Model):
name = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return self.name
class ObjParams(object):
def get_params(self):
params = {}
for it in self.params.split("\n"):
if not it:
continue
try:
n, v = it.strip().split("=", 1)
params.update({n: v})
except ValueError:
logger.warning("valid params: %s" % self.params.split("\n"))
return params
def clean(self):
self.params = self.params.strip()
try:
self.get_params()
except ValueError:
raise ValidationError("Non-valid notation, please use a=b")
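# A minimal sketch of the "a=b" grammar ObjParams expects: one pair per
# line; values may themselves contain '=' because the split happens once.
def _obj_params_sketch():  # pragma: no cover - illustrative only
    class _Holder(ObjParams):
        params = "ARCH=x86_64\nCMDLINE=a=b c=d"
    assert _Holder().get_params() == {"ARCH": "x86_64",
                                      "CMDLINE": "a=b c=d"}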
class AppTag(models.Model):
title = models.CharField(max_length=256)
package = models.CharField(max_length=256, blank=True, null=True)
def __unicode__(self):
return "%s" % self.title
def get_absolute_url(self):
return "?app=%s" % self.title
def count(self):
return self.tests.all().count()
class Bug(models.Model):
uid = models.IntegerField()
title = models.CharField(max_length=256)
status = models.CharField(max_length=16)
def __unicode__(self):
return "bz%d %s" % (self.uid, self.title)
def updateFromBugzilla(self):
pass
class Meta:
ordering = ("uid",)
class Git(models.Model):
name = models.CharField(max_length=64, blank=True, null=True)
localurl = models.CharField(_("Viewer"), max_length=255)
url = models.CharField(max_length=255, unique=True)
path = models.CharField(max_length=255, blank=True, null=True,
help_text="Only local directory file:///mnt/git/..", validators=[validator_dir_exists])
path_absolute = None
cmd = None
log = None
__groups = None
__cache_tests_path = None
__cache_tests_name = None
def __unicode__(self):
return self.name
@staticmethod
def getGitFromFolder(folder):
"""
Create/load Git object based git repository (folder)
"""
cmd = git.cmd.Git(folder)
url = re.sub(r'.*(@|//)', '', cmd.config('remote.origin.url')).strip()
name = os.path.basename(folder)
oGit, new = Git.objects.get_or_create(url=url, defaults={
"name": name
})
oGit.path_absolute = folder
if not new and oGit.name != name:
oGit.name = name
oGit.save()
return oGit
    def refresh(self):
        """
        Refresh (git pull) the repository, retrying a few times on failure.
        """
        git = self.__getGitCmd()
        # git.fetch()
        git.reset('--hard', 'HEAD')
        counter = 5
        while True:
            try:
                git.pull()
                return
            except Exception:
                if counter == 0:
                    self.__getLog().warning("Problem with pulling of "
                                            "the repository '%s'" % self.name)
                    return
                counter -= 1
def __load_test_cache(self):
tests = Test.objects.filter(git=self)\
.select_related('owner')\
.exclude(Q(folder__isnull=True) | Q(folder__exact=''))\
.order_by('folder')
self.__cache_tests_path = \
{row.folder: row for row in tests}
self.__cache_tests_name = {row.name: row for row in tests}
return list(tests)
def getTestFromPath(self, path):
if self.__cache_tests_path is None:
self.__load_test_cache()
return self.__cache_tests_path.get(path)
def getTestFromName(self, name):
if self.__cache_tests_name is None:
self.__load_test_cache()
return self.__cache_tests_name.get(name)
def getTestFromNameRegEx(self, reg):
if self.__cache_tests_name is None:
self.__load_test_cache()
for name in self.__cache_tests_name:
if reg.match(name):
return self.__cache_tests_name.get(name)
def updateInformationsAboutTests(self):
"""
        Update information about tests from Makefiles.
"""
def row2list(items):
data = []
if type(items) is not list:
items = [items, ]
for item in items:
for it in item.split():
data.append(it)
return set(data)
old_tests = self.__load_test_cache()
number_of_tests = len(old_tests)
git = self.__getGitCmd()
# git ls-files --full-name *Makefile
mkFiles = git.ls_files('--full-name', '*Makefile')
for mkFile in mkFiles.splitlines():
folder = os.path.dirname(mkFile)
info = self.__parseMakefile("%s/%s" % (self.path_absolute, mkFile))
name = None
if 'Name' not in info:
self.__getLog()\
.warning("The test '%s:%s' doesn't contain the 'Name' "
"attribute in Makefile" % (self.name, folder))
continue
else:
name = re.sub('\s+.*', '', info.get('Name'))
test = self.getTestFromPath(folder)
if test is None:
test = self.getTestFromName(name)
if test is not None:
# The test has bad folder
test.folder = folder
self.__getLog()\
.warning("The folder '%s:%s' and attribute Name '%s' "
"of test are inconsistent" %
(self.name, folder, name))
new = False
if test is None:
test = self.__try_to_get_removed_test(folder)
if test is not None:
# The test was move to new place
test.folder = folder
else:
# This test is really new
new = True
test = Test(name=name, git=self, folder=folder)
test.owner = Author.parseAuthor(info.get('Owner'))
# test.save()
self.__getLog()\
.info("The new test '%s' was find in folder '%s:%s'."
% (name, self.name, folder))
if not new:
if test in old_tests:
old_tests.remove(test)
# This is old
owner = Author.passiveParseAuthor(info.get('Owner'))
if test.owner.email != owner.get('email', test.owner.email):
# changed owner
test.owner = Author.parseAuthor(info.get('Owner'))
test.name = name
if 'Description' in info and \
test.description != info.get('Description'):
test.description = info.get('Description')
if 'TestTime' in info and test.time != info.get('TestTime'):
test.time = info.get('TestTime')
if 'Type' in info and test.type != info.get('Type'):
test.type = info.get('Type')
test.save()
if 'RunFor' in info:
self.__updateGroups(test, info.get('RunFor'))
if 'RunApp' in info:
apps = row2list(info.get('RunApp'))
if apps != set([it.title for it in test.apps.all()]):
test.apps.clear()
for it in apps:
app, status = AppTag.objects.get_or_create(title=it)
test.apps.add(app)
if 'Bug' in info:
bugs = row2list(info.get('Bug'))
if bugs != set(["%s" % it.uid for it in test.bugs.all()]):
test.bugs.clear()
for it in bugs:
bug, status = Bug.objects.get_or_create(uid=it)
test.bugs.add(bug)
self.__updateDependences(test, info.get('RhtsRequires'))
test.save()
# deactivate deleted tests
if len(old_tests) > 0.5 * number_of_tests:
            self.__getLog().warning(
                "There is probably something wrong with repo '%s'!!!\n"
                "More than 50%% of its tests would be deactivated. Skipped."
                % self.name)
else:
for test in old_tests:
if test.is_enable:
test.is_enable = False
test.save()
self.__getLog().info("The test '%s:%s' was disabled."
% (self.name, test.name))
# Check all new commits in this git repo
self.__check_history()
def __getGitCmd(self):
if not self.path_absolute:
raise Exception("Missing the absolute path to repository '%s'" %
self.name)
if not self.cmd:
self.cmd = git.cmd.Git(self.path_absolute)
return self.cmd
def __getLog(self):
if not self.log:
self.log = logging.getLogger()
return self.log
def __getVariables(self, rows):
lex = dict()
for row in rows:
rr = re.match(r"^(export\s+)?([A-Za-z0-9_]+)=\"?([^#]*)\"?(#.*)?$",
row)
if rr:
lex[rr.group(2)] = re.sub(r'\$\(?([A-Za-z0-9_]+)\)?',
lambda mo: lex.get(mo.group(1), ''),
rr.group(3)).strip()
return lex
def __getMakefileInfo(self, rows, lex):
info = dict()
for row in rows:
rr = re.match(r"^\s*@echo\s+\"([A-Za-z0-9_]+):\s+([^\"]*)\".*$",
row)
if rr:
key = rr.group(1)
val = rr.group(2)
if key in info:
if not isinstance(info[key], list):
oval = info[key]
info[key] = list()
info[key].append(oval)
info[key].append(val)
else:
info[key] = re.sub(r'\$\(?([A-Za-z0-9_]+)\)?',
lambda mo: lex.get(mo.group(1), ''),
val).strip()
return info
def __parseMakefile(self, mkfile):
rows = list()
with open(mkfile) as fd:
rows = fd.readlines()
return self.__getMakefileInfo(rows, self.__getVariables(rows))
def __updateGroups(self, test, row):
if isinstance(row, str):
row = row.split()
if not self.__groups:
self.__groups = {it.name.lower():
it for it in GroupOwner.objects.all()}
new_grups = list()
for it in row:
if it not in self.__groups:
# self.__getLog().warning(
# "This group '%s' in test '%s' doesn't exist." %
# (it, test.folder))
continue
new_grups.append(self.__groups.get(it))
# Remove unsupported groups
for group in test.groups.all():
if group not in new_grups:
test.groups.remove(group)
self.__getLog().debug(
"Removed the old group '%s' from test '%s'."
% (group.name, test.name))
else:
new_grups.remove(group)
# Add new groups
for group in new_grups:
test.groups.add(group)
self.__getLog().debug(
"Added a new group '%s' to test '%s'."
% (group.name, test.name))
def __updateDependences(self, test, rows):
if isinstance(rows, str):
rows = rows.split()
if not rows:
rows = list()
dep_old = list(test.dependencies.all())
dep_new = list()
for row in rows:
depName = re.sub(r'(test\(|\))', '', row)
if depName == row:
# This dependence is not a test, it is probably classic package
continue
depName = depName.strip()
depTest = self.getTestFromName(depName)
if depTest is None:
# This dependence is probably from different repo
depTest = Test.objects.filter(name__endswith=depName)\
.only('name', 'git', 'is_enable')[:1]
if len(depTest) == 0:
# This dependence does not exist
self.__getLog().warning(
"This test '%s' has got a non-existing dependence '%s'"
% (test.name, depName))
continue
else:
depTest = depTest[0]
dep_new.append(depTest)
# Removing old/unsupported dependencies
for dep in dep_old:
if dep not in dep_new:
test.dependencies.remove(dep)
self.__getLog().debug(
"Removed the old dependence '%s' from test '%s'."
% (dep.name, test.name))
else:
dep_new.remove(dep)
# Adding new dependencies
for dep in dep_new:
if dep == test:
self.__getLog().warning(
"This test '%s' has got as a dependence itself."
% test.name)
continue
if dep not in dep_old:
if not dep.is_enable:
self.__getLog().warning(
"This dependence '%s' of the test '%s' is disabled."
% (dep.name, test.name))
continue
self.__getLog().debug(
"Added a new dependence '%s' to test '%s'."
% (dep.name, test.name))
test.dependencies.add(dep)
def __try_to_get_removed_test(self, folder):
"""
        This method tries to find the test from which this test was moved.
"""
git = self.__getGitCmd()
# git log --follow --summary -M -- '*/Makefile'
checkDays = int(settings.CHECK_COMMMITS_PREVIOUS_DAYS)
if not checkDays:
checkDays = 1
output = git.log('--since=%s.days' % checkDays,
'--summary', '-M', '--name-status',
'--pretty=%H',
'--follow', 'HEAD',
'%s/Makefile' % folder)
res = re.search(
r'.*R\d+\s+(?P<to>[^\s]+)/Makefile\s+(?P<from>[^\s]+)/Makefile.*',
output, re.M)
if res:
return self.getTestFromPath(res.group('from'))
def __check_history(self):
git = self.__getGitCmd()
checkDays = int(settings.CHECK_COMMMITS_PREVIOUS_DAYS)
if not checkDays:
checkDays = 1
# git log --decorate=full --since=1 --simplify-by-decoration /
# --pretty=%H|%aN|%ae|%ai|%d --follow HEAD <folder>
rows = git.log('--decorate=full',
'--since=%s.days' % checkDays,
'--simplify-by-decoration',
'--pretty=%H|%aN|%ae|%ai|%d',
'--follow', 'HEAD',
".").split('\n')
for row in rows:
if len(row.strip()) > 0:
self.__saveCommits(row)
def __saveCommits(self, row):
# 1731d5af22c22469fa7b181d1e33cd52731619a0|Jiri Mikulka|
# [email protected]|2013-01-31 17:45:06 +0100|
# (tag: RHN-Satellite-CoreOS-RHN-Satellite-Other-Sanity-spacewalk-
# create-channel-1_0-2)
try:
chash, name, email, date, tag = row.split('|')
except:
self.__getLog().error("Bad format of commit log in repo '%s':\n%s"
% (self.name, row))
return
if tag:
author, status = Author.objects\
.get_or_create(email=email, defaults={"name": name})
res = re.findall(r'tag:\s+([^\s]+?)-([0-9\_\-]+)[^0-9\_\-]', row)
for it in res:
tagName, version = it
testName = "/%s" % tagName.replace('-', '.')
test = self.getTestFromNameRegEx(re.compile(testName))
if test is None:
self.__getLog().warning(
"This test '%s' was not found." % testName)
continue
data = dict()
data['author'] = author
data['date'] = toUTC(date)
data['version'] = version
commit, status = TestHistory.objects\
.get_or_create(commit=chash, test=test, defaults=data)
if status:
self.__getLog().debug("Added new commit for test '%s'."
% test.name)
def get_count(self):
return len(Test.objects.filter(git__id=self.id))
def save(self, *args, **kwargs):
for key in ("url", "localurl"):
# remove last char "/" in url
url = getattr(self, key)
if url.endswith("/"):
url = url[:-1]
setattr(self, key, url)
super(Git, self).save(*args, **kwargs)
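# A minimal sync sketch for one repository (assumes a configured Django
# database and a local clone at the made-up path below).
def _git_sync_sketch():  # pragma: no cover - illustrative only
    repo = Git.getGitFromFolder("/mnt/git/example-tests")  # hypothetical path
    repo.refresh()                       # hard reset, then git pull
    repo.updateInformationsAboutTests()  # re-read Makefiles into Test rows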
class Author(models.Model):
DEFAULT_AUTHOR = ("Unknown", "[email protected]")
name = models.CharField(max_length=255, unique=False,
default=DEFAULT_AUTHOR[0], db_index=True)
email = models.EmailField(default=DEFAULT_AUTHOR[1], db_index=True)
is_enabled = models.BooleanField(default=True)
def __unicode__(self):
return "%s <%s>" % (self.name, self.email)
@staticmethod
def FromUser(user):
if user.is_anonymous():
return None
try:
return Author.objects.get(email=user.email)
except Author.DoesNotExist:
return None
@staticmethod
def passiveParseAuthor(row):
"""
Parse author from line "name <[email protected]>"
return dict
"""
rr = re.search(r"((?P<name>[^@<]*)(\s|$)\s*)?"
r"<?((?P<email>[A-z0-9_\.\+]+"
r"@[A-z0-9_\.]+\.[A-z]{2,3}))?>?", row)
if rr is None:
return None
res = dict()
# Parse owner
if rr.group('name'):
res['name'] = rr.group('name').strip()
if rr.group('email'):
res['email'] = rr.group('email').strip()
return res
@staticmethod
def parseAuthor(row):
"""
        Parse an author from a line like "name <[email protected]>".
        Returns an Author object.
"""
res = Author.passiveParseAuthor(row)
if res is not None:
auths = Author.objects.filter(**res)
if len(auths) > 0:
return auths[0]
else:
res = dict()
email = res.get('email', Author.DEFAULT_AUTHOR[1])
name = res.get('name', Author.DEFAULT_AUTHOR[0])
owner, status = Author.objects\
.get_or_create(email=email, defaults={'name': name})
return owner
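# A minimal parsing sketch: passiveParseAuthor() is pure string parsing,
# while parseAuthor() also hits the database via get_or_create.
def _author_parse_sketch():  # pragma: no cover - illustrative only
    parsed = Author.passiveParseAuthor("Jane Doe <[email protected]>")
    assert parsed == {"name": "Jane Doe", "email": "[email protected]"}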
class GroupOwner(models.Model):
name = models.CharField(max_length=255, unique=True)
owners = models.ManyToManyField(Author, blank=True)
email_notification = models.BooleanField(default=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ["name", ]
class Test(models.Model):
name = models.CharField(max_length=255, unique=True, db_index=True)
git = models.ForeignKey(Git, blank=True, null=True, db_index=True)
owner = models.ForeignKey(Author, blank=True, null=True, db_index=True)
description = models.TextField(blank=True, null=True)
external_links = models.TextField(
blank=True, null=True,
help_text="external links which separated by ';'")
dependencies = models.ManyToManyField("Test", blank=True)
time = models.CharField(max_length=6, blank=True, null=True)
type = models.CharField(max_length=32, blank=True, null=True)
folder = models.CharField(
max_length=256, blank=True, null=True, db_index=True)
is_enable = models.BooleanField("enable", default=True, db_index=True)
groups = models.ManyToManyField(GroupOwner, blank=True)
bugs = models.ManyToManyField(Bug, blank=True)
apps = models.ManyToManyField(AppTag, blank=True, related_name="tests")
class Meta:
ordering = ["-is_enable", "name"]
def __unicode__(self):
return self.name
def __eq__(self, other):
# if not isinstance(other, self.__class__):
# return False
if not isinstance(other, models.Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
if self.id and other.id:
return self.id == other.id
return self.name == other.name and self.git == other.git
def get_absolute_url(self):
return "%s?test_id=%s" % (reverse("tests-list"), self.id)
def get_detail_url(self):
return "%s" % reverse("test-detail", args=[self.id])
def get_reposituory_url(self):
if not self.git:
return None
return settings.GIT_TREE_PARRENT % (self.git.localurl, self.folder)
def delete(self, *args, **kwargs):
# not possible to remove test
# dependencies on old runs
self.is_enable = False
self.save()
def save(self, *args, **kwargs):
if not self.owner:
self.owner = Author.parseAuthor("")
return super(Test, self).save(*args, **kwargs)
def get_external_links(self):
if not self.external_links:
return []
return [it.strip() for it in self.external_links.split() if it]
class TestHistory(models.Model):
test = models.ForeignKey(Test)
version = models.CharField(max_length=24, null=True)
date = models.DateTimeField()
author = models.ForeignKey(Author, null=True)
commit = models.CharField(max_length=64, null=True)
def __unicode__(self):
return "%s %s" % (self.commit, self.date)
class Meta:
verbose_name = _("history of test")
verbose_name_plural = _("history of tests")
def get_absolute_url(self):
# FIXME maybe create url from db record
# for example: return self.test.git.url % self.commit
default_str = "%s/commitdiff/%s"
if hasattr(settings, "EXTERNAL_GIT_VIEWER"):
default_str = settings.EXTERNAL_GIT_VIEWER
return default_str % (self.test.git.localurl, self.commit)
class System(models.Model):
hostname = models.CharField(max_length=255, blank=True, db_index=True)
ram = models.IntegerField(null=True, blank=True)
cpu = models.CharField(max_length=255, blank=True)
hdd = models.CharField(max_length=255, blank=True)
parent = models.ForeignKey("System", null=True, blank=True)
group = models.SmallIntegerField(null=True, blank=True)
def __unicode__(self):
return self.hostname
class JobTemplate(models.Model):
whiteboard = models.CharField(max_length=255, unique=True)
is_enable = models.BooleanField(default=False)
event_finish = models.SmallIntegerField(
choices=EVENT_FINISH_ENUM, default=RETURN)
schedule = models.ForeignKey(TaskPeriod, null=True, blank=True)
position = models.SmallIntegerField(default=0)
grouprecipes = models.CharField(
max_length=255, null=False, blank=True, default="{{ whiteboard }}",
help_text="example: {{arch}} {{whiteboard|nostartsdate}}")
tags = TaggableManager(blank=True)
group = settings.BEAKER_JOB_GROUP
is_set_recipe = False
def __unicode__(self):
return self.whiteboard
def save(self, *args, **kwargs):
model = self.__class__
if self.position is None:
# Append
try:
                last = model.objects.order_by("schedule", "-position")[0]
self.position = last.position + 1
except IndexError:
# First row
self.position = 0
return super(model, self).save(*args, **kwargs)
class Meta:
ordering = ('schedule', 'position',)
@models.permalink
def get_absolute_url(self):
return ("beaker-xml", [self.id])
def admin_url(self):
content_type = ContentType.objects.get_for_model(self.__class__)
return reverse("admin:%s_%s_change" % (
content_type.app_label, content_type.model), args=(self.id,))
def is_return(self):
return (self.event_finish == RETURN)
def get_tags(self):
return ", ".join([it.name for it in self.tags.all()])
def clone(self):
recipes = list(self.trecipes.all())
tags = self.tags.all()
self.pk = None
label = "Clone %s" % self.whiteboard
tmp_label = "%s" % label
for it in range(100):
if len(JobTemplate.objects.filter(whiteboard=label)) > 0:
label = "%s v. %s" % (tmp_label, it)
continue
break
self.whiteboard = label
self.save()
for recipe in recipes:
recipe.clone(self)
for it in tags:
self.tags.add(str(it))
def check_set_recipe(self):
"""
The method checks parameters for beaker's recipe. If recipes contain
same hostname then the recipe has to be used in own recipeSet.
The property is set as attribute 'is_set_recipe'.
"""
hostnames = []
for it in self.trecipes.all():
if not it.hostname:
continue
if it.hostname in hostnames:
self.is_set_recipe = True
return True
hostnames.append(it.hostname)
return False
class DistroTemplate(models.Model):
name = models.CharField(max_length=255, blank=True, help_text="Only alias")
family = models.CharField(max_length=255, blank=True, null=True)
variant = models.CharField(max_length=255, blank=True, null=True)
distroname = models.CharField(max_length=255, blank=True, null=True,
help_text="If field is empty, then it will use latest compose.")
def __unicode__(self):
return self.name
class Meta:
ordering = ('name', 'distroname',)
def save(self, *args, **kwargs):
model = self.__class__
if not self.name.strip():
self.name = "%s %s %s" % (
self.family, self.variant, self.distroname)
return super(model, self).save(*args, **kwargs)
def tpljobs_counter(self):
return RecipeTemplate.objects.filter(distro=self).count()
def get_op(self):
        if "%" in (self.distroname or ""):
            return "like"
        return "="
class Repository(models.Model):
name = models.CharField(max_length=255, blank=True)
url = models.URLField(unique=True)
def __unicode__(self):
return "%s" % self.name
class Meta:
ordering = ('name', 'url')
verbose_name = _("repository")
verbose_name_plural = _("repositories")
class RecipeTemplate(models.Model, ObjParams):
NONE, RECIPE_MEMBERS, STANDALONE = 0, 1, 2
ROLE_ENUM = (
(NONE, "None"),
(RECIPE_MEMBERS, "RECIPE_MEMBERS"),
(STANDALONE, "STANDALONE"),
)
jobtemplate = models.ForeignKey(JobTemplate, related_name="trecipes")
name = models.CharField(max_length=255, blank=True)
kernel_options = models.CharField(max_length=255, blank=True)
kernel_options_post = models.CharField(max_length=255, blank=True)
ks_meta = models.CharField(max_length=255, blank=True)
role = models.SmallIntegerField(choices=ROLE_ENUM, default=NONE)
arch = models.ManyToManyField(Arch)
memory = models.CharField(max_length=255, blank=True)
disk = models.CharField(
max_length=255, blank=True, help_text="Value is in GB")
hostname = models.CharField(
max_length=255, blank=True,
help_text="Set to '= system42.beaker.example.com' if you want your recipe to run on exactly this system")
labcontroller = models.CharField(
max_length=255, blank=True, help_text="= hostname.lab.example.com")
    hvm = models.BooleanField(_("Support virtualization"), default=False)
params = models.TextField(_("Extra XML parameter"), blank=True)
# generator = models.TextField(_("Recipe generator"), blank=True, help_text="rules for generating recipes")
packages = models.CharField(
_("Extra packages"), max_length=256, blank=True,
help_text="Separate by white space. For example: vim xen")
distro = models.ForeignKey(DistroTemplate)
is_virtualguest = models.BooleanField(default=False)
virtualhost = models.ForeignKey("RecipeTemplate", null=True, blank=True,
related_name="virtualguests")
schedule = models.CharField(
_("Schedule period"), max_length=255, blank=True,
help_text="For example: s390x: 0,2,4; x86_64: 1,3,5,6")
external_repos = models.ManyToManyField(Repository, blank=True)
def __unicode__(self):
name = self.name
if not self.name:
name = "(empty)"
return "%s - %s" % (self.id, name)
def set_role(self, value):
try:
self.role = [it[0] for it in self.ROLE_ENUM if value == it[1]][0]
except IndexError:
print "VALUE %s isn't possible to set as ROLE" % value
def get_role(self):
return [it[1] for it in self.ROLE_ENUM if self.role == it[0]][0]
def get_arch(self):
return self.arch.all()
def archs(self):
return ", ".join([it.name for it in self.get_arch()])
def get_extra_packages(self):
default_packages = list(settings.BEAKER_DEFAULT_PACKAGES)
return self.packages.split() + default_packages
def get_extra_repos(self):
return self.external_repos.all()
def get_tasks(self):
return self.tasks.filter(test__is_enable=True).select_related(
"test").order_by("priority")
# TODO: Remove Arch rotation
# This solution of rotation of Arch is not good idea.
# Better idea is TasksList.
def getArchsForToday(self):
"""
Return list of architecures for today
"""
# Weekday as a decimal number [0(Sunday),6].
weekday = int(timezone.now().strftime("%w"))
# archs = [it.name for it in self.arch.all()]
schedule = self.__parse_schedule_period(self.schedule)
res = list()
for it in schedule:
if ((it[2] == weekday and it[1]) or
(it[2] != weekday and not it[1])):
res.append(Arch.objects.get(name=it[0]))
if res:
return res
# if empty return all archs
return self.get_arch()
def parse(self, st):
return self.__parse_schedule_period(st)
def __parse_schedule_period(self, string):
# i386: 1; s390x: 2; x86_64: 3; i386: 4; x86_64: 5,6,0
# x86_64: !5; i386: 5
if not string:
return []
data = []
for it in string.split(";"):
if not it.strip():
continue
try:
key, val = it.split(":")
except ValueError:
raise ValueError("Parse error: %s" % it)
val = val.strip()
op = True
if val.startswith("!"):
# negation - complement [0-6] for example
# !1 - run every day expect monday
val = val[1:]
op = False
vals = [it.strip() for it in val.split(",")]
key = key.strip()
for val in vals:
if not val:
continue
data.append((key, op, int(val.strip())))
return data
def save(self, *args, **kwargs):
self.__parse_schedule_period(self.schedule)
super(self.__class__, self).save(*args, **kwargs)
class Meta:
ordering = ('name',)
def is_return(self):
return self.jobtemplate.is_return()
def is_reserve(self):
return not self.is_return()
def is_enabled(self):
return self.jobtemplate.is_enable
def clone(self, jobtemplate=None):
groups = self.grouptemplates.all()
tasks = self.tasks.all()
archs = self.arch.all()
self.pk = None
if jobtemplate:
self.jobtemplate = jobtemplate
self.save()
for arch in archs:
self.arch.add(arch)
for it in groups:
it.pk = None
it.recipe = self
it.save()
for it in tasks:
it.pk = None
it.recipe = self
it.save()
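# A minimal sketch of the schedule grammar parsed above: weekday numbers
# use 0 for Sunday, and a leading "!" negates the day.
def _schedule_grammar_sketch():  # pragma: no cover - illustrative only
    parsed = RecipeTemplate().parse("x86_64: 1,3; s390x: !5")
    assert parsed == [("x86_64", True, 1), ("x86_64", True, 3),
                      ("s390x", False, 5)]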
class TaskRoleEnum(models.Model):
name = models.CharField(max_length=255, blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ('name',)
class GroupTemplate(models.Model):
name = models.CharField(max_length=64)
description = models.TextField(blank=True, null=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ('name',)
def clone(self):
tests = self.grouptests.all()
        group = GroupTemplate(name="Clone %s" % self.name,
                              description=self.description)
        group.save()
for test in tests:
test.pk = None
test.group = group
test.save()
class GroupTaskTemplate(ObjParams, models.Model):
group = models.ForeignKey(GroupTemplate, related_name="grouptasks")
recipe = models.ForeignKey(RecipeTemplate, related_name="grouptemplates")
params = models.TextField(blank=True)
priority = models.SmallIntegerField(default=0)
role = models.ForeignKey(TaskRoleEnum, null=True, blank=True)
def __unicode__(self):
return self.group.name
class Meta:
ordering = ('priority',)
class GroupTestTemplate(ObjParams, models.Model):
test = models.ForeignKey(Test)
group = models.ForeignKey(GroupTemplate, related_name="grouptests")
params = models.TextField(blank=True)
priority = models.SmallIntegerField(default=0)
role = models.ForeignKey(TaskRoleEnum, null=True, blank=True)
def __unicode__(self):
return self.test.name
def get_role(self):
if self.role:
return self.role.name
class Meta:
ordering = ('priority',)
class TaskTemplate(ObjParams, models.Model):
BEGIN, PRE_GROUP, POST_GROUP, END = 0, 1, 2, 3
ENUM_POSTION = (
(BEGIN, "Begin"),
(PRE_GROUP, "Pre"),
(POST_GROUP, "Post"),
(END, "End"), )
test = models.ForeignKey(Test)
recipe = models.ForeignKey(RecipeTemplate, related_name="tasks")
params = models.TextField(blank=True)
priority = models.SmallIntegerField(default=0)
role = models.ForeignKey(TaskRoleEnum, null=True, blank=True)
position = models.SmallIntegerField(
default=POST_GROUP, choices=ENUM_POSTION)
def __unicode__(self):
return self.test.name
def get_role(self):
if self.role:
return self.role.name
def set_role(self, value):
if value in ["None", ""]:
self.role = None
else:
self.role, status = TaskRoleEnum.objects.get_or_create(name=value)
self.save()
class Job(models.Model):
template = models.ForeignKey(JobTemplate)
uid = models.CharField("Job ID", max_length=12, unique=True)
date = models.DateTimeField(auto_now_add=True, db_index=True)
schedule = models.ForeignKey(TaskPeriodSchedule, null=True, blank=True)
is_running = models.BooleanField(default=False)
    # this is for checking (not used for data from beaker)
is_finished = models.BooleanField(default=False)
def __unicode__(self):
return self.uid
def get_uid(self):
return self.uid.replace("J:","")
def get_url_beaker(self):
return "%s/%s/" % (settings.BEAKER_SERVER, self.uid)
def get_original_job(self):
        return Job.objects.filter(
            schedule=self.schedule, template=self.template,
            uid__gt=self.uid).order_by('uid').first()
class Recipe(models.Model):
UNKNOW = 0
RUNNING = 1
COMPLETED = 2
WAITING = 3
QUEUED = 4
ABORTED = 5
CANCELLED = 6
NEW = 7
SCHEDULED = 8
PROCESSED = 9
RESERVED = 10
INSTALLING = 11
STATUS_CHOICES = (
(UNKNOW, u"Unknow"),
(NEW, u"New"),
(SCHEDULED, u"Scheduled"),
(RUNNING, u"Running"),
(COMPLETED, u"Completed"),
(WAITING, u"Waiting"),
(QUEUED, u"Queued"),
(ABORTED, u"Aborted"),
(CANCELLED, u"Cancelled"),
(PROCESSED, u"Processed"),
(RESERVED, u"Reserved"),
(INSTALLING, u"Installing")
)
job = models.ForeignKey(Job, related_name="recipes")
uid = models.CharField("Recipe ID", max_length=12, unique=True)
whiteboard = models.CharField(
"Whiteboard",
max_length=256,
blank=True,
null=True)
status = models.SmallIntegerField(choices=STATUS_CHOICES, default=UNKNOW)
result = models.SmallIntegerField(
choices=EnumResult.choices(), default=UNKNOW)
resultrate = models.FloatField(default=-1.)
system = models.ForeignKey(System,)
arch = models.ForeignKey(Arch,)
distro = models.ForeignKey(Distro,)
parentrecipe = models.ForeignKey("Recipe", null=True, blank=True)
statusbyuser = models.SmallIntegerField(
choices=USERSTATUS_CHOICES, default=NONE)
def __unicode__(self):
return self.uid
def get_template(self):
return self.job.template
def set_result(self, value):
try:
self.result = \
[it[0] for it in RESULT_CHOICES if it[1] == value.lower()][0]
except IndexError:
logger.error("IndexError: result %s %s %s" %
(value, self.result, EnumResult.choices()))
def get_uid(self):
return self.uid.replace("R:", "")
def get_result(self):
if self.statusbyuser == WAIVED:
return [it[1] for it in USERSTATUS_CHOICES if it[0] == WAIVED][0]
return [it[1] for it in RESULT_CHOICES if it[0] == self.result][0]
def set_status(self, value):
status = self.status
try:
self.status = [it[0]
for it in self.STATUS_CHOICES if it[1] == value][0]
except IndexError:
logger.error("IndexError: status '%s' (actual status: %s) %s" %
(value, self.status, self.STATUS_CHOICES))
return
if status != self.status:
if self.status in (self.COMPLETED, self.ABORTED,
self.RESERVED, self.CANCELLED):
recipe_finished.send(sender="models:Recipe", recipe=self)
else:
recipe_changed.send(sender="models:Recipe", recipe=self)
def get_status(self):
try:
return [it[1]
for it in self.STATUS_CHOICES if it[0] == self.status][0]
except IndexError:
return "uknow-%s" % self.status
def set_waived(self):
self.statusbyuser = WAIVED
self.save()
def recount_result(self):
# result = Task.objects.values('result', "statusbyuser").filter(
# recipe=self).annotate(Count('result')).order_by("uid")
total, total_ok, waived = 0, 0, False
running = None
failed_test = []
i = 0
for it in Task.objects.filter(recipe=self).order_by("uid"):
if i == 0 and it.result in [EnumResult.FAIL, EnumResult.WARN, EnumResult.ABOART]:
self.result = FAILINSTALL
# self.save()
i += 1
if it.result in [EnumResult.PASS, EnumResult.SKIP] or it.statusbyuser == WAIVED:
total_ok += 1
total += 1
if it.statusbyuser == WAIVED:
waived = True
if it.result in [EnumResult.WARN, EnumResult.FAIL] and it.statusbyuser != WAIVED:
failed_test.append(it)
if it.result == NEW and not running:
running = it
if waived:
if failed_test:
self.result = failed_test[0].result
else:
self.result = EnumResult.PASS
if running and running.test.name == settings.RESERVE_TEST \
and total_ok + 1 == total:
self.set_waived()
if total != 0:
self.resultrate = total_ok * 100. / total
else:
self.resultrate = 0
if waived and total_ok == total:
self.set_waived()
def get_date(self):
return self.job.date
def get_distro_label(self):
dn = self.distro.name
raw = dn.split("-")
if len(raw) > 2:
return "-".join(raw[:-1])
else:
return dn
def get_dict(self):
return {
"arch": self.arch.name,
"distro": self.distro.name,
"distro_label": self.get_distro_label(),
"whiteboard": self.whiteboard,
}
def get_label(self):
return render_label(self.get_dict(), self.job.template.grouprecipes)
def is_running(self):
# this makes about 1000 requests into DB, I think it is not necessary here.
# self.recount_result()
return (self.status in (self.RUNNING, self.RESERVED))
def is_result_pass(self):
return (EnumResult.PASS == self.result)
def reschedule(self):
# TODO
pass
class Task(models.Model):
uid = models.CharField("Task ID", max_length=12, unique=True)
recipe = models.ForeignKey(Recipe, related_name="tasks")
test = models.ForeignKey(Test)
result = models.SmallIntegerField(choices=RESULT_CHOICES,
default=UNKNOW, db_index=True)
status = models.SmallIntegerField(
choices=Recipe.STATUS_CHOICES, default=UNKNOW)
duration = models.FloatField(default=-1.)
datestart = models.DateTimeField(null=True, blank=True)
statusbyuser = models.SmallIntegerField(
choices=USERSTATUS_CHOICES, default=NONE)
alias = models.CharField(max_length=32, blank=True, null=True)
def __unicode__(self):
return self.uid
def logfiles(self):
return list(FileLog.objects.filter(task=self).values("path"))
def set_result(self, value):
try:
self.result = [it[0]
for it in RESULT_CHOICES if it[1] == value.lower()][0]
except IndexError:
logger.error("IndexError: Task result %s %s %s" %
(value, self.result, RESULT_CHOICES))
def get_result(self):
if self.statusbyuser == WAIVED:
return [it[1] for it in USERSTATUS_CHOICES if it[0] == WAIVED][0]
return [it[1] for it in RESULT_CHOICES if it[0] == self.result][0]
def set_status(self, value):
try:
self.status = [it[0]
for it in Recipe.STATUS_CHOICES if it[1].lower() == value.lower()][0]
except IndexError:
logger.error("IndexError: Task status %s %s %s" %
(value, self.status, Recipe.STATUS_CHOICES))
def is_completed(self):
return (self.status == Recipe.COMPLETED)
def set_waived(self):
self.statusbyuser = WAIVED
self.save()
self.recipe.recount_result()
self.recipe.save()
def get_uid(self):
return self.uid.replace("T:", "")
class PhaseLabel(models.Model):
name = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return self.name
class PhaseResult(models.Model):
task = models.ForeignKey(Task)
phase = models.ForeignKey(PhaseLabel)
duration = models.FloatField()
date = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return self.phase
class SkippedPhase(models.Model):
id_task = models.IntegerField()
id_phase = models.IntegerField()
class CheckProgress(models.Model):
ARCHIVE_LAST_CHECKS = 1000
datestart = models.DateTimeField(default=timezone.now)
dateend = models.DateTimeField(null=True, blank=True)
totalsum = models.IntegerField()
actual = models.IntegerField(default=0)
class Meta:
verbose_name = _("Check progress")
verbose_name_plural = _("Check progress")
def __unicode__(self):
return "%s" % self.datestart
def counter(self):
self.actual += 1
self.save()
def percent(self):
if self.totalsum == 0:
return None
return int(self.actual * 100 / self.totalsum)
def finished(self):
self.dateend = timezone.now()
self.save()
def get_duration(self):
if self.dateend:
return (self.dateend - self.datestart)
@staticmethod
def Restore():
for it in CheckProgress.objects.filter(dateend=None):
it.dateend = timezone.now()
it.save()
@staticmethod
def Clean():
for it in CheckProgress.objects.order_by("-id")[CheckProgress.ARCHIVE_LAST_CHECKS:]:
it.delete()
@staticmethod
def IsRunning():
for it in CheckProgress.objects.filter(dateend=None).order_by("-datestart"):
duration = (timezone.now() - it.datestart)
if duration.days * 24 + duration.seconds / (60. * 60) > 1:
it.dateend = timezone.now()
it.save()
else:
return True
return False
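# A minimal sketch of the progress arithmetic used above.
def _check_progress_sketch():  # pragma: no cover - illustrative only
    progress = CheckProgress(totalsum=200, actual=50)
    assert progress.percent() == 25  # int(actual * 100 / totalsum)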
class Event(models.Model):
"""Universal object for craating notification for users"""
ALERT_SUCCESS = 0
ALERT_INFO = 1
ALERT_WARNING = 2
ALERT_DANGER = 3
ENUM_ALERT = (
(ALERT_SUCCESS, "success"),
(ALERT_INFO, "info"),
(ALERT_WARNING, "warning"),
(ALERT_DANGER, "danger"),
)
title = models.CharField(max_length=126)
url = models.CharField(max_length=256)
description = models.TextField()
alert = models.SmallIntegerField(
choices=ENUM_ALERT, default=ALERT_INFO)
is_enabled = models.BooleanField(default=True)
datestart = models.DateTimeField(default=timezone.now)
dateend = models.DateTimeField(default=timezone.now, null=True, blank=True)
def __unicode__(self):
return "%s" % (self.title)
def get_alert(self):
return filter(lambda x: x[0] == self.alert, self.ENUM_ALERT)[0][1]
class FileLog(models.Model):
recipe = models.ForeignKey(Recipe)
task = models.ForeignKey(Task, blank=True, null=True,
related_name="logfiles")
url = models.CharField(max_length=256, unique=True)
path = models.CharField(max_length=256, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
index_id = models.CharField(max_length=126, blank=True, null=True)
    is_downloaded = models.BooleanField(_("File is downloaded"), default=False)
is_indexed = models.BooleanField(_("File is indexed"), default=False)
to_removed = models.BooleanField(default=False)
status_code = models.SmallIntegerField(default=0)
logger = logging.getLogger("indexing")
def __unicode__(self):
return "%s" % self.path
def absolute_path(self):
return os.path.realpath(
os.path.join(settings.STORAGE_ROOT, "./%s" % self.path))
def get_absolute_url(self):
return "%s%s" % (settings.STORAGE_URL, self.path)
def delete(self, *args, **kwargs):
def clean_dir(path):
if path is None:
self.logger.warning("This file is not download %s" % self.id)
return
path_dir = os.path.dirname(path)
if path_dir in ("/", path):
return
absolute_path = os.path.join(
settings.STORAGE_ROOT, "./%s" % path_dir)
if os.path.exists(absolute_path):
if len(os.listdir(absolute_path)) == 0:
logger.info("empty dir to remove %s" % absolute_path)
os.rmdir(absolute_path)
clean_dir(path_dir)
if self.is_downloaded:
file_path = self.absolute_path()
clean_dir(self.path)
if os.path.exists(file_path):
os.remove(file_path)
else:
self.logger.warning("the file %s doesn't exist" % file_path)
if settings.ELASTICSEARCH:
self.index_remove()
super(FileLog, self).delete(*args, **kwargs)
def set_task_uid(self):
# get taskid from path
# /recipes/3545476/tasks/51898719/logs/journal.xml
logparse = urlparse(self.url)
res = re.match(r'.*/tasks/([0-9]+)/logs/[^/]+$', logparse.path)
try:
if res:
uid = res.group(1)
self.task = Task.objects.get(uid="T:%s" % uid)
else:
res = re.match(r'.*[+]/([0-9]+)/[^/]+$', logparse.path)
if res:
uid = res.group(1)
self.task = Task.objects.get(uid="T:%s" % uid)
except Task.DoesNotExist:
logger.warn("%s doesn't exists for %s" %
(uid, self.path))
def save(self, *args, **kwargs):
if not self.task:
self.set_task_uid()
super(FileLog, self).save(*args, **kwargs)
def parse_journal(self):
if self.get_basename() != "journal.xml":
return False
def get_element_time(parser, value):
for it in parser.getElementsByTagName(value):
for item in it.childNodes:
return parse(item.data)
file_path = self.absolute_path()
        with open(file_path) as f:
            content = f.read()
parser = parseString(content)
starttime = get_element_time(parser, "starttime")
# endtime = get_element_time(parser, "endtime")
if self.task and starttime:
self.task.datestart = starttime
# self.task.endtime = endtime
self.task.save()
return starttime
def get_basename(self):
return os.path.basename(self.path)
@staticmethod
def clean_old(days=settings.LOGFILE_LIFETIME):
to_delete = timezone.now() - timedelta(days=days)
logs = FileLog.objects.filter(
created__lt=to_delete).order_by("created")
logger.info("%d logs to prepare remove" % len(logs))
# remove all file and dirs
for it in logs[:settings.MAX_LOGS_IN_ONE_CHECK]:
logger.debug("remove file %s" % it)
it.delete()
def index_remove(self):
if not self.is_indexed:
return
es = Elasticsearch(settings.ELASTICSEARCH, timeout=60)
name = self.get_basename()
try:
es.delete(index=name.lower(), doc_type="log", id=self.id)
except Exception as e:
logger.debug("delete index: %s" % e)
self.is_indexed = False
self.save()
def index(self):
es = Elasticsearch(settings.ELASTICSEARCH, timeout=60)
        with open(self.absolute_path()) as f:
            c = unicode(f.read(settings.ELASTICSEARCH_MAX_SIZE),
                        errors='ignore')
        content = json.dumps(c)
name = self.get_basename()
try:
res = es.index(index=name.lower(), doc_type="log", id=self.id,
body={"content": content,
"job": self.recipe.job.id,
"whiteboard": self.recipe.job.template.whiteboard,
"recipe": self.recipe.uid,
"datestart": self.task.datestart if self.task else '',
"duration": self.task.duration if self.task else '',
"period": self.recipe.job.schedule.id if self.recipe.job.schedule else None,
"task": self.task.uid if self.task and self.task.uid else '',
"file_id": self.id,
"path": self.path})
if res["created"]:
self.index_id = res["_id"]
if str(self.id) != res["_id"]:
logger.debug("file %s has incorect id %s" %
(self.id, res["_id"]))
self.is_indexed = True
self.save()
except Exception as e:
logger.debug("indexing: %s" % e)
| SatelliteQE/GreenTea | apps/core/models.py | Python | gpl-2.0 | 55,908 |
import os
from pkg_resources import resource_filename
def get_fn(name):
"""Get the full path to one of the reference files shipped for testing
In the source distribution, these files are in ``thermopyl/data``,
but on installation, they're moved to somewhere in the user's python
site-packages directory.
Parameters
----------
name : str
Name of the file to load (with respect to the reference/ folder).
"""
fn = resource_filename('thermopyl', os.path.join('data', name))
if not os.path.exists(fn):
        raise ValueError('Sorry! %s does not exist. If you just '
                         'added it, you\'ll have to reinstall.' % fn)
return fn
def make_path(filename):
try:
path = os.path.split(filename)[0]
os.makedirs(path)
except OSError:
pass
def build_pandas_dataframe(filenames):
"""
Build pandas dataframe for property data and compounds.
Parameters
----------
filenames : list
List of ThermoML filenames to process.
Returns
-------
data : pandas DataFrame
Compiled ThermoML dataframe
compounds : pandas Dataframe
Compounds dataframe
"""
import pandas as pd
from thermopyl import Parser
data = []
compound_dict = {}
for filename in filenames:
print(filename)
try:
parser = Parser(filename)
current_data = parser.parse()
current_data = pd.DataFrame(current_data)
data.append(current_data)
compound_dict.update(parser.compound_name_to_formula)
except Exception as e:
print(e)
data = pd.concat(data, copy=False, ignore_index=True) # Because the input data is a list of DataFrames, this saves a LOT of memory! Ignore the index to return unique index.
compounds = pd.Series(compound_dict)
return [data, compounds]
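# A minimal usage sketch (the XML file names are hypothetical): compile a
# few ThermoML files and store the result where pandas_dataframe() below
# will look for it; the HDF5 key name here is an assumption.
def _build_dataframe_sketch():  # pragma: no cover - illustrative only
    data, compounds = build_pandas_dataframe(["je8006138.xml",
                                              "je900212f.xml"])
    data.to_hdf("data.h5", "data")  # hypothetical key name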
def pandas_dataframe(thermoml_path=None):
"""Read the ThermoPyL dataset into a Pandas dataframe.
Parameters
----------
thermoml_path : str, optional, default=None
If specified, search here for the `data.h5` file compiled by `thermoml-build-pandas`.
If None, will try environment variable `THERMOML_PATH` followed by `$HOME/.thermopyl`
Returns
-------
df : pandas.core.frame.DataFrame
pandas dataframe containing ThermoML data
"""
if thermoml_path is None:
# Try to obtain the path to the local ThermoML Archive mirror from an environment variable.
if 'THERMOML_PATH' in os.environ:
# Check THERMOML_PATH environment variable
hdf5_filename = os.path.join(os.environ["THERMOML_PATH"], 'data.h5')
else:
# Use default path of ~/.thermoml
hdf5_filename = os.path.join(os.environ["HOME"], '.thermoml', 'data.h5')
else:
hdf5_filename = os.path.join(thermoml_path, 'data.h5')
if not os.path.exists(hdf5_filename):
if thermoml_path is None:
msg = 'Could not find `data.h5` in either $THERMOML_PATH or ~/.thermoml\n'
msg += 'Make sure you have run `thermoml-build-pandas` and it has completed successfully'
else:
msg = 'Could not find `data.h5` in specified path `%s`' % thermoml_path
raise Exception(msg)
import pandas as pd
df = pd.read_hdf(hdf5_filename)
return df
| choderalab/ThermoPyL | thermopyl/utils.py | Python | gpl-2.0 | 3,405 |
#!/usr/bin/python
import spimage
import pylab
# Read input image
img = spimage.sp_image_read('../ring/raw_ring.h5',0)
# Convolve with a gaussian of
# 2 pixel standard deviation
img_blur = spimage.sp_gaussian_blur(img,2.0)
rel_diff = abs((pylab.real(img_blur.image)-
pylab.real(img.image))
/pylab.real(img_blur.image))
# Plot relative difference
pylab.imshow(rel_diff,vmin = 0,vmax = 0.5)
pylab.colorbar()
pylab.show()
| FilipeMaia/hawk | examples/python/plot_pattern_roughness.py | Python | gpl-2.0 | 448 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2017-2018 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" LaTeX Genealogy Tree adapter for Trees """
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from abc import ABCMeta, abstractmethod
import os
import shutil
import re
from subprocess import Popen, PIPE
from io import StringIO
import tempfile
import logging
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ...utils.file import search_for
from ...lib import Person, EventType, EventRoleType, Date
from ...display.place import displayer as _pd
from ...utils.file import media_path_full
from . import BaseDoc, PAPER_PORTRAIT
from ..menu import NumberOption, TextOption, EnumeratedListOption
from ...constfunc import win
from ...config import config
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
LOG = logging.getLogger(".treedoc")
#-------------------------------------------------------------------------
#
# Private Constants
#
#-------------------------------------------------------------------------
_DETAIL = [{'name': _("Full"), 'value': "full"},
{'name': _("Medium"), 'value': "medium"},
{'name': _("Short"), 'value': "short"}]
_MARRIAGE = [{'name': _("Default"), 'value': ""},
{'name': _("Above"), 'value': "marriage above"},
{'name': _("Below"), 'value': "marriage below"},
{'name': _("Not shown"), 'value': "no marriage"}]
_COLOR = [{'name': _("None"), 'value': "none"},
{'name': _("Default"), 'value': "default"},
{'name': _("Preferences"), 'value': "preferences"}]
_TIMEFLOW = [{'name': _("Down (↓)"), 'value': ""},
{'name': _("Up (↑)"), 'value': "up"},
{'name': _("Right (→)"), 'value': "right"},
{'name': _("Left (←)"), 'value': "left"}]
_EDGES = [{'name': _("Perpendicular"), 'value': ""},
{'name': _("Rounded"), 'value': "rounded", },
{'name': _("Swing"), 'value': "swing", },
{'name': _("Mesh"), 'value': 'mesh'}]
_NOTELOC = [{'name': _("Top"), 'value': "t"},
{'name': _("Bottom"), 'value': "b"}]
_NOTESIZE = [{'name': _("Tiny"), 'value': "tiny"},
{'name': _("Script"), 'value': "scriptsize"},
{'name': _("Footnote"), 'value': "footnotesize"},
{'name': _("Small"), 'value': "small"},
{'name': _("Normal"), 'value': "normalsize"},
{'name': _("Large"), 'value': "large"},
{'name': _("Very large"), 'value': "Large"},
{'name': _("Extra large"), 'value': "LARGE"},
{'name': _("Huge"), 'value': "huge"},
{'name': _("Extra huge"), 'value': "Huge"}]
if win():
_LATEX_FOUND = search_for("lualatex.exe")
DETACHED_PROCESS = 8
else:
_LATEX_FOUND = search_for("lualatex")
def escape(text):
lookup = {
'&': '\\&',
'%': '\\%',
'$': '\\$',
'#': '\\#',
'_': '\\_',
'{': '\\{',
'}': '\\}',
'~': '\\~{}',
'^': '\\^{}',
'\\': '\\textbackslash{}'
}
pattern = re.compile('|'.join([re.escape(key) for key in lookup.keys()]))
return pattern.sub(lambda match: lookup[match.group(0)], text)
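# A minimal sketch of what escape() produces for the LaTeX special
# characters handled above:
def _escape_sketch():  # pragma: no cover - illustrative only
    assert escape('50% of A&B_v2') == '50\\% of A\\&B\\_v2'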
#------------------------------------------------------------------------------
#
# TreeOptions
#
#------------------------------------------------------------------------------
class TreeOptions:
"""
Defines all of the controls necessary
to configure the genealogy tree reports.
"""
def add_menu_options(self, menu):
"""
Add all graph related options to the menu.
:param menu: The menu the options should be added to.
:type menu: :class:`.Menu`
:return: nothing
"""
################################
category = _("Node Options")
################################
detail = EnumeratedListOption(_("Node detail"), "full")
for item in _DETAIL:
detail.add_item(item["value"], item["name"])
detail.set_help(_("Detail of information to be shown in a node."))
menu.add_option(category, "detail", detail)
marriage = EnumeratedListOption(_("Marriage"), "")
for item in _MARRIAGE:
marriage.add_item(item["value"], item["name"])
marriage.set_help(_("Position of marriage information."))
menu.add_option(category, "marriage", marriage)
nodesize = NumberOption(_("Node size"), 25, 5, 100, 5)
nodesize.set_help(_("One dimension of a node, in mm. If the timeflow "
"is up or down then this is the width, otherwise "
"it is the height."))
menu.add_option(category, "nodesize", nodesize)
levelsize = NumberOption(_("Level size"), 35, 5, 100, 5)
levelsize.set_help(_("One dimension of a node, in mm. If the timeflow "
"is up or down then this is the height, otherwise "
"it is the width."))
menu.add_option(category, "levelsize", levelsize)
nodecolor = EnumeratedListOption(_("Color"), "none")
for item in _COLOR:
nodecolor.add_item(item["value"], item["name"])
nodecolor.set_help(_("Node color."))
menu.add_option(category, "nodecolor", nodecolor)
################################
category = _("Tree Options")
################################
timeflow = EnumeratedListOption(_("Timeflow"), "")
for item in _TIMEFLOW:
timeflow.add_item(item["value"], item["name"])
timeflow.set_help(_("Direction that the graph will grow over time."))
menu.add_option(category, "timeflow", timeflow)
edges = EnumeratedListOption(_("Edge style"), "")
for item in _EDGES:
edges.add_item(item["value"], item["name"])
edges.set_help(_("Style of the edges between nodes."))
menu.add_option(category, "edges", edges)
leveldist = NumberOption(_("Level distance"), 5, 1, 20, 1)
leveldist.set_help(_("The minimum amount of free space, in mm, "
"between levels. For vertical graphs, this "
"corresponds to spacing between rows. For "
"horizontal graphs, this corresponds to spacing "
"between columns."))
menu.add_option(category, "leveldist", leveldist)
################################
category = _("Note")
################################
note = TextOption(_("Note to add to the tree"), [""])
note.set_help(_("This text will be added to the tree."))
menu.add_option(category, "note", note)
noteloc = EnumeratedListOption(_("Note location"), 't')
for item in _NOTELOC:
noteloc.add_item(item["value"], item["name"])
noteloc.set_help(_("Whether note will appear on top "
"or bottom of the page."))
menu.add_option(category, "noteloc", noteloc)
notesize = EnumeratedListOption(_("Note size"), 'normalsize')
for item in _NOTESIZE:
notesize.add_item(item["value"], item["name"])
notesize.set_help(_("The size of note text."))
menu.add_option(category, "notesize", notesize)
#------------------------------------------------------------------------------
#
# TreeDoc
#
#------------------------------------------------------------------------------
class TreeDoc(metaclass=ABCMeta):
"""
Abstract Interface for genealogy tree document generators. Output formats
for genealogy tree reports must implement this interface to be used by the
report system.
"""
@abstractmethod
def start_tree(self, option_list):
"""
Write the start of a tree.
"""
@abstractmethod
def end_tree(self):
"""
Write the end of a tree.
"""
@abstractmethod
def start_subgraph(self, level, subgraph_type, family, option_list=None):
"""
Write the start of a subgraph.
"""
@abstractmethod
def end_subgraph(self, level):
"""
Write the end of a subgraph.
"""
@abstractmethod
def write_node(self, db, level, node_type, person, marriage_flag,
option_list=None):
"""
Write the contents of a node.
"""
#------------------------------------------------------------------------------
#
# TreeDocBase
#
#------------------------------------------------------------------------------
class TreeDocBase(BaseDoc, TreeDoc):
"""
    Base document generator for all genealogy tree document generators.
    Classes that inherit from this class will only need to implement the
    close function, which generates the actual file of the appropriate type.
"""
def __init__(self, options, paper_style):
BaseDoc.__init__(self, None, paper_style)
self._filename = None
self._tex = StringIO()
self._paper = paper_style
get_option = options.menu.get_option_by_name
self.detail = get_option('detail').get_value()
self.marriage = get_option('marriage').get_value()
self.nodesize = get_option('nodesize').get_value()
self.levelsize = get_option('levelsize').get_value()
self.nodecolor = get_option('nodecolor').get_value()
self.timeflow = get_option('timeflow').get_value()
self.edges = get_option('edges').get_value()
self.leveldist = get_option('leveldist').get_value()
self.note = get_option('note').get_value()
self.noteloc = get_option('noteloc').get_value()
self.notesize = get_option('notesize').get_value()
def write_start(self):
"""
Write the start of the document.
"""
paper_size = self._paper.get_size()
name = paper_size.get_name().lower()
if name == 'custom size':
width = str(paper_size.get_width())
            height = str(paper_size.get_height())
paper = 'papersize={%scm,%scm}' % (width, height)
elif name in ('a', 'b', 'c', 'd', 'e'):
paper = 'ansi' + name + 'paper'
else:
paper = name + 'paper'
if self._paper.get_orientation() == PAPER_PORTRAIT:
orientation = 'portrait'
else:
orientation = 'landscape'
lmargin = self._paper.get_left_margin()
rmargin = self._paper.get_right_margin()
tmargin = self._paper.get_top_margin()
bmargin = self._paper.get_bottom_margin()
if lmargin == rmargin == tmargin == bmargin:
            margin = 'margin=%scm' % lmargin
else:
if lmargin == rmargin:
margin = 'hmargin=%scm' % lmargin
else:
margin = 'hmargin={%scm,%scm}' % (lmargin, rmargin)
if tmargin == bmargin:
margin += ',vmargin=%scm' % tmargin
else:
margin += ',vmargin={%scm,%scm}' % (tmargin, bmargin)
self.write(0, '\\documentclass[%s]{article}\n' % orientation)
self.write(0, '\\IfFileExists{libertine.sty}{\n')
self.write(0, ' \\usepackage{libertine}\n')
self.write(0, '}{}\n')
self.write(0, '\\usepackage[%s,%s]{geometry}\n' % (paper, margin))
self.write(0, '\\usepackage[all]{genealogytree}\n')
self.write(0, '\\usepackage{color}\n')
self.write(0, '\\begin{document}\n')
if self.nodecolor == 'preferences':
scheme = config.get('colors.scheme')
male_bg = config.get('colors.male-dead')[scheme][1:]
female_bg = config.get('colors.female-dead')[scheme][1:]
neuter_bg = config.get('colors.unknown-dead')[scheme][1:]
self.write(0, '\\definecolor{male-bg}{HTML}{%s}\n' % male_bg)
self.write(0, '\\definecolor{female-bg}{HTML}{%s}\n' % female_bg)
self.write(0, '\\definecolor{neuter-bg}{HTML}{%s}\n' % neuter_bg)
if ''.join(self.note) != '' and self.noteloc == 't':
for line in self.note:
self.write(0, '{\\%s %s}\\par\n' % (self.notesize, line))
self.write(0, '\\bigskip\n')
self.write(0, '\\begin{tikzpicture}\n')
def start_tree(self, option_list):
self.write(0, '\\genealogytree[\n')
self.write(0, 'processing=database,\n')
if self.marriage:
info = self.detail + ' ' + self.marriage
else:
info = self.detail
self.write(0, 'database format=%s,\n' % info)
if self.timeflow:
self.write(0, 'timeflow=%s,\n' % self.timeflow)
if self.edges:
self.write(0, 'edges=%s,\n' % self.edges)
if self.leveldist != 5:
self.write(0, 'level distance=%smm,\n' % self.leveldist)
if self.nodesize != 25:
self.write(0, 'node size=%smm,\n' % self.nodesize)
if self.levelsize != 35:
self.write(0, 'level size=%smm,\n' % self.levelsize)
if self.nodecolor == 'none':
self.write(0, 'tcbset={male/.style={},\n')
self.write(0, ' female/.style={},\n')
self.write(0, ' neuter/.style={}},\n')
if self.nodecolor == 'preferences':
self.write(0, 'tcbset={male/.style={colback=male-bg},\n')
self.write(0, ' female/.style={colback=female-bg},\n')
self.write(0, ' neuter/.style={colback=neuter-bg}},\n')
for option in option_list:
self.write(0, '%s,\n' % option)
self.write(0, ']{\n')
def end_tree(self):
self.write(0, '}\n')
def write_end(self):
"""
Write the end of the document.
"""
self.write(0, '\\end{tikzpicture}\n')
if ''.join(self.note) != '' and self.noteloc == 'b':
self.write(0, '\\bigskip\n')
for line in self.note:
self.write(0, '\\par{\\%s %s}\n' % (self.notesize, line))
self.write(0, '\\end{document}\n')
def start_subgraph(self, level, subgraph_type, family, option_list=None):
options = ['id=%s' % family.gramps_id]
if option_list:
options.extend(option_list)
if subgraph_type == 'sandclock':
self.write(level, 'sandclock{\n')
else:
self.write(level, '%s[%s]{\n' % (subgraph_type, ','.join(options)))
def end_subgraph(self, level):
self.write(level, '}\n')
def write_node(self, db, level, node_type, person, marriage_flag,
option_list=None):
options = ['id=%s' % person.gramps_id]
if option_list:
options.extend(option_list)
self.write(level, '%s[%s]{\n' % (node_type, ','.join(options)))
if person.gender == Person.MALE:
self.write(level+1, 'male,\n')
elif person.gender == Person.FEMALE:
self.write(level+1, 'female,\n')
elif person.gender == Person.UNKNOWN:
self.write(level+1, 'neuter,\n')
name = person.get_primary_name()
nick = name.get_nick_name()
surn = name.get_surname()
name_parts = [self.format_given_names(name),
'\\nick{{{}}}'.format(escape(nick)) if nick else '',
'\\surn{{{}}}'.format(escape(surn)) if surn else '']
self.write(level+1, 'name = {{{}}},\n'.format(
' '.join([e for e in name_parts if e])))
for eventref in person.get_event_ref_list():
if eventref.role == EventRoleType.PRIMARY:
event = db.get_event_from_handle(eventref.ref)
self.write_event(db, level+1, event)
if marriage_flag:
for handle in person.get_family_handle_list():
family = db.get_family_from_handle(handle)
for eventref in family.get_event_ref_list():
if eventref.role == EventRoleType.FAMILY:
event = db.get_event_from_handle(eventref.ref)
self.write_event(db, level+1, event)
for attr in person.get_attribute_list():
if str(attr.get_type()) == 'Occupation':
self.write(level+1, 'profession = {%s},\n' %
escape(attr.get_value()))
if str(attr.get_type()) == 'Comment':
self.write(level+1, 'comment = {%s},\n' %
escape(attr.get_value()))
for mediaref in person.get_media_list():
media = db.get_media_from_handle(mediaref.ref)
path = media_path_full(db, media.get_path())
if os.path.isfile(path):
if win():
path = path.replace('\\', '/')
self.write(level+1, 'image = {{%s}%s},\n' %
os.path.splitext(path))
break # first image only
self.write(level, '}\n')
def write_event(self, db, level, event):
"""
Write an event.
"""
modifier = None
if event.type == EventType.BIRTH:
event_type = 'birth'
if 'died' in event.description.lower():
modifier = 'died'
if 'stillborn' in event.description.lower():
modifier = 'stillborn'
# modifier = 'out of wedlock'
elif event.type == EventType.BAPTISM:
event_type = 'baptism'
elif event.type == EventType.ENGAGEMENT:
event_type = 'engagement'
elif event.type == EventType.MARRIAGE:
event_type = 'marriage'
elif event.type == EventType.DIVORCE:
event_type = 'divorce'
elif event.type == EventType.DEATH:
event_type = 'death'
elif event.type == EventType.BURIAL:
event_type = 'burial'
if 'killed' in event.description.lower():
modifier = 'killed'
elif event.type == EventType.CREMATION:
event_type = 'burial'
modifier = 'cremated'
else:
return
date = event.get_date_object()
if date.get_calendar() == Date.CAL_GREGORIAN:
            calendar = 'AD' # Gregorian
elif date.get_calendar() == Date.CAL_JULIAN:
calendar = 'JU'
else:
calendar = ''
if date.get_modifier() == Date.MOD_ABOUT:
calendar = 'ca' + calendar
date_str = self.format_iso(date.get_ymd(), calendar)
if date.get_modifier() == Date.MOD_BEFORE:
date_str = '/' + date_str
elif date.get_modifier() == Date.MOD_AFTER:
date_str = date_str + '/'
elif date.is_compound():
stop_date = self.format_iso(date.get_stop_ymd(), calendar)
date_str = date_str + '/' + stop_date
place = escape(_pd.display_event(db, event))
if modifier:
event_type += '+'
self.write(level, '%s = {%s}{%s}{%s},\n' %
(event_type, date_str, place, modifier))
elif place == '':
event_type += '-'
self.write(level, '%s = {%s},\n' % (event_type, date_str))
else:
self.write(level, '%s = {%s}{%s},\n' %
(event_type, date_str, place))
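    # Illustrative output (editorial note, not in the original source): for a
    # birth on 1899-03-14 in Vienna the branches above emit genealogytree
    # fields such as
    #   birth = {1899-03-14}{Vienna},
    #   birth- = {1899-03-14},                     (no place recorded)
    #   birth+ = {1899-03-14}{Vienna}{stillborn},  (with a modifier)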
def format_given_names(self, name):
"""
Format given names.
"""
first = name.get_first_name()
call = name.get_call_name()
if call:
if call in first:
where = first.index(call)
return '{before}\\pref{{{call}}}{after}'.format(
before=escape(first[:where]),
call=escape(call),
after=escape(first[where+len(call):]))
else:
# ignore erroneous call name
return escape(first)
else:
return escape(first)
def format_iso(self, date_tuple, calendar):
"""
Format an iso date.
"""
year, month, day = date_tuple
if year == 0:
iso_date = ''
elif month == 0:
iso_date = str(year)
elif day == 0:
iso_date = '%s-%s' % (year, month)
else:
iso_date = '%s-%s-%s' % (year, month, day)
if calendar and calendar != 'AD':
iso_date = '(%s)%s' % (calendar, iso_date)
return iso_date
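    # Illustrative examples (editorial note, not in the original source):
    #   format_iso((1806, 7, 0), 'JU')   returns '(JU)1806-7'
    #   format_iso((1806, 0, 0), 'caAD') returns '(caAD)1806'
    #   format_iso((0, 0, 0), 'AD')      returns ''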
def write(self, level, text):
"""
Write indented text.
"""
self._tex.write(' '*level + text)
def open(self, filename):
""" Implement TreeDocBase.open() """
self._filename = os.path.normpath(os.path.abspath(filename))
self.write_start()
def close(self):
"""
This isn't useful by itself. Other classes need to override this and
actually generate a file.
"""
self.write_end()
#------------------------------------------------------------------------------
#
# TreeTexDoc
#
#------------------------------------------------------------------------------
class TreeTexDoc(TreeDocBase):
"""
TreeTexDoc implementation that generates a .tex file.
"""
def close(self):
""" Implements TreeDocBase.close() """
TreeDocBase.close(self)
# Make sure the extension is correct
if self._filename[-4:] != ".tex":
self._filename += ".tex"
with open(self._filename, 'w', encoding='utf-8') as texfile:
texfile.write(self._tex.getvalue())
#------------------------------------------------------------------------------
#
# TreePdfDoc
#
#------------------------------------------------------------------------------
class TreePdfDoc(TreeDocBase):
"""
TreePdfDoc implementation that generates a .pdf file.
"""
def close(self):
""" Implements TreeDocBase.close() """
TreeDocBase.close(self)
# Make sure the extension is correct
if self._filename[-4:] != ".pdf":
self._filename += ".pdf"
with tempfile.TemporaryDirectory() as tmpdir:
basename = os.path.basename(self._filename)
args = ['lualatex', '-output-directory', tmpdir,
'-jobname', basename[:-4]]
if win():
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE,
creationflags=DETACHED_PROCESS)
else:
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
proc.communicate(input=self._tex.getvalue().encode('utf-8'))
shutil.copy(os.path.join(tmpdir, basename), self._filename)
#------------------------------------------------------------------------------
#
# Various Genealogy Tree formats.
#
#------------------------------------------------------------------------------
FORMATS = []
if _LATEX_FOUND:
FORMATS += [{'type' : "pdf",
'ext' : "pdf",
'descr': _("PDF"),
'mime' : "application/pdf",
'class': TreePdfDoc}]
FORMATS += [{'type' : "tex",
'ext' : "tex",
'descr': _("LaTeX File"),
'mime' : "application/x-latex",
'class': TreeTexDoc}]
| prculley/gramps | gramps/gen/plug/docgen/treedoc.py | Python | gpl-2.0 | 24,461 |
from Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.MessageBox import MessageBox
from Components.InputDevice import iInputDevices, iRcTypeControl
from Components.Sources.StaticText import StaticText
from Components.Sources.List import List
from Components.config import config, ConfigYesNo, getConfigListEntry, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap, HelpableActionMap
from Tools.Directories import resolveFilename, SCOPE_CURRENT_SKIN
from Tools.LoadPixmap import LoadPixmap
from boxbranding import getBoxType, getMachineBrand, getMachineName
class InputDeviceSelection(Screen, HelpableScreen):
skin = """
<screen name="InputDeviceSelection" position="center,center" size="560,400" title="Select input device">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on"/>
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on"/>
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on"/>
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on"/>
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1"/>
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1"/>
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1"/>
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1"/>
<widget source="list" render="Listbox" position="5,50" size="550,280" zPosition="10" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
<!-- device, description, devicepng, divpng -->
{"template": [
				MultiContentEntryPixmapAlphaTest(pos = (2, 8), size = (54, 54), png = 2), # index 2 is the interface pixmap
MultiContentEntryText(pos = (65, 6), size = (450, 54), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_CENTER|RT_WRAP, text = 1), # index 1 is the interfacename
],
"fonts": [gFont("Regular", 28),gFont("Regular", 20)],
"itemHeight": 70
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,340" zPosition="1" size="560,2"/>
<widget source="introduction" render="Label" position="0,350" size="560,50" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1"/>
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self.edittext = _("Press OK to edit the settings.")
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Select"))
self["key_yellow"] = StaticText("")
self["key_blue"] = StaticText("")
self["introduction"] = StaticText(self.edittext)
		self.devices = [(iInputDevices.getDeviceName(x), x) for x in iInputDevices.getDeviceList()]
		print "[InputDeviceSelection] found devices :->", len(self.devices), self.devices
self["OkCancelActions"] = HelpableActionMap(self, "OkCancelActions",
{
"cancel": (self.close, _("Exit input device selection.")),
"ok": (self.okbuttonClick, _("Select input device.")),
}, -2)
self["ColorActions"] = HelpableActionMap(self, "ColorActions",
{
"red": (self.close, _("Exit input device selection.")),
"green": (self.okbuttonClick, _("Select input device.")),
}, -2)
self.currentIndex = 0
self.list = []
self["list"] = List(self.list)
self.updateList()
self.onLayoutFinish.append(self.layoutFinished)
self.onClose.append(self.cleanup)
def layoutFinished(self):
self.setTitle(_("Select input device"))
def cleanup(self):
self.currentIndex = 0
def buildInterfaceList(self, device, description, type, isinputdevice = True):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
activepng = None
devicepng = None
enabled = iInputDevices.getDeviceAttribute(device, 'enabled')
if type == 'remote':
if config.misc.rcused.getValue() == 0:
if enabled:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_rcnew-configured.png"))
else:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_rcnew.png"))
else:
if enabled:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_rcold-configured.png"))
else:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_rcold.png"))
elif type == 'keyboard':
if enabled:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_keyboard-configured.png"))
else:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_keyboard.png"))
elif type == 'mouse':
if enabled:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_mouse-configured.png"))
else:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_mouse.png"))
elif isinputdevice:
devicepng = LoadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/input_rcnew.png"))
		return (device, description, devicepng, divpng)
def updateList(self):
self.list = []
if iRcTypeControl.multipleRcSupported():
self.list.append(self.buildInterfaceList('rctype', _('Configure remote control type'), None, False))
for x in self.devices:
dev_type = iInputDevices.getDeviceAttribute(x[1], 'type')
self.list.append(self.buildInterfaceList(x[1],_(x[0]), dev_type))
self["list"].setList(self.list)
self["list"].setIndex(self.currentIndex)
def okbuttonClick(self):
selection = self["list"].getCurrent()
self.currentIndex = self["list"].getIndex()
if selection is not None:
if selection[0] == 'rctype':
self.session.open(RemoteControlType)
else:
self.session.openWithCallback(self.DeviceSetupClosed, InputDeviceSetup, selection[0])
def DeviceSetupClosed(self, *ret):
self.updateList()
class InputDeviceSetup(Screen, ConfigListScreen):
skin = """
<screen name="InputDeviceSetup" position="center,center" size="560,440" title="Input device setup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="config" position="5,50" size="550,350" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/div-h.png" position="0,400" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="5,410" size="550,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, device):
Screen.__init__(self, session)
self.inputDevice = device
iInputDevices.currentDevice = self.inputDevice
self.onChangedEntry = [ ]
self.setup_title = _("Input device setup")
self.isStepSlider = None
self.enableEntry = None
self.repeatEntry = None
self.delayEntry = None
self.nameEntry = None
self.enableConfigEntry = None
self.list = [ ]
ConfigListScreen.__init__(self, self.list, session = session, on_change = self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.apply,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText()
# for generating strings into .po only
		devicenames = [_("%s %s front panel") % (getMachineBrand(), getMachineName()),
			_("%s %s front panel") % (getMachineBrand(), getMachineName()),
			_("%s %s remote control (native)") % (getMachineBrand(), getMachineName()),
			_("%s %s advanced remote control (native)") % (getMachineBrand(), getMachineName()),
			_("%s %s ir keyboard") % (getMachineBrand(), getMachineName()),
			_("%s %s ir mouse") % (getMachineBrand(), getMachineName())]
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
self.onClose.append(self.cleanup)
def layoutFinished(self):
self.setTitle(self.setup_title)
listWidth = self["config"].l.getItemSize().width()
# use 20% of list width for sliders
self["config"].l.setSeperation(int(listWidth*.8))
def cleanup(self):
iInputDevices.currentDevice = ""
def createSetup(self):
		self.list = [ ]
		# Resolve this device's config subsection once via getattr() instead
		# of assembling attribute paths as strings for exec().
		devconfig = getattr(config.inputDevices, self.inputDevice)
		self.enableEntry = getConfigListEntry(_("Change repeat and delay settings?"), devconfig.enabled)
		self.repeatEntry = getConfigListEntry(_("Interval between keys when repeating:"), devconfig.repeat)
		self.delayEntry = getConfigListEntry(_("Delay before key repeat starts:"), devconfig.delay)
		self.nameEntry = getConfigListEntry(_("Devicename:"), devconfig.name)
if self.enableEntry:
if isinstance(self.enableEntry[1], ConfigYesNo):
self.enableConfigEntry = self.enableEntry[1]
self.list.append(self.enableEntry)
if self.enableConfigEntry:
if self.enableConfigEntry.getValue() is True:
self.list.append(self.repeatEntry)
self.list.append(self.delayEntry)
else:
self.repeatEntry[1].setValue(self.repeatEntry[1].default)
self["config"].invalidate(self.repeatEntry)
self.delayEntry[1].setValue(self.delayEntry[1].default)
self["config"].invalidate(self.delayEntry)
self.nameEntry[1].setValue(self.nameEntry[1].default)
self["config"].invalidate(self.nameEntry)
self["config"].list = self.list
self["config"].l.setList(self.list)
if not self.selectionChanged in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.selectionChanged)
self.selectionChanged()
def selectionChanged(self):
if self["config"].getCurrent() == self.enableEntry:
self["introduction"].setText(_("Current device: ") + str(iInputDevices.getDeviceAttribute(self.inputDevice, 'name')) )
else:
self["introduction"].setText(_("Current value: ") + self.getCurrentValue() + _(" ms"))
def newConfig(self):
current = self["config"].getCurrent()
if current:
if current == self.enableEntry:
self.createSetup()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
ConfigListScreen.keyRight(self)
self.newConfig()
def confirm(self, confirmed):
if not confirmed:
print "not confirmed"
return
else:
self.nameEntry[1].setValue(iInputDevices.getDeviceAttribute(self.inputDevice, 'name'))
cmd = "config.inputDevices." + self.inputDevice + ".name.save()"
exec (cmd)
self.keySave()
def apply(self):
self.session.openWithCallback(self.confirm, MessageBox, _("Use these input device settings?"), MessageBox.TYPE_YESNO, timeout=20, default=True)
def cancelConfirm(self, result):
if not result:
return
for x in self["config"].list:
x[1].cancel()
self.close()
def keyCancel(self):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), MessageBox.TYPE_YESNO, timeout=20, default=True)
else:
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
self.selectionChanged()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getValue())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class RemoteControlType(Screen, ConfigListScreen):
rcList = [
("0", _("Default")),
("3", _("MaraM9")),
("4", _("DMM normal")),
("5", _("et9000/et9100")),
("6", _("DMM advanced")),
("7", _("et5000/6000")),
("8", _("VU+")),
("9", _("et8000/et10000")),
("11", _("et9200/9500/6500")),
("13", _("et4000")),
("14", _("XP1000")),
("16", _("HD1100/HD1200/HD500C/et7x00/et8500")),
("17", _("XP3000")),
("18", _("F1/F3")),
("19", _("HD2400"))
]
defaultRcList = [
("et4000", 13),
("et5000", 7),
("et6000", 7),
("et6500", 11),
("et7x00",16),
("et8000", 9),
("et8500",16),
("et9000", 5),
("et9100", 5),
("et9200", 11),
("et9500", 11),
("et10000", 9),
("hd1100",16),
("hd1200",16),
("hd500c",16),
("hd2400",19),
("formuler1",18),
("formuler3",18),
("xp1000", 14),
("xp3000", 17)
]
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = ["RemoteControlType", "Setup" ]
self["actions"] = ActionMap(["SetupActions"],
{
"cancel": self.keyCancel,
"save": self.keySave,
}, -1)
self["key_green"] = StaticText(_("Save"))
self["key_red"] = StaticText(_("Cancel"))
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session)
rctype = config.plugins.remotecontroltype.rctype.getValue()
self.rctype = ConfigSelection(choices = self.rcList, default = str(rctype))
self.list.append(getConfigListEntry(_("Remote control type"), self.rctype))
self["config"].list = self.list
self.defaultRcType = None
self.getDefaultRcType()
def getDefaultRcType(self):
data = iRcTypeControl.getBoxType()
for x in self.defaultRcList:
if x[0] in data:
self.defaultRcType = x[1]
break
def setDefaultRcType(self):
#iRcTypeControl.writeRcType(config.plugins.remotecontroltype.rctype.getValue())
iRcTypeControl.writeRcType(self.defaultRcType)
def keySave(self):
if config.plugins.remotecontroltype.rctype.getValue() == int(self.rctype.getValue()):
self.close()
else:
self.setNewSetting()
self.session.openWithCallback(self.keySaveCallback, MessageBox, _("Is this setting ok?"), MessageBox.TYPE_YESNO, timeout=20, default=True, timeout_default=False)
def keySaveCallback(self, answer):
if answer is False:
self.restoreOldSetting()
else:
config.plugins.remotecontroltype.rctype.value = int(self.rctype.getValue())
config.plugins.remotecontroltype.save()
self.close()
def restoreOldSetting(self):
if config.plugins.remotecontroltype.rctype.getValue() == 0:
self.setDefaultRcType()
else:
iRcTypeControl.writeRcType(config.plugins.remotecontroltype.rctype.getValue())
def setNewSetting(self):
if int(self.rctype.value) == 0:
self.setDefaultRcType()
else:
iRcTypeControl.writeRcType(int(self.rctype.value))
def keyCancel(self):
self.restoreOldSetting()
self.close()
| vitmod/dvbapp | lib/python/Screens/InputDeviceSetup.py | Python | gpl-2.0 | 16,079 |
import gzip
import os
import sys
import pickle as pkl
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage:', sys.argv[0], 'DIR1 DIR2')
        sys.exit(1)
dir1 = sys.argv[1]
dir2 = sys.argv[2]
print('checking songs ...')
songs1 = pkl.load(gzip.open(os.path.join(dir1, 'all_songs.pkl.gz'), 'rb'))
songs2 = pkl.load(gzip.open(os.path.join(dir2, 'all_songs.pkl.gz'), 'rb'))
assert np.all(np.array(songs1) == np.array(songs2))
print('checking playlists ...')
fname = 'playlists_train_test_s4'
pl1 = pkl.load(gzip.open(os.path.join(dir1, '%s.pkl.gz' % fname), 'rb'))
pl2 = pkl.load(gzip.open(os.path.join(dir2, '%s.pkl.gz' % fname), 'rb'))
assert np.all(np.array(pl1) == np.array(pl2))
print('checking features ...')
x1 = pkl.load(gzip.open(os.path.join(dir1, 'X.pkl.gz'), 'rb'))
x2 = pkl.load(gzip.open(os.path.join(dir2, 'X.pkl.gz'), 'rb'))
assert np.all(np.isclose(x1, x2))
print('checking labels (sparse boolean matrices) ...')
for fname in ['Y_train', 'Y_test']:
print(' checking %s ...' % fname)
y1 = pkl.load(gzip.open(os.path.join(dir1, '%s.pkl.gz' % fname), 'rb'))
y2 = pkl.load(gzip.open(os.path.join(dir2, '%s.pkl.gz' % fname), 'rb'))
assert type(y1) == type(y2)
if type(y1) == csr_matrix:
y1 = y1.tocsc()
y2 = y2.tocsc()
elif type(y1) == csc_matrix:
y1 = y1.tocsr()
y2 = y2.tocsr()
else:
assert False, 'NOT CSR or CSC format'
assert np.all(np.equal(y1.indices, y2.indices))
        # NOTE: SciPy does not guarantee sorted column indices within a CSR
        # matrix (nor row indices within a CSC one), so two equal matrices can
        # carry differently ordered .indices arrays; converting both to the
        # other representation first canonicalises the arrays compared above.
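        # Hedged illustration (editorial, not in the original script): the
        # CSR/CSC constructors accept unsorted index arrays, e.g.
        #   a = csr_matrix(([1, 2], [2, 1], [0, 2]), shape=(1, 3))
        #   b = csr_matrix(([2, 1], [1, 2], [0, 2]), shape=(1, 3))
        # describe the same 1x3 matrix with .indices of [2, 1] vs [1, 2];
        # converting to the other format rebuilds (and sorts) those arrays.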
print('checking song popularities ...')
for fname in ['song2pop', 'song2pop_train']:
pops1 = pkl.load(gzip.open(os.path.join(dir1, '%s.pkl.gz' % fname), 'rb'))
pops2 = pkl.load(gzip.open(os.path.join(dir2, '%s.pkl.gz' % fname), 'rb'))
for sid in pops1:
assert pops1[sid] == pops2[sid]
print('checking user playlists indices ...')
fname = 'cliques_train'
clq1 = pkl.load(gzip.open(os.path.join(dir1, fname + '.pkl.gz'), 'rb'))
clq2 = pkl.load(gzip.open(os.path.join(dir2, fname + '.pkl.gz'), 'rb'))
assert len(clq1) == len(clq2)
nclqs = len(clq1)
for i in range(nclqs):
assert np.all(clq1[i] == clq2[i])
| chengsoonong/digbeta | dchen/music/src/check_equal_s4.py | Python | gpl-3.0 | 2,368 |
from ase import *
import numpy as np  # np.linspace is used below and is not guaranteed by the star import
a = 4.0 # approximate lattice constant
b = a / 2
ag = Atoms('Ag',
cell=[(0,b,b), (b,0,b), (b,b,0)],
pbc=1,
calculator=EMT()) # use EMT potential
cell = ag.get_cell()
traj = PickleTrajectory('Ag.traj', 'w')
for x in np.linspace(0.95, 1.05, 5):
ag.set_cell(cell * x, scale_atoms=True)
traj.write(ag)
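# Editorial follow-up sketch (not part of the original tutorial file; the
# exact API names for this ASE generation are an assumption). Reading the
# points back for an equation-of-state fit would look roughly like:
#
#   configs = PickleTrajectory('Ag.traj', 'r')
#   volumes = [a.get_volume() for a in configs]
#   energies = [a.get_potential_energy() for a in configs]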
| freephys/python_ase | doc/tutorials/eos/eos1.py | Python | gpl-3.0 | 360 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of NSUChat2.
#
# Copyright (©) 2014 Marcel Ribeiro Dantas
#
# <mribeirodantas at fedoraproject.org>
#
# NSUChat2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# NSUChat2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NSUChat2. If not, see <http://www.gnu.org/licenses/>.
| mribeirodantas/nsuchat2 | __init__.py | Python | gpl-3.0 | 794 |
"""Add PersonalDataType.position
Revision ID: 98f411f40bb
Revises: 468343faea20
Create Date: 2015-11-05 16:02:43.260085
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '98f411f40bb'
down_revision = '468343faea20'
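# Editorial note (not in the original migration): range(1, 10) admits enum
# values 1..9 while range(1, 9) admits 1..8, so upgrade() widens the check
# constraint by one value to make room for the new PersonalDataType.position
# member, and downgrade() restores the narrower set.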
def upgrade():
op.drop_constraint('ck_form_items_valid_enum_personal_data_type', 'form_items', schema='event_registration')
op.create_check_constraint('valid_enum_personal_data_type', 'form_items',
"personal_data_type IN ({})".format(', '.join(map(str, range(1, 10)))),
schema='event_registration')
def downgrade():
op.drop_constraint('ck_form_items_valid_enum_personal_data_type', 'form_items', schema='event_registration')
op.create_check_constraint('valid_enum_personal_data_type', 'form_items',
"personal_data_type IN ({})".format(', '.join(map(str, range(1, 9)))),
schema='event_registration')
| belokop/indico_bare | migrations/versions/201511051602_98f411f40bb_add_personaldatatype_position.py | Python | gpl-3.0 | 991 |
'''
Created on 24 May 2013
@author: peio
'''
import subprocess
import os
from tempfile import NamedTemporaryFile
from Bio import SeqIO
from Bio.Seq import Seq
class IsIndelError(Exception):
pass
class OutsideAlignment(Exception):
pass
class BetweenSegments(Exception):
pass
class SeqCoords(object):
    '''This class creates a coord system relation between two sequences.
    First it aligns the sequences and then builds the coord system.
    Internally it uses 0-based coordinates.
    The coord system is a list of start, stop segments in both sequences,
    e.g.:
1 2 3 4 5 6
01234567890123456789012345678901234567890123456789012345678901234
ATCTAGGCTGCTACGATTAGCTGATCGATGTTATCGTAGATCTAGCTGATCATGCTAGCTGATCG
ATCTAGGCTGCTACGA-TAGCTGATCGATGTTATCGTAGATCTAGCTGATCATGC-AGCTGATCG
0-15, 17-54, 56-64
0-15, 16-53, 54-62
1 2 3 4 5 6
01234567890123456789012345678901234567890123456789012345678901234
012345678901234567890123 4567890123456789012345678901 2345678901234
ATCTAGGCTGCTACGATTAGCTGA-CGATGTTATCGTAGATCTAGCTGATCAT-CTAGCTGATCG
ATCTAGGCT-CTACGATTAGCTGATCGATGTTATCGTAGATC-AGCTGATCATGCTAGCTGATCG
012345678 90123456789012345678901234567890 123456789012345678901234
0-8, 10-23, 24-40, 42-51, 52-62
0-8, 9-22, 24-40, 41-50, 52-62
'''
def __init__(self, seq1, seq2):
"Both secuences are biopython secuences"
self.coord_system = self._get_coord_system(seq1, seq2)
self.seq1_name = seq1.id
self.seq2_name = seq2.id
self._seq2_len = len(seq2)
def _get_coord_system(self, seq1, seq2):
out_fhand, reverse = get_water_alignment(seq1, seq2)
self.reverse = reverse
coord_system = build_relations_from_aligment(out_fhand)
out_fhand.close()
return coord_system
def _reverse_pos(self, pos):
reverse = self.reverse
if reverse:
return self._seq2_len - pos - 1
else:
return pos
def _get_segment(self, pos, seq_name):
'returns the segment index of the given position'
segments = self.coord_system[seq_name]
for index, (start, stop) in enumerate(segments):
if pos >= start and pos <= stop:
return index, (start, stop)
if pos < segments[0][0] or pos > segments[-1][1]:
raise OutsideAlignment
else:
raise BetweenSegments
def _to_seq_pos(self, pos, to_seq1=True):
if to_seq1:
seq1_name = self.seq1_name
seq2_name = self.seq2_name
else:
seq2_name = self.seq1_name
seq1_name = self.seq2_name
segment2 = self._get_segment(pos, seq2_name)
segment2_index, segment2 = segment2
segment1 = self.coord_system[seq1_name][segment2_index]
return segment1[0] + pos - segment2[0]
def to_seq1_pos(self, seq2_pos):
seq2_pos = self._reverse_pos(seq2_pos)
return self._to_seq_pos(seq2_pos, to_seq1=True)
def to_seq2_pos(self, seq1_pos):
seq2_pos = self._to_seq_pos(seq1_pos, to_seq1=False)
return self._reverse_pos(seq2_pos)
def _to_seq_slice(self, start, end, to_seq1=True):
if to_seq1:
seq1_name = self.seq1_name
seq2_name = self.seq2_name
else:
seq2_name = self.seq1_name
seq1_name = self.seq2_name
stop = end - 1
segment2_start = self._get_segment(start, seq2_name)
segment2_stop = self._get_segment(stop, seq2_name)
segment2_index_start, segment2_start = segment2_start
segment2_index_stop, segment2_stop = segment2_stop
if segment2_index_start != segment2_index_stop:
raise BetweenSegments
segment1 = self.coord_system[seq1_name][segment2_index_start]
start = segment1[0] + start - segment2_start[0]
stop = segment1[0] + stop - segment2_stop[0]
return (start, stop + 1)
def to_seq1_slice(self, start, end):
if self.reverse:
start = self._reverse_pos(start)
end = self._reverse_pos(end)
slice2 = self._to_seq_slice(start, end, to_seq1=True)
if self.reverse:
return slice2[1], slice2[0]
return slice2
def to_seq2_slice(self, start, end):
slice1 = self._to_seq_slice(start, end, to_seq1=False)
if self.reverse:
start = self._reverse_pos(slice1[1])
end = self._reverse_pos(slice1[0])
else:
start, end = slice1
return (start, end)
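def _seq_coords_example():
    # Hedged usage sketch (editorial, not part of the original module). It
    # mirrors the first alignment in the SeqCoords docstring and needs the
    # EMBOSS 'water' binary on the PATH; the record ids are made up.
    from Bio.SeqRecord import SeqRecord
    ref = SeqRecord(Seq('ATCTAGGCTGCTACGATTAGCTGATCGATGTTATCGTAGATCTAGCTGATCATGCTAGCTGATCG'), id='ref')
    est = SeqRecord(Seq('ATCTAGGCTGCTACGATAGCTGATCGATGTTATCGTAGATCTAGCTGATCATGCAGCTGATCG'), id='est')
    coords = SeqCoords(ref, est)
    # A ref position right of the first gap should map one base to the left
    # in est, since est lost one base there.
    return coords.to_seq2_pos(20), coords.to_seq1_slice(20, 30)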
def build_relations_from_aligment(fhand):
'It returns a relations dict given an alignment in markx10 format'
#print open(fhand.name).read()
#we parse the aligment
in_seq_section = 0
seq, al_start, seq_len = None, None, None
seq0_name = None
for line in fhand:
line = line.strip()
if not line:
continue
if line[0] == '>' and line[1] != '>':
if seq0_name is None:
seq0_name = line.split()[0][1:]
else:
seq1_name = line.split()[0][1:]
if in_seq_section:
seq0 = {'seq': seq,
'length': seq_len,
'al_start': al_start - 1,
'name': seq0_name}
in_seq_section += 1
seq = ''
continue
if not in_seq_section:
continue
if '; sq_len:' in line:
seq_len = int(line.split(':')[-1])
if '; al_display_start:' in line:
al_start = int(line.split(':')[-1])
if line[0] not in (';', '#'):
seq += line
seq1 = {'seq': seq,
'length': seq_len,
'al_start': al_start - 1,
'name': seq1_name}
#now we get the segments
gap = '-'
segments = []
segment0, segment1 = None, None
seq0_start, seq1_start = seq0['al_start'], seq1['al_start']
seq0_start_delta, seq1_start_delta = seq0_start, seq1_start
seq0_delta, seq1_delta = 0, 0
for index, (nucl0, nucl1) in enumerate(zip(seq0['seq'], seq1['seq'])):
seq0_index = seq0_start_delta + index - seq0_delta
seq1_index = seq1_start_delta + index - seq1_delta
if nucl0 == gap:
segment0 = seq0_start, seq0_index - 1
segment1 = seq1_start, seq1_index - 1
seq0_start = seq0_index
seq1_start = seq1_index + 1
seq0_delta += 1
elif nucl1 == gap:
segment0 = seq0_start, seq0_index - 1
segment1 = seq1_start, seq1_index - 1
seq1_start = seq1_index
seq0_start = seq0_index + 1
seq1_delta += 1
if segment0 and segment1:
segment = {seq0['name']: segment0, seq1['name']: segment1}
segments.append(segment)
segment0, segment1 = None, None
else:
segment0 = seq0_start, seq0_index
segment1 = seq1_start, seq1_index
segment = {seq0['name']: segment0, seq1['name']: segment1}
segments.append(segment)
relations = {}
for seg in segments:
for seq_name, limits in seg.items():
if seq_name not in relations:
relations[seq_name] = []
relations[seq_name].append(limits)
return relations
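# Illustrative result shape (editorial note, not in the original source): for
# the first alignment in the SeqCoords docstring, with sequences named 'ref'
# and 'est', this would return
#   {'ref': [(0, 15), (17, 54), (56, 64)],
#    'est': [(0, 15), (16, 53), (54, 62)]}
# keyed by the names parsed from the markx10 header.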
def _get_water_score(fhand):
for line in fhand:
if line.startswith('# Score:'):
return float(line.split(':')[1].strip())
return None
def get_water_alignment(seq1, seq2, gap_open=10.0, gap_extend=0.5,
out_fmt='markx10'):
out_fhand = NamedTemporaryFile()
    _do_water_alignment(seq1, seq2, out_fhand, gap_open=gap_open,
                        gap_extend=gap_extend, out_fmt=out_fmt, reverse2=False)
    out_fhand2 = NamedTemporaryFile()
    _do_water_alignment(seq1, seq2, out_fhand2, gap_open=gap_open,
                        gap_extend=gap_extend, out_fmt=out_fmt, reverse2=True)
forw_score = _get_water_score(out_fhand)
rev_score = _get_water_score(out_fhand2)
if forw_score > rev_score:
out_fhand.seek(0)
return out_fhand, False
else:
out_fhand2.seek(0)
return out_fhand2, True
def _do_water_alignment(seq1, seq2, out_fhand, gap_open=10.0, gap_extend=0.5,
out_fmt='markx10', reverse2=False):
seq1_fhand = NamedTemporaryFile()
seq2_fhand = NamedTemporaryFile()
SeqIO.write(seq1, seq1_fhand, 'fasta')
SeqIO.write(seq2, seq2_fhand, 'fasta')
seq1_fhand.flush()
seq2_fhand.flush()
cmd = ['water', '-asequence', seq1_fhand.name, '-bsequence',
seq2_fhand.name, '-outfile', out_fhand.name, '-gapopen',
str(gap_open), '-gapextend', str(gap_extend), '-aformat3', out_fmt]
if reverse2:
cmd.append('-sreverse2')
stdout = open(os.devnull, 'w')
stderr = open(os.devnull, 'w')
subprocess.check_call(cmd, stdout=stdout, stderr=stderr)
def get_amino_change(seq_ref, seq_estscan, snv):
if snv.is_indel:
raise IsIndelError()
position = snv.pos
alt_allele = snv.alleles[1]
seq_coord = SeqCoords(seq_ref, seq_estscan)
estscan_pos = seq_coord.to_seq2_pos(position)
if estscan_pos is None:
return None
estscan_frame = (estscan_pos % 3) + 1
estscan_start = estscan_pos + estscan_frame - 1
estscan_stop = estscan_start + 2
# check if there is a frameshift in the ref_seq
ref_slice = seq_coord.to_seq1_slice(estscan_start, estscan_stop)
if ref_slice is None:
return None
ref_seq_aa = seq_ref[ref_slice[0]: ref_slice[1] + 1].seq[:3].translate()
estscan_seq_aa = seq_estscan[estscan_start: estscan_stop + 1].seq[:3]
ref_aa = str(estscan_seq_aa.translate())
if str(ref_seq_aa) != str(ref_aa):
return None
aminos = {'ref_amino': ref_aa, 'alt_amino': []}
for alt_allele in snv.alleles[1:]:
alt_seq = [nucl for nucl in (estscan_seq_aa)]
alt_seq[estscan_frame - 1] = alt_allele
alt_seq = Seq("".join(alt_seq))
alt_aa = str(alt_seq.translate())
aminos['alt_amino'].append(alt_aa)
return aminos
| JoseBlanca/vcf_crumbs | vcf_crumbs/prot_change.py | Python | gpl-3.0 | 10,257 |
from otree.api import Currency as c, currency_range
from . import pages
from ._builtin import Bot
from .models import Constants
from random import randint
import time
class PlayerBot(Bot):
def play_round(self):
yield (pages.Bienvenue_Page)
yield (pages.ID_Etudiant_Page,
{'id_etudiant': "étudiant_" + str(time.time())})
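# Editorial note (not in the original file): oTree replays this bot with
# "otree test <app_name>"; each yield submits one page in order, first the
# welcome page, then the student-ID form with a unique, timestamped value.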
| anthropo-lab/XP | EPHEMER/EDHEC_Project/connect_and_filter_en/tests.py | Python | gpl-3.0 | 359 |
#Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from Xlib.display import Display
from Xlib import X
from Xlib.ext.xtest import fake_input
from Xlib.ext import record
from Xlib.protocol import rq
import Xlib.XK
from .base import PyKeyboardMeta, PyKeyboardEventMeta
import time
import string
special_X_keysyms = {
' ': "space",
'\t': "Tab",
'\n': "Return", # for some reason this needs to be cr, not lf
'\r': "Return",
'\e': "Escape",
'!': "exclam",
'#': "numbersign",
'%': "percent",
'$': "dollar",
'&': "ampersand",
'"': "quotedbl",
'\'': "apostrophe",
'(': "parenleft",
')': "parenright",
'*': "asterisk",
'=': "equal",
'+': "plus",
',': "comma",
'-': "minus",
'.': "period",
'/': "slash",
':': "colon",
';': "semicolon",
'<': "less",
'>': "greater",
'?': "question",
'@': "at",
'[': "bracketleft",
']': "bracketright",
'\\': "backslash",
'^': "asciicircum",
'_': "underscore",
'`': "grave",
'{': "braceleft",
'|': "bar",
'}': "braceright",
'~': "asciitilde"
}
class PyKeyboard(PyKeyboardMeta):
"""
The PyKeyboard implementation for X11 systems (mostly linux). This
allows one to simulate keyboard input.
"""
def __init__(self, display=None):
PyKeyboardMeta.__init__(self)
self.display = Display(display)
self.display2 = Display(display)
self.special_key_assignment()
def press_key(self, character=''):
"""
Press a given character key. Also works with character keycodes as
integers, but not keysyms.
"""
try: # Detect uppercase or shifted character
shifted = self.is_char_shifted(character)
except AttributeError: # Handle the case of integer keycode argument
fake_input(self.display, X.KeyPress, character)
self.display.sync()
else:
if shifted:
fake_input(self.display, X.KeyPress, self.shift_key)
keycode = self.lookup_character_keycode(character)
fake_input(self.display, X.KeyPress, keycode)
self.display.sync()
def release_key(self, character=''):
"""
Release a given character key. Also works with character keycodes as
integers, but not keysyms.
"""
try: # Detect uppercase or shifted character
shifted = self.is_char_shifted(character)
except AttributeError: # Handle the case of integer keycode argument
fake_input(self.display, X.KeyRelease, character)
self.display.sync()
else:
if shifted:
fake_input(self.display, X.KeyRelease, self.shift_key)
keycode = self.lookup_character_keycode(character)
fake_input(self.display, X.KeyRelease, keycode)
self.display.sync()
def special_key_assignment(self):
"""
Determines the keycodes for common special keys on the keyboard. These
are integer values and can be passed to the other key methods.
Generally speaking, these are non-printable codes.
"""
#This set of keys compiled using the X11 keysymdef.h file as reference
#They comprise a relatively universal set of keys, though there may be
#exceptions which may come up for other OSes and vendors. Countless
#special cases exist which are not handled here, but may be extended.
#TTY Function Keys
self.backspace_key = self.lookup_character_keycode('BackSpace')
self.tab_key = self.lookup_character_keycode('Tab')
self.linefeed_key = self.lookup_character_keycode('Linefeed')
self.clear_key = self.lookup_character_keycode('Clear')
self.return_key = self.lookup_character_keycode('Return')
self.enter_key = self.return_key # Because many keyboards call it "Enter"
self.pause_key = self.lookup_character_keycode('Pause')
self.scroll_lock_key = self.lookup_character_keycode('Scroll_Lock')
self.sys_req_key = self.lookup_character_keycode('Sys_Req')
self.escape_key = self.lookup_character_keycode('Escape')
self.delete_key = self.lookup_character_keycode('Delete')
#Modifier Keys
self.shift_l_key = self.lookup_character_keycode('Shift_L')
self.shift_r_key = self.lookup_character_keycode('Shift_R')
self.shift_key = self.shift_l_key # Default Shift is left Shift
self.alt_l_key = self.lookup_character_keycode('Alt_L')
self.alt_r_key = self.lookup_character_keycode('Alt_R')
self.alt_key = self.alt_l_key # Default Alt is left Alt
self.control_l_key = self.lookup_character_keycode('Control_L')
self.control_r_key = self.lookup_character_keycode('Control_R')
self.control_key = self.control_l_key # Default Ctrl is left Ctrl
self.caps_lock_key = self.lookup_character_keycode('Caps_Lock')
self.capital_key = self.caps_lock_key # Some may know it as Capital
self.shift_lock_key = self.lookup_character_keycode('Shift_Lock')
self.meta_l_key = self.lookup_character_keycode('Meta_L')
self.meta_r_key = self.lookup_character_keycode('Meta_R')
self.super_l_key = self.lookup_character_keycode('Super_L')
self.windows_l_key = self.super_l_key # Cross-support; also it's printed there
self.super_r_key = self.lookup_character_keycode('Super_R')
self.windows_r_key = self.super_r_key # Cross-support; also it's printed there
self.hyper_l_key = self.lookup_character_keycode('Hyper_L')
self.hyper_r_key = self.lookup_character_keycode('Hyper_R')
#Cursor Control and Motion
self.home_key = self.lookup_character_keycode('Home')
self.up_key = self.lookup_character_keycode('Up')
self.down_key = self.lookup_character_keycode('Down')
self.left_key = self.lookup_character_keycode('Left')
self.right_key = self.lookup_character_keycode('Right')
self.end_key = self.lookup_character_keycode('End')
self.begin_key = self.lookup_character_keycode('Begin')
self.page_up_key = self.lookup_character_keycode('Page_Up')
self.page_down_key = self.lookup_character_keycode('Page_Down')
self.prior_key = self.lookup_character_keycode('Prior')
self.next_key = self.lookup_character_keycode('Next')
#Misc Functions
self.select_key = self.lookup_character_keycode('Select')
self.print_key = self.lookup_character_keycode('Print')
self.print_screen_key = self.print_key # Seems to be the same thing
self.snapshot_key = self.print_key # Another name for printscreen
self.execute_key = self.lookup_character_keycode('Execute')
self.insert_key = self.lookup_character_keycode('Insert')
self.undo_key = self.lookup_character_keycode('Undo')
self.redo_key = self.lookup_character_keycode('Redo')
self.menu_key = self.lookup_character_keycode('Menu')
self.apps_key = self.menu_key # Windows...
self.find_key = self.lookup_character_keycode('Find')
self.cancel_key = self.lookup_character_keycode('Cancel')
self.help_key = self.lookup_character_keycode('Help')
self.break_key = self.lookup_character_keycode('Break')
self.mode_switch_key = self.lookup_character_keycode('Mode_switch')
self.script_switch_key = self.lookup_character_keycode('script_switch')
self.num_lock_key = self.lookup_character_keycode('Num_Lock')
#Keypad Keys: Dictionary structure
keypad = ['Space', 'Tab', 'Enter', 'F1', 'F2', 'F3', 'F4', 'Home',
'Left', 'Up', 'Right', 'Down', 'Prior', 'Page_Up', 'Next',
'Page_Down', 'End', 'Begin', 'Insert', 'Delete', 'Equal',
'Multiply', 'Add', 'Separator', 'Subtract', 'Decimal',
'Divide', 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
self.keypad_keys = {k: self.lookup_character_keycode('KP_'+str(k)) for k in keypad}
self.numpad_keys = self.keypad_keys
#Function Keys/ Auxilliary Keys
#FKeys
self.function_keys = [None] + [self.lookup_character_keycode('F'+str(i)) for i in range(1,36)]
#LKeys
self.l_keys = [None] + [self.lookup_character_keycode('L'+str(i)) for i in range(1,11)]
#RKeys
self.r_keys = [None] + [self.lookup_character_keycode('R'+str(i)) for i in range(1,16)]
#Unsupported keys from windows
self.kana_key = None
self.hangeul_key = None # old name - should be here for compatibility
self.hangul_key = None
self.junjua_key = None
self.final_key = None
self.hanja_key = None
self.kanji_key = None
self.convert_key = None
self.nonconvert_key = None
self.accept_key = None
self.modechange_key = None
self.sleep_key = None
def lookup_character_keycode(self, character):
"""
Looks up the keysym for the character then returns the keycode mapping
for that keysym.
"""
keysym = Xlib.XK.string_to_keysym(character)
if keysym == 0:
keysym = Xlib.XK.string_to_keysym(special_X_keysyms[character])
return self.display.keysym_to_keycode(keysym)
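def _pykeyboard_example():
    # Hedged usage sketch (editorial, not part of the original module); it
    # needs a live X11 display. press_key/release_key accept characters,
    # keysym names such as 'Return', or the integer keycode attributes set
    # up by special_key_assignment().
    kbd = PyKeyboard()
    kbd.press_key('H')      # shifted character: Shift is pressed for us
    kbd.release_key('H')
    kbd.press_key('i')
    kbd.release_key('i')
    kbd.press_key(kbd.enter_key)
    kbd.release_key(kbd.enter_key)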
class PyKeyboardEvent(PyKeyboardEventMeta):
"""
The PyKeyboardEvent implementation for X11 systems (mostly linux). This
allows one to listen for keyboard input.
"""
def __init__(self, display=None):
self.display = Display(display)
self.display2 = Display(display)
self.ctx = self.display2.record_create_context(
0,
[record.AllClients],
[{
'core_requests': (0, 0),
'core_replies': (0, 0),
'ext_requests': (0, 0, 0, 0),
'ext_replies': (0, 0, 0, 0),
'delivered_events': (0, 0),
'device_events': (X.KeyPress, X.KeyRelease),
'errors': (0, 0),
'client_started': False,
'client_died': False,
}])
self.lock_meaning = None
#Get these dictionaries for converting keysyms and strings
self.keysym_to_string, self.string_to_keysym = self.get_translation_dicts()
#Identify and register special groups of keys
self.modifier_keycodes = {}
self.all_mod_keycodes = []
self.keypad_keycodes = []
#self.configure_keys()
#Direct access to the display's keycode-to-keysym array
#print('Keycode to Keysym map')
#for i in range(len(self.display._keymap_codes)):
# print('{0}: {1}'.format(i, self.display._keymap_codes[i]))
PyKeyboardEventMeta.__init__(self)
def run(self):
"""Begin listening for keyboard input events."""
self.state = True
if self.capture:
self.display2.screen().root.grab_keyboard(True, X.KeyPressMask | X.KeyReleaseMask, X.GrabModeAsync, X.GrabModeAsync, 0, 0, X.CurrentTime)
self.display2.record_enable_context(self.ctx, self.handler)
self.display2.record_free_context(self.ctx)
def stop(self):
"""Stop listening for keyboard input events."""
self.state = False
self.display.record_disable_context(self.ctx)
self.display.ungrab_keyboard(X.CurrentTime)
self.display.flush()
self.display2.record_disable_context(self.ctx)
self.display2.ungrab_keyboard(X.CurrentTime)
self.display2.flush()
def handler(self, reply):
"""Upper level handler of keyboard events."""
data = reply.data
while len(data):
event, data = rq.EventField(None).parse_binary_value(data, self.display.display, None, None)
if self.escape(event): # Quit if this returns True
self.stop()
else:
self._tap(event)
def _tap(self, event):
keycode = event.detail
press_bool = (event.type == X.KeyPress)
#Detect modifier states from event.state
for mod, bit in self.modifier_bits.items():
self.modifiers[mod] = event.state & bit
if keycode in self.all_mod_keycodes:
keysym = self.display.keycode_to_keysym(keycode, 0)
character = self.keysym_to_string[keysym]
else:
character = self.lookup_char_from_keycode(keycode)
#All key events get passed to self.tap()
self.tap(keycode,
character,
press=press_bool)
def lookup_char_from_keycode(self, keycode):
"""
This will conduct a lookup of the character or string associated with a
given keycode.
"""
#TODO: Logic should be strictly adapted from X11's src/KeyBind.c
#Right now the logic is based off of
#http://tronche.com/gui/x/xlib/input/keyboard-encoding.html
#Which I suspect is not the whole story and may likely cause bugs
keysym_index = 0
#TODO: Display's Keysyms per keycode count? Do I need this?
#If the Num_Lock is on, and the keycode corresponds to the keypad
if self.modifiers['Num_Lock'] and keycode in self.keypad_keycodes:
if self.modifiers['Shift'] or self.modifiers['Shift_Lock']:
keysym_index = 0
else:
keysym_index = 1
elif not self.modifiers['Shift'] and self.modifiers['Caps_Lock']:
#Use the first keysym if uppercase or uncased
#Use the uppercase keysym if the first is lowercase (second)
keysym_index = 0
keysym = self.display.keycode_to_keysym(keycode, keysym_index)
#TODO: Support Unicode, Greek, and special latin characters
if keysym & 0x7f == keysym and chr(keysym) in 'abcdefghijklmnopqrstuvwxyz':
keysym_index = 1
elif self.modifiers['Shift'] and self.modifiers['Caps_Lock']:
keysym_index = 1
keysym = self.display.keycode_to_keysym(keycode, keysym_index)
#TODO: Support Unicode, Greek, and special latin characters
if keysym & 0x7f == keysym and chr(keysym) in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
keysym_index = 0
elif self.modifiers['Shift'] or self.modifiers['Shift_Lock']:
keysym_index = 1
if self.modifiers['Mode_switch']:
keysym_index += 2
#Finally! Get the keysym
keysym = self.display.keycode_to_keysym(keycode, keysym_index)
#If the character is ascii printable, return that character
if keysym & 0x7f == keysym and self.ascii_printable(keysym):
return chr(keysym)
#If the character was not printable, look for its name
try:
char = self.keysym_to_string[keysym]
except KeyError:
print('Unable to determine character.')
print('Keycode: {0} KeySym {1}'.format(keycode, keysym))
return None
else:
return char
def escape(self, event):
if event.detail == self.lookup_character_keycode('Escape'):
return True
return False
def configure_keys(self):
"""
This function locates the keycodes corresponding to special groups of
keys and creates data structures of them for use by the PyKeyboardEvent
instance; including the keypad keys and the modifiers.
The keycodes pertaining to the keyboard modifiers are assigned by the
modifier name in a dictionary. This dictionary can be accessed in the
following manner:
self.modifier_keycodes['Shift'] # All keycodes for Shift Masking
It also assigns certain named modifiers (Alt, Num_Lock, Super), which
may be dynamically assigned to Mod1 - Mod5 on different platforms. This
should generally allow the user to do the following lookups on any
system:
self.modifier_keycodes['Alt'] # All keycodes for Alt Masking
self.modifiers['Alt'] # State of Alt mask, non-zero if "ON"
"""
modifier_mapping = self.display.get_modifier_mapping()
all_mod_keycodes = []
mod_keycodes = {}
mod_index = [('Shift', X.ShiftMapIndex), ('Lock', X.LockMapIndex),
('Control', X.ControlMapIndex), ('Mod1', X.Mod1MapIndex),
('Mod2', X.Mod2MapIndex), ('Mod3', X.Mod3MapIndex),
('Mod4', X.Mod4MapIndex), ('Mod5', X.Mod5MapIndex)]
#This gets the list of all keycodes per Modifier, assigns to name
for name, index in mod_index:
codes = [v for v in list(modifier_mapping[index]) if v]
mod_keycodes[name] = codes
all_mod_keycodes += codes
def lookup_keycode(string):
keysym = self.string_to_keysym[string]
return self.display.keysym_to_keycode(keysym)
#Dynamically assign Lock to Caps_Lock, Shift_Lock, Alt, Num_Lock, Super,
#and mode switch. Set in both mod_keycodes and self.modifier_bits
#Try to assign Lock to Caps_Lock or Shift_Lock
shift_lock_keycode = lookup_keycode('Shift_Lock')
caps_lock_keycode = lookup_keycode('Caps_Lock')
if shift_lock_keycode in mod_keycodes['Lock']:
mod_keycodes['Shift_Lock'] = [shift_lock_keycode]
self.modifier_bits['Shift_Lock'] = self.modifier_bits['Lock']
self.lock_meaning = 'Shift_Lock'
elif caps_lock_keycode in mod_keycodes['Lock']:
mod_keycodes['Caps_Lock'] = [caps_lock_keycode]
self.modifier_bits['Caps_Lock'] = self.modifier_bits['Lock']
self.lock_meaning = 'Caps_Lock'
else:
self.lock_meaning = None
#print('Lock is bound to {0}'.format(self.lock_meaning))
#Need to find out which Mod# to use for Alt, Num_Lock, Super, and
#Mode_switch
num_lock_keycodes = [lookup_keycode('Num_Lock')]
alt_keycodes = [lookup_keycode(i) for i in ['Alt_L', 'Alt_R']]
super_keycodes = [lookup_keycode(i) for i in ['Super_L', 'Super_R']]
mode_switch_keycodes = [lookup_keycode('Mode_switch')]
#Detect Mod number for Alt, Num_Lock, and Super
for name, keycodes in list(mod_keycodes.items()):
for alt_key in alt_keycodes:
if alt_key in keycodes:
mod_keycodes['Alt'] = keycodes
self.modifier_bits['Alt'] = self.modifier_bits[name]
for num_lock_key in num_lock_keycodes:
if num_lock_key in keycodes:
mod_keycodes['Num_Lock'] = keycodes
self.modifier_bits['Num_Lock'] = self.modifier_bits[name]
for super_key in super_keycodes:
if super_key in keycodes:
mod_keycodes['Super'] = keycodes
self.modifier_bits['Super'] = self.modifier_bits[name]
for mode_switch_key in mode_switch_keycodes:
if mode_switch_key in keycodes:
mod_keycodes['Mode_switch'] = keycodes
self.modifier_bits['Mode_switch'] = self.modifier_bits[name]
#Assign the mod_keycodes to a local variable for access
self.modifier_keycodes = mod_keycodes
self.all_mod_keycodes = all_mod_keycodes
#TODO: Determine if this might fail, perhaps iterate through the mapping
#and identify all keycodes with registered keypad keysyms?
#Acquire the full list of keypad keycodes
self.keypad_keycodes = []
keypad = ['Space', 'Tab', 'Enter', 'F1', 'F2', 'F3', 'F4', 'Home',
'Left', 'Up', 'Right', 'Down', 'Prior', 'Page_Up', 'Next',
'Page_Down', 'End', 'Begin', 'Insert', 'Delete', 'Equal',
'Multiply', 'Add', 'Separator', 'Subtract', 'Decimal',
'Divide', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
for keyname in keypad:
keypad_keycode = self.lookup_character_keycode('KP_' + keyname)
self.keypad_keycodes.append(keypad_keycode)
def lookup_character_keycode(self, character):
"""
Looks up the keysym for the character then returns the keycode mapping
for that keysym.
"""
keysym = self.string_to_keysym.get(character, 0)
if keysym == 0:
keysym = self.string_to_keysym.get(special_X_keysyms[character], 0)
return self.display.keysym_to_keycode(keysym)
def get_translation_dicts(self):
"""
Returns dictionaries for the translation of keysyms to strings and from
strings to keysyms.
"""
keysym_to_string_dict = {}
string_to_keysym_dict = {}
#XK loads latin1 and miscellany on its own; load latin2-4 and greek
Xlib.XK.load_keysym_group('latin2')
Xlib.XK.load_keysym_group('latin3')
Xlib.XK.load_keysym_group('latin4')
Xlib.XK.load_keysym_group('greek')
#Make a standard dict and the inverted dict
for string, keysym in Xlib.XK.__dict__.items():
if string.startswith('XK_'):
string_to_keysym_dict[string[3:]] = keysym
keysym_to_string_dict[keysym] = string[3:]
return keysym_to_string_dict, string_to_keysym_dict
def ascii_printable(self, keysym):
"""
If the keysym corresponds to a non-printable ascii character this will
return False. If it is printable, then True will be returned.
ascii 11 (vertical tab) and ascii 12 are printable, chr(11) and chr(12)
will return '\x0b' and '\x0c' respectively.
"""
if 0 <= keysym < 9:
return False
elif 13 < keysym < 32:
return False
elif keysym > 126:
return False
else:
return True
| audiossis/RemoteKbM | Files/PyUserInput/pykeyboard/x11.py | Python | gpl-3.0 | 22,679 |
import theano
import theano.tensor as T
## common activation functions
sigmoid = T.nnet.sigmoid
def softmax(x):
# expected input dimensions:
# 0 = minibatches
# 1 = units
# 2 = states
r = x.reshape((x.shape[0]*x.shape[1], x.shape[2]))
# r 0 = minibatches * units
# r 1 = states
# this is the expected input for theano.nnet.softmax
s = T.nnet.softmax(r)
# reshape back to original shape
return s.reshape(x.shape)
def softmax_with_zero(x):
# expected input dimensions:
# 0 = minibatches
# 1 = units
# 2 = states
r = x.reshape((x.shape[0]*x.shape[1], x.shape[2]))
# r 0 = minibatches * units
# r 1 = states
    r0 = T.concatenate([r, T.zeros_like(r)[:, 0:1]], axis=1) # append a zero column: the zero-energy state
# this is the expected input for theano.nnet.softmax
p0 = T.nnet.softmax(r0)
# reshape back to original shape, but with the added state
return p0.reshape((x.shape[0], x.shape[1], x.shape[2] + 1))
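def _softmax_shapes_demo():
    # Hedged sketch (editorial, not part of the original module): checks the
    # shape contract described in the comments above on a tiny random tensor.
    import numpy
    x = T.tensor3('x')
    f = theano.function([x], [softmax(x), softmax_with_zero(x)])
    v = numpy.random.rand(2, 3, 4).astype(theano.config.floatX)
    p, p0 = f(v)
    assert p.shape == (2, 3, 4)   # probabilities over the 4 states
    assert p0.shape == (2, 3, 5)  # one extra, implicit zero-energy state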
| benanne/morb | morb/activation_functions.py | Python | gpl-3.0 | 1,031 |
from gchecky.controller import ControllerLevel_1, Controller
from gchecky import model as gmodel
from django.conf import settings
class GController(Controller):
def __init__(self, automatically_charge = False, *args, **kwargs):
self.automatically_charge = automatically_charge
return super(GController, self).__init__(*args, **kwargs)
def on_retrieve_order(self, order_id, context=None):
from store.models import Cart
if Cart.objects.filter(google_id = order_id).count() > 0:
return Cart.objects.get(google_id = order_id)
return None
def handle_new_order(self, message, order_id, order, context):
from store.models import Cart
#XXX set the Cart's cart_xml and google_id here, have to get that somehow
cart = Cart.objects.get(pk = int(message.shopping_cart.merchant_private_data))
        cart.google_id = order_id
cart.cart_xml = message.toxml()
cart.state = message.fulfillment_order_state
cart.payment = message.financial_order_state
cart.save()
return gmodel.ok_t()
def handle_order_state_change(self, message, order_id, order, context):
assert order is not None
if message.new_fulfillment_order_state != message.previous_fulfillment_order_state:
order.state = message.new_fulfillment_order_state
if message.new_financial_order_state != message.previous_financial_order_state:
            order.payment = message.new_financial_order_state
        order.save()
if order.state == 'NEW' and order.payment == 'CHARGEABLE':
self.charge_order(order_id, order.get_total())
return gmodel.ok_t()
def handle_charge_amount(self, message, order_id, order, context):
assert order is not None
if order.state == 'NEW':
order.state = 'PROCESSING'
order.local_status = '0'
order.is_active = None
self.process_order(order_id)
order.save()
return gmodel.ok_t()
def handle_chargeback_amount(self, message, order_id, order, context):
pass
def on_xml_sent(self, context):
self.__log(context = context, tag = 'to')
def on_xml_received(self, context):
self.__log(context = context, tag = 'from')
def __log(self, context, tag, error = None, description = None):
from store.models import Message
cart = None
if context.order_id is not None:
cart = self.on_retrieve_order(order_id = context.order_id, context = context)
else:
context.serial = 'PLACEHOLDER' #XXX
message = Message(cart = cart, serial = context.serial, tag = tag, input_xml = context.xml, output_xml = context.response_xml, error = error, description = description)
message.save()
__controller__ = None
def get_controller():
    global __controller__  # rebind the module-level singleton instead of shadowing it
if __controller__ is None:
#__controller__ = ControllerLevel_1(
__controller__ = GController(
vendor_id = settings.CHECKOUT_VENDOR_ID,
merchant_key = settings.CHECKOUT_MERCHANT_KEY,
currency = settings.CHECKOUT_CURRENCY,
is_sandbox = settings.CHECKOUT_IS_SANDBOX,#)
automatically_charge = False)
return __controller__
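# Editorial note (not in the original module): get_controller() memoises a
# single GController built from the four CHECKOUT_* Django settings above;
# the handle_* callbacks are invoked by the gchecky controller machinery when
# Google Checkout notifications arrive, and __log() records each XML exchange
# as a store.models.Message row.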
| BlessedEj/objets-d-art | store/checkout.py | Python | gpl-3.0 | 3,341 |
# shipMiningBonusOREfrig1
#
# Used by:
# Variations of ship: Venture (3 of 3)
type = "passive"
def handler(fit, module, context):
fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Mining"),
"miningAmount", module.getModifiedItemAttr("shipBonusOREfrig1"),
skill="Mining Frigate")
| Ebag333/Pyfa | eos/effects/shipminingbonusorefrig1.py | Python | gpl-3.0 | 369 |
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
__VERSION__="ete2-2.2rev1026"
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'about.ui'
#
# Created: Tue Jan 10 15:56:58 2012
# by: PyQt4 UI code generator 4.7.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_About(object):
def setupUi(self, About):
About.setObjectName("About")
About.resize(462, 249)
self.verticalLayoutWidget = QtGui.QWidget(About)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 0, 441, 208))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.version = QtGui.QLabel(self.verticalLayoutWidget)
self.version.setObjectName("version")
self.verticalLayout.addWidget(self.version)
self.retranslateUi(About)
QtCore.QMetaObject.connectSlotsByName(About)
def retranslateUi(self, About):
About.setWindowTitle(QtGui.QApplication.translate("About", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("About", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'DejaVu Sans\'; font-size:10pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><img src=\":/ete icons/ete_logo.png\" /></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-weight:600;\"><span style=\" font-size:11pt;\">ETE: a python Environment for Tree Exploration</span></p>\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:11pt; font-weight:600;\"><a href=\"http://ete.cgenomics.org\"><span style=\" text-decoration: underline; color:#0057ae;\">http://ete.cgenomics.org</span></a></p>\n"
"<p align=\"center\" style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.version.setText(QtGui.QApplication.translate("About", "VERSION", None, QtGui.QApplication.UnicodeUTF8))
import ete_resources_rc
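# Hedged usage sketch (added for illustration; note the pyuic4 warning above:
# regenerating this file would discard hand edits). Shows the standard way to
# host the generated Ui_About on a QDialog.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_About()
    ui.setupUi(dialog)
    ui.version.setText(__VERSION__)  # defined near the top of this file
    dialog.show()
    sys.exit(app.exec_())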
| zhangjiajie/tax_benchmark | script/ete2/treeview/_about.py | Python | gpl-3.0 | 4,290 |
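# Glyph stroke table (descriptive header added for readability; the opcode
# semantics are defined by the consuming renderer and are not documented
# here). Each line is a bare list literal of the form
# [codepoint, [drawing opcodes...]], followed by a comment repeating the
# codepoint and its character.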
[10, [0, 2, 172, 14, 8, 9, 10]] # 10 u'\n' 10
[32, [0, 2, 96, 14, 8, 250, 248]] # 32 u' ' 32
[33, [0, 2, 14, 108, 1, 20, 2, 20, 1, 68, 2, 32, 108, 14, 8, 254, 253]] # 33 u'!' 33
[34, [0, 2, 14, 8, 255, 250, 68, 1, 35, 2, 16, 1, 43, 2, 76, 48, 14, 8, 253, 1]] # 34 u'"' 34
[35, [0, 2, 14, 8, 254, 250, 36, 1, 64, 2, 36, 1, 72, 2, 35, 1, 108, 2, 32, 1, 100, 2, 109, 14, 8, 252, 253]] # 35 u'#' 35
[36, [0, 2, 14, 8, 254, 250, 20, 1, 48, 18, 22, 40, 22, 18, 48, 2, 39, 1, 108, 2, 64, 14, 8, 252, 253]] # 36 u'$' 36
[37, [0, 2, 14, 8, 254, 250, 100, 1, 28, 16, 20, 24, 2, 64, 1, 8, 252, 250, 2, 64, 1, 24, 20, 16, 28, 2, 32, 14, 8, 252, 253]] # 37 u'%' 37
[38, [0, 2, 14, 8, 254, 250, 65, 1, 42, 24, 22, 20, 34, 20, 22, 26, 28, 78, 2, 32, 14, 8, 252, 253]] # 38 u'&' 38
[39, [0, 2, 14, 3, 2, 14, 8, 255, 244, 14, 4, 2, 68, 1, 35, 2, 108, 32, 14, 3, 2, 14, 8, 251, 2, 14, 4, 2]] # 39 u"'" 39
[40, [0, 2, 14, 8, 255, 250, 100, 32, 1, 42, 44, 46, 2, 32, 14, 58]] # 40 u'(' 40
[41, [0, 2, 14, 8, 255, 250, 100, 1, 46, 44, 42, 2, 64, 14, 58]] # 41 u')' 41
[42, [0, 2, 14, 8, 254, 251, 33, 1, 68, 2, 46, 1, 72, 2, 65, 1, 74, 2, 68, 1, 78, 2, 47, 14, 8, 252, 253]] # 42 u'*' 42
[43, [0, 2, 14, 8, 254, 251, 33, 1, 68, 2, 46, 1, 72, 2, 111, 14, 8, 252, 253]] # 43 u'+' 43
[44, [0, 2, 14, 3, 2, 14, 43, 14, 4, 2, 20, 16, 1, 28, 26, 2, 18, 32, 14, 3, 2, 14, 8, 253, 248, 14, 4, 2]] # 44 u',' 44
[45, [0, 2, 14, 8, 254, 253, 52, 1, 64, 2, 32, 60, 14, 72]] # 45 u'-' 45
[46, [0, 2, 14, 28, 1, 20, 2, 47, 14, 8, 254, 253]] # 46 u'.' 46
[47, [0, 2, 14, 8, 254, 250, 1, 8, 4, 6, 2, 32, 108, 14, 8, 252, 253]] # 47 u'/' 47
[48, [0, 2, 14, 3, 2, 14, 8, 253, 244, 14, 4, 2, 16, 1, 22, 68, 18, 16, 30, 76, 26, 24, 2, 64, 14, 3, 2, 14, 8, 249, 250, 14, 4, 2]] # 48 u'0' 48
[49, [0, 2, 14, 8, 255, 250, 84, 1, 18, 108, 2, 24, 1, 32, 2, 32, 14, 58]] # 49 u'1' 49
[50, [0, 2, 14, 8, 254, 250, 84, 1, 18, 32, 30, 28, 26, 40, 26, 44, 64, 2, 32, 14, 8, 252, 253]] # 50 u'2' 50
[51, [0, 2, 14, 8, 254, 250, 84, 1, 18, 32, 30, 28, 26, 24, 2, 16, 1, 30, 28, 26, 40, 22, 2, 28, 96, 14, 8, 252, 253]] # 51 u'3' 51
[52, [0, 2, 14, 8, 254, 250, 65, 1, 72, 8, 3, 4, 108, 2, 48, 14, 8, 252, 253]] # 52 u'4' 52
[53, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 36, 22, 56, 36, 64, 2, 32, 108, 14, 8, 252, 253]] # 53 u'5' 53
[54, [0, 2, 14, 8, 254, 250, 52, 1, 48, 30, 28, 26, 40, 22, 52, 34, 16, 2, 48, 108, 14, 8, 252, 253]] # 54 u'6' 54
[55, [0, 2, 14, 8, 254, 250, 100, 1, 64, 107, 2, 80, 14, 8, 252, 253]] # 55 u'7' 55
[56, [0, 2, 14, 8, 254, 250, 16, 1, 22, 20, 18, 32, 18, 20, 22, 40, 26, 28, 30, 2, 32, 1, 30, 28, 26, 40, 2, 80, 14, 8, 252, 253]] # 56 u'8' 56
[57, [0, 2, 14, 8, 254, 250, 16, 1, 16, 34, 52, 22, 40, 26, 28, 30, 48, 2, 32, 60, 14, 8, 252, 253]] # 57 u'9' 57
[58, [0, 2, 14, 76, 68, 1, 28, 2, 28, 1, 28, 2, 47, 14, 8, 254, 253]] # 58 u':' 58
[59, [0, 2, 14, 3, 2, 14, 8, 255, 248, 14, 4, 2, 16, 68, 1, 28, 2, 28, 1, 44, 26, 2, 18, 32, 14, 3, 2, 14, 8, 251, 248, 14, 4, 2]] # 59 u';' 59
[60, [0, 2, 14, 3, 2, 14, 8, 253, 244, 14, 4, 2, 100, 48, 1, 58, 62, 2, 32, 14, 3, 2, 14, 8, 251, 250, 14, 4, 2]] # 60 u'<' 60
[61, [0, 2, 14, 75, 68, 1, 64, 2, 44, 1, 72, 2, 96, 44, 14, 8, 252, 255]] # 61 u'=' 61
[62, [0, 2, 14, 3, 2, 14, 8, 253, 244, 14, 4, 2, 100, 1, 62, 58, 2, 80, 14, 3, 2, 14, 8, 249, 250, 14, 4, 2]] # 62 u'>' 62
[63, [0, 2, 14, 3, 2, 14, 8, 253, 244, 14, 4, 2, 84, 1, 18, 16, 30, 28, 26, 28, 2, 28, 1, 28, 2, 48, 14, 3, 2, 14, 8, 249, 250, 14, 4, 2]] # 63 u'?' 63
[64, [0, 2, 14, 8, 254, 250, 50, 1, 26, 24, 20, 18, 16, 44, 18, 36, 22, 40, 26, 76, 30, 48, 2, 32, 14, 8, 252, 253]] # 64 u'@' 64
[65, [0, 2, 14, 8, 254, 250, 1, 36, 67, 77, 44, 2, 71, 1, 64, 2, 46, 14, 8, 252, 253]] # 65 u'A' 65
[66, [0, 2, 14, 8, 254, 250, 1, 48, 18, 20, 22, 40, 2, 32, 1, 18, 20, 22, 56, 2, 16, 1, 108, 2, 80, 14, 8, 252, 253]] # 66 u'B' 66
[67, [0, 2, 14, 8, 254, 250, 64, 20, 1, 26, 40, 22, 68, 18, 32, 30, 2, 46, 60, 14, 8, 252, 253]] # 67 u'C' 67
[68, [0, 2, 14, 8, 254, 250, 1, 48, 18, 68, 22, 56, 2, 16, 1, 108, 2, 80, 14, 8, 252, 253]] # 68 u'D' 68
[69, [0, 2, 14, 8, 254, 250, 1, 100, 64, 2, 72, 60, 1, 32, 2, 40, 60, 1, 64, 2, 32, 14, 8, 252, 253]] # 69 u'E' 69
[70, [0, 2, 14, 8, 254, 250, 1, 100, 64, 2, 72, 60, 1, 32, 2, 60, 64, 14, 8, 252, 253]] # 70 u'F' 70
[71, [0, 2, 14, 8, 254, 250, 50, 1, 16, 60, 56, 22, 68, 18, 48, 2, 32, 108, 14, 8, 252, 253]] # 71 u'G' 71
[72, [0, 2, 14, 8, 254, 250, 1, 100, 2, 60, 1, 64, 2, 52, 1, 108, 2, 32, 14, 8, 252, 253]] # 72 u'H' 72
[73, [0, 2, 14, 8, 255, 250, 100, 1, 32, 2, 24, 1, 108, 2, 24, 1, 32, 2, 32, 14, 58]] # 73 u'I' 73
[74, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 84, 2, 32, 108, 14, 8, 252, 253]] # 74 u'J' 74
[75, [0, 2, 14, 8, 254, 250, 1, 100, 2, 64, 1, 58, 24, 2, 16, 1, 62, 2, 32, 14, 8, 252, 253]] # 75 u'K' 75
[76, [0, 2, 14, 8, 254, 250, 100, 1, 108, 64, 2, 32, 14, 8, 252, 253]] # 76 u'L' 76
[77, [0, 2, 14, 8, 254, 250, 1, 100, 77, 67, 108, 2, 32, 14, 8, 252, 253]] # 77 u'M' 77
[78, [0, 2, 14, 8, 254, 250, 1, 100, 8, 4, 250, 100, 2, 108, 32, 14, 8, 252, 253]] # 78 u'N' 78
[79, [0, 2, 14, 8, 254, 250, 1, 100, 64, 108, 72, 2, 96, 14, 8, 252, 253]] # 79 u'O' 79
[80, [0, 2, 14, 8, 254, 250, 1, 100, 48, 30, 28, 26, 56, 2, 111, 14, 8, 252, 253]] # 80 u'P' 80
[81, [0, 2, 14, 8, 254, 250, 34, 1, 30, 26, 24, 22, 68, 18, 32, 30, 60, 26, 30, 2, 32, 14, 8, 252, 253]] # 81 u'Q' 81
[82, [0, 2, 14, 8, 254, 250, 1, 100, 48, 30, 28, 26, 56, 2, 16, 1, 62, 2, 32, 14, 8, 252, 253]] # 82 u'R' 82
[83, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 70, 18, 32, 30, 2, 32, 92, 14, 8, 252, 253]] # 83 u'S' 83
[84, [0, 2, 14, 8, 254, 250, 100, 1, 64, 2, 40, 1, 108, 2, 64, 14, 8, 252, 253]] # 84 u'T' 84
[85, [0, 2, 14, 8, 254, 250, 100, 1, 92, 30, 32, 18, 84, 2, 32, 108, 14, 8, 252, 253]] # 85 u'U' 85
[86, [0, 2, 14, 107, 100, 1, 109, 99, 2, 32, 108, 14, 8, 251, 253]] # 86 u'V' 86
[87, [0, 2, 14, 107, 100, 1, 9, 2, 250, 1, 3, 1, 253, 2, 6, 0, 0, 2, 32, 108, 14, 8, 251, 253]] # 87 u'W' 87
[88, [0, 2, 14, 8, 254, 250, 1, 8, 4, 6, 2, 72, 1, 8, 4, 250, 2, 32, 14, 8, 252, 253]] # 88 u'X' 88
[89, [0, 2, 14, 8, 254, 250, 100, 1, 8, 2, 253, 60, 2, 52, 1, 8, 2, 3, 2, 32, 108, 14, 8, 252, 253]] # 89 u'Y' 89
[90, [0, 2, 14, 8, 254, 250, 100, 1, 64, 8, 252, 250, 64, 2, 32, 14, 8, 252, 253]] # 90 u'Z' 90
[91, [0, 2, 14, 8, 255, 250, 1, 100, 32, 2, 108, 1, 40, 2, 64, 14, 58]] # 91 u'[' 91
[92, [0, 2, 14, 8, 254, 250, 100, 1, 8, 4, 250, 2, 32, 14, 8, 252, 253]] # 92 u'\\' 92
[93, [0, 2, 14, 8, 255, 250, 100, 1, 32, 108, 40, 2, 64, 14, 58]] # 93 u']' 93
[94, [0, 2, 14, 8, 254, 250, 68, 1, 34, 46, 2, 77, 14, 8, 252, 1]] # 94 u'^' 94
[95, [0, 2, 14, 40, 28, 1, 64, 2, 33, 14, 74]] # 95 u'_' 95
[96, [0, 2, 14, 3, 2, 14, 8, 255, 244, 14, 4, 2, 100, 1, 45, 2, 77, 14, 3, 2, 14, 8, 251, 2, 14, 4, 2]] # 96 u'`' 96
[97, [0, 2, 14, 75, 32, 1, 24, 22, 36, 18, 16, 30, 44, 26, 2, 18, 1, 30, 2, 32, 14, 8, 252, 253]] # 97 u'a' 97
[98, [0, 2, 14, 8, 254, 250, 1, 100, 2, 76, 1, 34, 16, 30, 44, 26, 24, 38, 2, 44, 96, 14, 8, 252, 253]] # 98 u'b' 98
[99, [0, 2, 14, 75, 66, 1, 56, 26, 44, 30, 48, 2, 32, 14, 8, 252, 253]] # 99 u'c' 99
[100, [0, 2, 14, 8, 254, 250, 65, 1, 42, 24, 22, 36, 18, 16, 46, 2, 68, 1, 108, 2, 32, 14, 8, 252, 253]] # 100 u'd' 100
[101, [0, 2, 14, 75, 36, 1, 48, 18, 22, 40, 26, 44, 30, 32, 2, 48, 14, 8, 252, 253]] # 101 u'e' 101
[102, [0, 2, 14, 8, 254, 250, 52, 1, 48, 2, 35, 1, 22, 24, 26, 92, 2, 80, 14, 8, 252, 253]] # 102 u'f' 102
[103, [0, 2, 14, 75, 28, 1, 30, 32, 18, 68, 22, 40, 26, 44, 30, 48, 2, 32, 14, 8, 252, 251]] # 103 u'g' 103
[104, [0, 2, 14, 8, 254, 250, 1, 100, 2, 76, 1, 34, 16, 30, 60, 2, 32, 14, 8, 252, 253]] # 104 u'h' 104
[105, [0, 2, 14, 108, 1, 68, 2, 20, 1, 20, 2, 32, 108, 14, 8, 254, 253]] # 105 u'i' 105
[106, [0, 2, 14, 3, 2, 14, 8, 253, 244, 14, 4, 2, 28, 1, 30, 16, 18, 84, 2, 20, 1, 20, 2, 32, 108, 14, 3, 2, 14, 8, 249, 246, 14, 4, 2]] # 106 u'j' 106
[107, [0, 2, 14, 8, 254, 250, 1, 100, 2, 76, 1, 32, 34, 2, 42, 1, 46, 2, 32, 14, 8, 252, 253]] # 107 u'k' 107
[108, [0, 2, 14, 3, 2, 14, 8, 255, 244, 14, 4, 2, 100, 1, 92, 30, 2, 32, 14, 3, 2, 14, 8, 251, 250, 14, 4, 2]] # 108 u'l' 108
[109, [0, 2, 14, 75, 1, 68, 2, 28, 1, 18, 30, 28, 2, 20, 1, 18, 30, 60, 2, 32, 14, 8, 252, 253]] # 109 u'm' 109
[110, [0, 2, 14, 8, 254, 250, 1, 68, 2, 44, 1, 34, 16, 30, 60, 2, 32, 14, 8, 252, 253]] # 110 u'n' 110
[111, [0, 2, 14, 75, 48, 1, 40, 22, 36, 18, 32, 30, 44, 26, 2, 48, 14, 8, 252, 253]] # 111 u'o' 111
[112, [0, 2, 14, 75, 44, 1, 100, 2, 28, 1, 18, 32, 30, 44, 26, 56, 2, 96, 14, 8, 252, 251]] # 112 u'p' 112
[113, [0, 2, 14, 75, 79, 1, 100, 2, 28, 1, 22, 40, 26, 44, 30, 48, 2, 32, 14, 8, 252, 251]] # 113 u'q' 113
[114, [0, 2, 14, 75, 1, 68, 2, 44, 1, 34, 16, 30, 2, 32, 60, 14, 8, 252, 253]] # 114 u'r' 114
[115, [0, 2, 14, 75, 1, 48, 18, 22, 40, 22, 18, 48, 2, 77, 14, 8, 252, 253]] # 115 u's' 115
[116, [0, 2, 14, 8, 254, 250, 68, 1, 64, 2, 38, 1, 92, 30, 18, 2, 47, 14, 8, 252, 253]] # 116 u't' 116
[117, [0, 2, 14, 75, 68, 1, 60, 30, 16, 34, 2, 36, 1, 76, 2, 32, 14, 8, 252, 253]] # 117 u'u' 117
[118, [0, 2, 14, 75, 68, 1, 77, 67, 2, 77, 14, 8, 252, 253]] # 118 u'v' 118
[119, [0, 2, 14, 75, 68, 1, 9, 1, 252, 1, 4, 1, 252, 1, 4, 0, 0, 2, 77, 14, 8, 252, 253]] # 119 u'w' 119
[120, [0, 2, 14, 75, 1, 66, 2, 72, 1, 78, 2, 32, 14, 8, 252, 253]] # 120 u'x' 120
[121, [0, 2, 14, 75, 68, 1, 77, 2, 67, 1, 107, 24, 2, 36, 96, 14, 8, 252, 251]] # 121 u'y' 121
[122, [0, 2, 14, 75, 68, 1, 64, 74, 64, 2, 32, 14, 8, 252, 253]] # 122 u'z' 122
[123, [0, 2, 14, 8, 255, 250, 100, 32, 1, 26, 28, 26, 30, 28, 30, 2, 32, 14, 58]] # 123 u'{' 123
[124, [0, 2, 14, 108, 1, 100, 2, 108, 32, 14, 8, 254, 253]] # 124 u'|' 124
[125, [0, 2, 14, 8, 255, 250, 1, 18, 20, 18, 22, 20, 22, 2, 108, 64, 14, 58]] # 125 u'}' 125
[126, [0, 2, 14, 75, 52, 1, 18, 47, 18, 2, 77, 14, 8, 252, 254]] # 126 u'~' 126
[128, [0, 7, 32, 172]] # 128 u'\x80' 128
[160, [0, 2, 96, 14, 8, 250, 248]] # 160 u'\xa0' 160
[161, [0, 2, 14, 108, 1, 68, 2, 20, 1, 20, 2, 8, 2, 250, 14, 8, 254, 253]] # 161 u'\xa1' 161
[162, [0, 2, 14, 8, 254, 251, 30, 1, 8, 2, 6, 2, 30, 1, 56, 26, 44, 30, 48, 2, 32, 14, 74]] # 162 u'\xa2' 162
[163, [0, 2, 14, 8, 254, 250, 64, 1, 72, 18, 68, 18, 30, 2, 43, 1, 40, 2, 111, 14, 8, 252, 253]] # 163 u'\xa3' 163
[165, [0, 2, 14, 8, 254, 250, 100, 1, 8, 2, 253, 60, 2, 37, 1, 32, 2, 39, 1, 32, 2, 24, 1, 8, 2, 3, 2, 8, 2, 250, 14, 8, 252, 253]] # 165 u'\xa5' 165
[167, [0, 2, 14, 3, 2, 14, 8, 253, 244, 14, 4, 2, 20, 1, 30, 16, 18, 22, 24, 22, 18, 2, 33, 1, 22, 24, 26, 30, 16, 30, 26, 2, 8, 3, 254, 14, 3, 2, 14, 8, 249, 250, 14, 4, 2]] # 167 u'\xa7' 167
[170, [0, 2, 14, 8, 255, 250, 2, 3, 2, 8, 3, 9, 1, 26, 24, 22, 36, 18, 16, 30, 44, 30, 2, 73, 1, 64, 2, 4, 2, 8, 2, 253, 14, 8, 253, 1]] # 170 u'\xaa' 170
[171, [0, 2, 14, 8, 254, 251, 33, 1, 38, 34, 2, 32, 1, 42, 46, 2, 47, 14, 8, 252, 254]] # 171 u'\xab' 171
[176, [0, 2, 14, 8, 255, 250, 84, 1, 18, 30, 26, 22, 2, 92, 64, 14, 8, 253, 2]] # 176 u'\xb0' 176
[177, [0, 2, 14, 8, 254, 250, 20, 1, 64, 2, 39, 1, 68, 2, 42, 1, 64, 2, 77, 14, 8, 252, 254]] # 177 u'\xb1' 177
[181, [0, 2, 14, 75, 44, 1, 8, 1, 6, 1, 60, 30, 16, 35, 2, 36, 1, 76, 2, 32, 14, 8, 252, 251]] # 181 u'\xb5' 181
[186, [0, 2, 14, 8, 255, 250, 3, 2, 2, 8, 3, 12, 1, 40, 26, 44, 30, 32, 18, 36, 22, 2, 107, 1, 64, 2, 4, 2, 8, 2, 253, 14, 8, 253, 1]] # 186 u'\xba' 186
[187, [0, 2, 14, 8, 254, 251, 20, 1, 34, 38, 2, 32, 1, 46, 42, 2, 64, 28, 14, 8, 252, 254]] # 187 u'\xbb' 187
[188, [0, 2, 3, 2, 14, 8, 251, 244, 1, 8, 10, 12, 2, 8, 248, 250, 1, 100, 26, 2, 92, 1, 32, 2, 8, 6, 252, 1, 72, 8, 3, 4, 108, 2, 96, 14, 8, 247, 250, 4, 2]] # 188 u'\xbc' 188
[189, [0, 2, 3, 2, 14, 8, 251, 244, 1, 8, 10, 12, 2, 8, 248, 250, 1, 100, 26, 2, 92, 1, 32, 2, 8, 3, 255, 1, 18, 32, 30, 28, 26, 40, 26, 44, 64, 2, 64, 14, 8, 247, 250, 4, 2]] # 189 u'\xbd' 189
[191, [0, 2, 3, 2, 14, 8, 253, 244, 8, 6, 2, 1, 42, 40, 38, 36, 34, 36, 2, 36, 1, 36, 2, 8, 8, 244, 14, 8, 249, 250, 4, 2]] # 191 u'\xbf' 191
[192, [0, 2, 14, 8, 254, 250, 1, 36, 34, 46, 44, 2, 8, 252, 1, 1, 64, 2, 8, 254, 4, 1, 39, 2, 8, 6, 250, 14, 8, 252, 253]] # 192 u'\xc0' 192
[193, [0, 2, 14, 8, 254, 250, 1, 36, 34, 46, 44, 2, 8, 252, 1, 1, 64, 2, 8, 254, 4, 1, 33, 2, 8, 2, 250, 14, 8, 252, 253]] # 193 u'\xc1' 193
[194, [0, 2, 14, 8, 254, 250, 1, 36, 34, 46, 44, 2, 8, 252, 1, 1, 64, 2, 8, 252, 3, 1, 34, 46, 2, 8, 2, 252, 14, 8, 252, 253]] # 194 u'\xc2' 194
[195, [0, 2, 14, 8, 254, 250, 1, 36, 34, 46, 44, 2, 8, 252, 1, 1, 64, 2, 8, 252, 4, 1, 18, 47, 18, 2, 8, 2, 250, 14, 8, 252, 253]] # 195 u'\xc3' 195
[196, [0, 2, 14, 8, 254, 250, 1, 36, 67, 2, 41, 1, 20, 2, 64, 1, 28, 2, 39, 1, 77, 44, 2, 71, 1, 64, 2, 46, 14, 8, 252, 253]] # 196 u'\xc4' 196
[197, [0, 2, 14, 8, 254, 250, 1, 36, 50, 22, 26, 62, 44, 2, 8, 252, 1, 1, 64, 2, 47, 14, 8, 252, 253]] # 197 u'\xc5' 197
[198, [0, 2, 14, 8, 254, 250, 1, 52, 8, 2, 3, 32, 2, 8, 254, 253, 1, 32, 2, 60, 1, 40, 100, 2, 75, 1, 32, 2, 79, 14, 8, 252, 253]] # 198 u'\xc6' 198
[199, [0, 2, 14, 8, 254, 250, 46, 1, 16, 20, 24, 20, 2, 33, 1, 26, 40, 22, 68, 18, 32, 30, 2, 46, 60, 14, 8, 252, 251]] # 199 u'\xc7' 199
[200, [0, 2, 14, 8, 254, 250, 1, 68, 64, 2, 22, 1, 39, 2, 76, 24, 1, 32, 2, 42, 1, 64, 2, 32, 14, 8, 252, 253]] # 200 u'\xc8' 200
[201, [0, 2, 14, 8, 254, 250, 1, 68, 64, 2, 37, 1, 41, 2, 60, 24, 1, 32, 2, 42, 1, 64, 2, 32, 14, 8, 252, 253]] # 201 u'\xc9' 201
[202, [0, 2, 14, 8, 254, 250, 1, 68, 64, 2, 22, 1, 22, 26, 2, 60, 24, 1, 32, 2, 42, 1, 64, 2, 32, 14, 8, 252, 253]] # 202 u'\xca' 202
[203, [0, 2, 14, 8, 254, 250, 1, 68, 64, 2, 22, 1, 20, 2, 40, 1, 28, 2, 60, 24, 1, 32, 2, 42, 1, 64, 2, 32, 14, 8, 252, 253]] # 203 u'\xcb' 203
[204, [0, 2, 14, 8, 255, 250, 68, 1, 32, 2, 20, 1, 39, 2, 45, 1, 76, 2, 24, 1, 32, 2, 32, 14, 58]] # 204 u'\xcc' 204
[205, [0, 2, 14, 8, 255, 250, 68, 1, 32, 2, 36, 1, 41, 2, 30, 1, 76, 2, 24, 1, 32, 2, 32, 14, 58]] # 205 u'\xcd' 205
[206, [0, 2, 14, 8, 255, 250, 68, 1, 32, 2, 20, 1, 22, 26, 2, 30, 1, 76, 2, 24, 1, 32, 2, 32, 14, 58]] # 206 u'\xce' 206
[207, [0, 2, 14, 8, 255, 250, 68, 1, 32, 2, 20, 1, 20, 2, 40, 1, 28, 2, 30, 1, 76, 2, 24, 1, 32, 2, 32, 14, 58]] # 207 u'\xcf' 207
[208, [0, 2, 14, 8, 254, 250, 1, 100, 48, 30, 76, 26, 56, 2, 36, 21, 1, 32, 2, 29, 79, 14, 8, 252, 253]] # 208 u'\xd0' 208
[209, [0, 2, 14, 8, 254, 250, 1, 68, 78, 68, 2, 72, 20, 1, 18, 47, 18, 2, 8, 2, 250, 14, 8, 252, 253]] # 209 u'\xd1' 209
[210, [0, 2, 14, 8, 254, 250, 1, 68, 64, 2, 22, 1, 39, 2, 44, 48, 1, 76, 72, 2, 96, 14, 8, 252, 253]] # 210 u'\xd2' 210
[211, [0, 2, 14, 8, 254, 250, 1, 68, 64, 2, 37, 1, 41, 2, 28, 48, 1, 76, 72, 2, 96, 14, 8, 252, 253]] # 211 u'\xd3' 211
[212, [0, 2, 14, 8, 254, 250, 1, 68, 64, 2, 22, 1, 22, 26, 2, 28, 48, 1, 76, 72, 2, 96, 14, 8, 252, 253]] # 212 u'\xd4' 212
[213, [0, 2, 14, 8, 254, 250, 1, 68, 64, 2, 36, 1, 26, 39, 26, 2, 28, 64, 1, 76, 72, 2, 96, 14, 8, 252, 253]] # 213 u'\xd5' 213
[214, [0, 2, 14, 8, 254, 250, 1, 68, 2, 18, 1, 20, 2, 43, 1, 64, 2, 22, 1, 20, 2, 45, 1, 76, 72, 2, 96, 14, 8, 252, 253]] # 214 u'\xd6' 214
[216, [0, 2, 14, 8, 254, 250, 1, 8, 4, 6, 2, 24, 1, 30, 76, 26, 40, 22, 68, 18, 32, 2, 8, 3, 250, 14, 8, 252, 253]] # 216 u'\xd8' 216
[217, [0, 2, 14, 8, 254, 250, 100, 1, 92, 30, 32, 18, 84, 2, 26, 1, 39, 2, 80, 108, 14, 8, 252, 253]] # 217 u'\xd9' 217
[218, [0, 2, 14, 8, 254, 250, 100, 1, 92, 30, 32, 18, 84, 2, 24, 1, 41, 2, 80, 92, 14, 8, 252, 253]] # 218 u'\xda' 218
[219, [0, 2, 14, 8, 254, 250, 100, 1, 92, 30, 32, 18, 84, 2, 26, 1, 22, 26, 2, 80, 92, 14, 8, 252, 253]] # 219 u'\xdb' 219
[220, [0, 2, 14, 8, 254, 250, 100, 1, 92, 30, 32, 18, 84, 2, 26, 1, 20, 2, 40, 1, 28, 2, 94, 14, 8, 252, 253]] # 220 u'\xdc' 220
[221, [0, 2, 14, 8, 254, 250, 68, 1, 46, 44, 2, 36, 1, 34, 2, 37, 1, 41, 2, 80, 92, 14, 8, 252, 253]] # 221 u'\xdd' 221
[222, [0, 2, 14, 8, 254, 250, 44, 1, 132, 2, 60, 1, 18, 32, 30, 44, 26, 40, 22, 2, 8, 6, 255, 14, 8, 252, 251]] # 222 u'\xde' 222
[223, [0, 2, 14, 8, 254, 250, 1, 18, 68, 18, 16, 30, 28, 26, 30, 28, 26, 24, 2, 64, 14, 8, 252, 253]] # 223 u'\xdf' 223
[224, [0, 2, 14, 8, 254, 250, 32, 1, 24, 22, 36, 18, 16, 30, 44, 26, 2, 18, 1, 30, 2, 2, 3, 2, 8, 253, 10, 1, 71, 2, 8, 11, 244, 4, 2, 14, 8, 252, 253]] # 224 u'\xe0' 224
[225, [0, 2, 14, 8, 254, 250, 32, 1, 24, 22, 36, 18, 16, 30, 44, 26, 2, 18, 1, 30, 2, 2, 3, 2, 8, 253, 12, 1, 73, 2, 8, 11, 246, 4, 2, 14, 8, 252, 253]] # 225 u'\xe1' 225
[226, [0, 2, 14, 8, 254, 250, 32, 1, 24, 22, 36, 18, 16, 30, 44, 26, 2, 18, 1, 30, 2, 2, 3, 2, 8, 253, 10, 1, 38, 42, 2, 8, 11, 246, 4, 2, 14, 8, 252, 253]] # 226 u'\xe2' 226
[227, [0, 2, 14, 8, 254, 250, 32, 1, 24, 22, 36, 18, 16, 30, 44, 26, 2, 18, 1, 30, 2, 3, 2, 8, 249, 10, 1, 18, 47, 18, 2, 8, 7, 245, 4, 2, 14, 8, 252, 253]] # 227 u'\xe3' 227
[228, [0, 2, 14, 8, 254, 250, 32, 1, 24, 22, 36, 2, 52, 1, 28, 2, 48, 1, 20, 2, 58, 1, 18, 16, 30, 44, 26, 2, 18, 1, 30, 2, 32, 14, 8, 252, 253]] # 228 u'\xe4' 228
[229, [0, 2, 14, 8, 254, 250, 3, 2, 8, 3, 8, 1, 34, 38, 42, 46, 2, 8, 3, 250, 4, 2, 1, 26, 24, 22, 36, 18, 16, 30, 44, 30, 2, 32, 14, 8, 252, 253]] # 229 u'\xe5' 229
[230, [0, 2, 14, 75, 33, 1, 26, 22, 36, 18, 30, 60, 2, 52, 1, 18, 30, 28, 40, 2, 28, 1, 30, 16, 2, 32, 14, 8, 252, 253]] # 230 u'\xe6' 230
[231, [0, 2, 14, 75, 66, 1, 56, 26, 44, 30, 48, 2, 40, 1, 28, 16, 28, 24, 2, 65, 14, 8, 252, 251]] # 231 u'\xe7' 231
[232, [0, 2, 14, 8, 254, 250, 36, 1, 48, 18, 22, 40, 26, 44, 30, 32, 2, 84, 1, 39, 2, 80, 108, 14, 8, 252, 253]] # 232 u'\xe8' 232
[233, [0, 2, 14, 8, 254, 250, 36, 1, 48, 18, 22, 40, 26, 44, 30, 32, 2, 100, 1, 41, 2, 94, 14, 8, 252, 253]] # 233 u'\xe9' 233
[234, [0, 2, 14, 8, 254, 250, 36, 1, 48, 18, 22, 40, 26, 44, 30, 32, 2, 84, 1, 22, 26, 2, 94, 14, 8, 252, 253]] # 234 u'\xea' 234
[235, [0, 2, 14, 8, 254, 250, 36, 1, 48, 18, 22, 40, 26, 44, 30, 32, 2, 100, 16, 1, 28, 2, 72, 1, 20, 2, 110, 14, 8, 252, 253]] # 235 u'\xeb' 235
[236, [0, 2, 14, 8, 255, 250, 16, 1, 68, 2, 37, 1, 47, 2, 32, 92, 14, 58]] # 236 u'\xec' 236
[237, [0, 2, 14, 8, 255, 250, 16, 1, 68, 2, 22, 1, 33, 2, 32, 108, 14, 58]] # 237 u'\xed' 237
[238, [0, 2, 14, 8, 255, 250, 16, 1, 68, 2, 22, 1, 18, 30, 2, 32, 92, 14, 58]] # 238 u'\xee' 238
[239, [0, 2, 14, 8, 255, 250, 16, 1, 68, 2, 22, 1, 20, 2, 32, 1, 28, 2, 32, 92, 14, 58]] # 239 u'\xef' 239
[240, [0, 2, 14, 8, 254, 250, 8, 3, 4, 1, 40, 26, 44, 30, 32, 18, 36, 54, 2, 28, 1, 33, 2, 8, 3, 250, 14, 8, 252, 253]] # 240 u'\xf0' 240
[241, [0, 2, 14, 8, 254, 250, 1, 68, 2, 20, 1, 18, 47, 18, 2, 74, 1, 34, 16, 30, 60, 2, 32, 14, 8, 252, 253]] # 241 u'\xf1' 241
[242, [0, 2, 14, 8, 254, 250, 48, 1, 40, 22, 36, 18, 32, 30, 44, 26, 2, 84, 1, 39, 2, 80, 108, 14, 8, 252, 253]] # 242 u'\xf2' 242
[243, [0, 2, 14, 8, 254, 250, 48, 1, 40, 22, 36, 18, 32, 30, 44, 26, 2, 100, 1, 41, 2, 94, 14, 8, 252, 253]] # 243 u'\xf3' 243
[244, [0, 2, 14, 8, 254, 250, 48, 1, 40, 22, 36, 18, 32, 30, 44, 26, 2, 84, 1, 22, 26, 2, 94, 14, 8, 252, 253]] # 244 u'\xf4' 244
[245, [0, 2, 14, 8, 254, 250, 48, 1, 40, 22, 36, 18, 32, 30, 44, 26, 2, 8, 253, 5, 1, 18, 47, 18, 2, 8, 2, 250, 14, 8, 252, 253]] # 245 u'\xf5' 245
[246, [0, 2, 14, 8, 254, 250, 48, 1, 40, 22, 36, 2, 52, 1, 28, 2, 64, 1, 20, 2, 8, 252, 253, 1, 18, 32, 30, 44, 26, 2, 48, 14, 8, 252, 253]] # 246 u'\xf6' 246
[247, [0, 2, 14, 8, 254, 251, 33, 1, 20, 2, 33, 1, 72, 2, 34, 1, 28, 2, 78, 14, 8, 252, 254]] # 247 u'\xf7' 247
[248, [0, 2, 14, 75, 16, 1, 32, 18, 36, 22, 40, 26, 44, 30, 2, 24, 1, 66, 2, 77, 14, 8, 252, 253]] # 248 u'\xf8' 248
[249, [0, 2, 14, 8, 254, 250, 68, 1, 60, 30, 16, 34, 2, 56, 68, 1, 47, 2, 30, 1, 76, 2, 32, 14, 8, 252, 253]] # 249 u'\xf9' 249
[250, [0, 2, 14, 8, 254, 250, 68, 1, 60, 30, 16, 34, 2, 54, 1, 33, 2, 45, 1, 76, 2, 32, 14, 8, 252, 253]] # 250 u'\xfa' 250
[251, [0, 2, 14, 8, 254, 250, 68, 1, 60, 30, 16, 34, 2, 54, 1, 18, 30, 2, 30, 1, 76, 2, 32, 14, 8, 252, 253]] # 251 u'\xfb' 251
[252, [0, 2, 14, 8, 254, 250, 100, 16, 1, 28, 2, 26, 1, 60, 30, 16, 34, 2, 68, 24, 1, 28, 2, 30, 1, 76, 2, 32, 14, 8, 252, 253]] # 252 u'\xfc' 252
[253, [0, 2, 14, 8, 254, 250, 68, 1, 77, 2, 8, 255, 5, 1, 33, 2, 45, 1, 107, 24, 2, 36, 96, 14, 8, 252, 253]] # 253 u'\xfd' 253
[254, [0, 2, 14, 8, 254, 250, 1, 100, 2, 30, 25, 1, 48, 30, 28, 26, 56, 2, 31, 28, 80, 14, 8, 252, 253]] # 254 u'\xfe' 254
[255, [0, 2, 14, 8, 254, 250, 68, 1, 77, 2, 84, 24, 1, 20, 2, 32, 1, 28, 2, 30, 1, 107, 24, 2, 36, 96, 14, 8, 252, 251]] # 255 u'\xff' 255
[260, [0, 2, 14, 8, 254, 250, 1, 36, 67, 77, 44, 2, 71, 1, 64, 2, 44, 1, 26, 30, 2, 34, 14, 8, 252, 253]] # 260 u'\u0104' 260
[261, [0, 2, 14, 75, 32, 1, 24, 22, 36, 18, 16, 30, 44, 26, 2, 18, 1, 30, 3, 2, 26, 30, 4, 2, 2, 33, 14, 8, 252, 253]] # 261 u'\u0105' 261
[262, [0, 2, 14, 8, 254, 250, 64, 20, 1, 26, 40, 22, 68, 18, 32, 30, 2, 56, 36, 1, 33, 2, 140, 48, 14, 8, 252, 253]] # 262 u'\u0106' 262
[263, [0, 2, 14, 75, 66, 1, 56, 26, 44, 30, 48, 2, 3, 2, 8, 251, 10, 1, 33, 2, 41, 8, 9, 246, 4, 2, 14, 8, 252, 253]] # 263 u'\u0107' 263
[268, [0, 2, 14, 8, 254, 248, 64, 20, 1, 26, 40, 22, 68, 18, 32, 30, 2, 38, 1, 18, 2, 26, 1, 22, 2, 80, 140, 14, 8, 252, 253]] # 268 u'\u010c' 268
[269, [0, 2, 14, 8, 254, 250, 66, 1, 56, 26, 44, 30, 48, 2, 40, 84, 1, 18, 2, 26, 1, 22, 2, 80, 108, 14, 8, 252, 253]] # 269 u'\u010d' 269
[270, [0, 2, 14, 8, 254, 248, 1, 48, 18, 68, 22, 56, 2, 16, 1, 108, 2, 16, 116, 1, 18, 2, 26, 1, 22, 2, 80, 140, 14, 8, 252, 253]] # 270 u'\u010e' 270
[271, [0, 2, 14, 8, 254, 250, 65, 1, 42, 24, 22, 36, 18, 16, 46, 2, 68, 1, 108, 2, 32, 100, 1, 26, 2, 16, 92, 14, 8, 252, 253]] # 271 u'\u010f' 271
[280, [0, 2, 14, 8, 254, 250, 1, 100, 64, 2, 72, 60, 1, 32, 2, 40, 60, 1, 64, 1, 26, 30, 2, 34, 14, 8, 252, 253]] # 280 u'\u0118' 280
[281, [0, 2, 14, 75, 36, 1, 48, 18, 22, 40, 26, 44, 30, 32, 3, 2, 26, 30, 4, 2, 2, 20, 48, 14, 8, 252, 253]] # 281 u'\u0119' 281
[282, [0, 2, 14, 8, 254, 248, 1, 100, 64, 2, 72, 60, 1, 32, 2, 40, 60, 1, 64, 2, 40, 116, 1, 18, 2, 26, 1, 22, 2, 80, 140, 14, 8, 252, 253]] # 282 u'\u011a' 282
[283, [0, 2, 14, 8, 254, 250, 36, 1, 48, 18, 22, 40, 26, 44, 30, 32, 2, 24, 84, 1, 18, 2, 26, 1, 22, 2, 80, 108, 14, 8, 252, 253]] # 283 u'\u011b' 283
[321, [0, 2, 14, 8, 254, 250, 100, 1, 108, 64, 2, 72, 52, 1, 3, 2, 8, 4, 5, 2, 8, 8, 245, 4, 2, 14, 8, 252, 253]] # 321 u'\u0141' 321
[322, [0, 2, 14, 3, 2, 14, 8, 255, 244, 14, 4, 2, 100, 3, 2, 16, 4, 2, 1, 92, 30, 2, 3, 2, 8, 253, 5, 1, 8, 3, 4, 2, 8, 4, 247, 4, 2, 14, 3, 2, 14, 8, 251, 250, 14, 4, 2]] # 322 u'\u0142' 322
[323, [0, 2, 14, 8, 254, 250, 1, 100, 8, 4, 250, 100, 2, 56, 20, 1, 33, 2, 140, 48, 14, 8, 252, 253]] # 323 u'\u0143' 323
[324, [0, 2, 14, 3, 2, 14, 8, 253, 248, 14, 4, 2, 1, 68, 2, 28, 1, 18, 16, 30, 60, 2, 8, 254, 5, 1, 17, 2, 25, 8, 4, 251, 14, 3, 2, 14, 8, 249, 250, 14, 4, 2]] # 324 u'\u0144' 324
[327, [0, 2, 14, 8, 254, 248, 1, 100, 8, 4, 250, 100, 2, 39, 1, 18, 2, 26, 1, 22, 2, 80, 140, 14, 8, 252, 253]] # 327 u'\u0147' 327
[328, [0, 2, 14, 8, 254, 250, 1, 68, 2, 28, 1, 18, 16, 30, 60, 2, 24, 84, 1, 18, 2, 26, 1, 22, 2, 64, 108, 14, 58]] # 328 u'\u0148' 328
[336, [0, 2, 14, 8, 254, 250, 1, 100, 64, 108, 72, 2, 116, 16, 1, 60, 2, 32, 1, 52, 2, 124, 48, 14, 8, 252, 253]] # 336 u'\u0150' 336
[337, [0, 2, 14, 75, 48, 1, 40, 22, 36, 18, 32, 30, 44, 26, 2, 116, 1, 44, 2, 40, 1, 36, 2, 124, 80, 14, 8, 252, 253]] # 337 u'\u0151' 337
[344, [0, 2, 14, 8, 254, 248, 1, 100, 48, 30, 28, 26, 56, 2, 16, 1, 62, 2, 40, 116, 1, 18, 2, 26, 1, 22, 2, 80, 140, 14, 8, 252, 253]] # 344 u'\u0158' 344
[345, [0, 2, 14, 8, 254, 250, 1, 68, 2, 44, 1, 34, 16, 30, 2, 38, 1, 18, 2, 26, 1, 22, 2, 80, 108, 14, 8, 252, 253]] # 345 u'\u0159' 345
[346, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 70, 18, 32, 30, 2, 36, 56, 1, 33, 2, 140, 48, 14, 8, 252, 253]] # 346 u'\u015a' 346
[347, [0, 2, 14, 75, 1, 48, 18, 22, 40, 22, 18, 48, 2, 3, 2, 8, 251, 2, 1, 33, 2, 41, 8, 9, 246, 4, 2, 14, 8, 252, 253]] # 347 u'\u015b' 347
[352, [0, 2, 14, 8, 254, 248, 20, 1, 30, 32, 18, 70, 18, 32, 30, 2, 38, 1, 18, 2, 26, 1, 22, 2, 80, 140, 14, 8, 252, 253]] # 352 u'\u0160' 352
[353, [0, 2, 14, 8, 254, 250, 1, 48, 18, 22, 40, 22, 18, 48, 2, 39, 1, 18, 2, 26, 1, 22, 2, 80, 108, 14, 8, 252, 253]] # 353 u'\u0161' 353
[356, [0, 2, 14, 8, 254, 248, 100, 1, 64, 2, 40, 1, 108, 2, 116, 1, 18, 2, 26, 1, 22, 2, 80, 140, 14, 8, 252, 253]] # 356 u'\u0164' 356
[357, [0, 2, 14, 8, 254, 250, 68, 1, 64, 2, 38, 1, 92, 30, 18, 2, 84, 1, 26, 2, 48, 92, 14, 8, 252, 253]] # 357 u'\u0165' 357
[366, [0, 2, 14, 8, 254, 247, 100, 1, 92, 30, 32, 18, 84, 2, 39, 1, 18, 22, 26, 30, 2, 64, 124, 14, 8, 252, 253]] # 366 u'\u016e' 366
[367, [0, 2, 14, 8, 254, 249, 68, 1, 60, 30, 16, 34, 2, 36, 1, 76, 2, 40, 84, 1, 18, 22, 26, 30, 2, 64, 92, 14, 8, 252, 253]] # 367 u'\u016f' 367
[368, [0, 2, 14, 8, 254, 250, 100, 1, 92, 30, 32, 18, 84, 2, 22, 1, 60, 2, 40, 1, 52, 2, 124, 80, 14, 8, 252, 253]] # 368 u'\u0170' 368
[369, [0, 2, 14, 75, 68, 1, 60, 30, 16, 34, 2, 36, 1, 76, 2, 116, 24, 1, 44, 2, 40, 1, 36, 2, 124, 80, 14, 8, 252, 253]] # 369 u'\u0171' 369
[377, [0, 2, 14, 8, 254, 250, 100, 1, 64, 8, 252, 250, 64, 2, 56, 116, 1, 33, 2, 140, 48, 14, 8, 252, 253]] # 377 u'\u0179' 377
[378, [0, 2, 14, 75, 68, 1, 64, 74, 64, 2, 3, 2, 8, 251, 10, 1, 33, 2, 41, 8, 9, 246, 4, 2, 14, 8, 252, 253]] # 378 u'\u017a' 378
[379, [0, 2, 14, 8, 254, 250, 100, 1, 64, 8, 252, 250, 64, 2, 132, 40, 1, 3, 4, 26, 30, 18, 22, 4, 4, 2, 64, 140, 14, 8, 252, 253]] # 379 u'\u017b' 379
[380, [0, 2, 14, 75, 68, 1, 64, 74, 64, 2, 3, 2, 8, 252, 11, 3, 4, 1, 26, 30, 18, 22, 4, 4, 2, 8, 8, 245, 4, 2, 14, 8, 252, 253]] # 380 u'\u017c' 380
[381, [0, 2, 14, 8, 254, 248, 100, 1, 64, 8, 252, 250, 64, 2, 40, 116, 1, 18, 2, 26, 1, 22, 2, 80, 140, 14, 8, 252, 253]] # 381 u'\u017d' 381
[382, [0, 2, 14, 8, 254, 250, 68, 1, 64, 74, 64, 2, 40, 84, 1, 18, 2, 26, 1, 22, 2, 80, 108, 14, 8, 252, 253]] # 382 u'\u017e' 382
[1040, [0, 2, 14, 8, 254, 250, 1, 36, 67, 77, 44, 2, 71, 1, 64, 2, 46, 14, 8, 252, 253]] # 1040 u'\u0410' 1040
[1041, [0, 2, 14, 8, 254, 250, 1, 100, 48, 28, 20, 56, 60, 48, 30, 28, 26, 56, 2, 96, 14, 8, 252, 253]] # 1041 u'\u0411' 1041
[1042, [0, 2, 14, 8, 254, 250, 1, 48, 18, 20, 22, 40, 2, 32, 1, 18, 20, 22, 56, 2, 16, 1, 108, 2, 80, 14, 8, 252, 253]] # 1042 u'\u0412' 1042
[1043, [0, 2, 14, 8, 254, 250, 1, 100, 64, 28, 2, 92, 32, 14, 8, 252, 253]] # 1043 u'\u0413' 1043
[1044, [0, 2, 14, 8, 254, 250, 28, 1, 20, 80, 100, 40, 75, 44, 80, 28, 2, 20, 32, 14, 8, 250, 253]] # 1044 u'\u0414' 1044
[1045, [0, 2, 14, 8, 254, 250, 1, 100, 64, 2, 72, 60, 1, 32, 2, 40, 60, 1, 64, 2, 32, 14, 8, 252, 253]] # 1045 u'\u0415' 1045
[1046, [0, 2, 14, 8, 254, 250, 1, 98, 2, 56, 1, 108, 2, 56, 100, 1, 110, 2, 32, 14, 8, 250, 253]] # 1046 u'\u0416' 1046
[1047, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 20, 22, 24, 16, 18, 20, 22, 40, 26, 2, 96, 92, 14, 8, 252, 253]] # 1047 u'\u0417' 1047
[1048, [0, 2, 14, 8, 254, 250, 1, 66, 76, 100, 2, 72, 1, 108, 2, 96, 14, 8, 252, 253]] # 1048 u'\u0418' 1048
[1049, [0, 2, 14, 8, 254, 250, 1, 66, 76, 100, 2, 24, 1, 40, 2, 24, 1, 108, 2, 96, 14, 8, 252, 253]] # 1049 u'\u0419' 1049
[1050, [0, 2, 14, 8, 254, 250, 1, 100, 2, 64, 1, 58, 24, 2, 16, 1, 62, 2, 32, 14, 8, 252, 253]] # 1050 u'\u041a' 1050
[1051, [0, 2, 14, 8, 254, 250, 1, 16, 99, 16, 108, 2, 32, 14, 8, 251, 253]] # 1051 u'\u041b' 1051
[1052, [0, 2, 14, 8, 254, 250, 1, 100, 77, 67, 108, 2, 32, 14, 8, 252, 253]] # 1052 u'\u041c' 1052
[1053, [0, 2, 14, 8, 254, 250, 1, 100, 2, 60, 1, 64, 2, 52, 1, 108, 2, 32, 14, 8, 252, 253]] # 1053 u'\u041d' 1053
[1054, [0, 2, 14, 8, 254, 250, 20, 1, 68, 18, 32, 30, 76, 26, 40, 22, 2, 96, 28, 14, 8, 252, 253]] # 1054 u'\u041e' 1054
[1055, [0, 2, 14, 8, 254, 250, 1, 100, 64, 108, 2, 32, 14, 8, 252, 253]] # 1055 u'\u041f' 1055
[1056, [0, 2, 14, 8, 254, 250, 1, 100, 48, 30, 28, 26, 56, 2, 111, 14, 8, 252, 253]] # 1056 u'\u0420' 1056
[1057, [0, 2, 14, 8, 254, 250, 64, 20, 1, 26, 40, 22, 68, 18, 32, 30, 2, 46, 60, 14, 8, 252, 253]] # 1057 u'\u0421' 1057
[1058, [0, 2, 14, 8, 254, 250, 100, 1, 64, 2, 40, 1, 108, 2, 64, 14, 8, 252, 253]] # 1058 u'\u0422' 1058
[1059, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 84, 76, 56, 22, 52, 2, 96, 108, 14, 8, 252, 253]] # 1059 u'\u0423' 1059
[1060, [0, 2, 14, 8, 254, 250, 32, 1, 100, 24, 26, 44, 30, 32, 18, 36, 22, 24, 2, 64, 108, 14, 8, 252, 253]] # 1060 u'\u0424' 1060
[1061, [0, 2, 14, 8, 254, 250, 1, 8, 4, 6, 2, 72, 1, 8, 4, 250, 2, 32, 14, 8, 252, 253]] # 1061 u'\u0425' 1061
[1062, [0, 2, 14, 8, 254, 250, 1, 100, 108, 64, 100, 108, 16, 28, 2, 20, 32, 14, 8, 251, 253]] # 1062 u'\u0426' 1062
[1063, [0, 2, 14, 8, 254, 250, 100, 1, 60, 30, 48, 68, 108, 2, 32, 14, 8, 252, 253]] # 1063 u'\u0427' 1063
[1064, [0, 2, 14, 8, 254, 250, 1, 100, 108, 48, 68, 76, 48, 100, 108, 2, 32, 14, 8, 250, 253]] # 1064 u'\u0428' 1064
[1065, [0, 2, 14, 8, 254, 250, 1, 100, 108, 48, 68, 76, 48, 100, 108, 16, 28, 20, 2, 32, 14, 8, 249, 253]] # 1065 u'\u0429' 1065
[1066, [0, 2, 14, 8, 254, 250, 84, 1, 20, 16, 108, 48, 18, 20, 22, 56, 2, 96, 60, 14, 8, 251, 253]] # 1066 u'\u042a' 1066
[1067, [0, 2, 14, 8, 254, 250, 1, 48, 18, 20, 22, 56, 60, 100, 2, 80, 1, 108, 2, 32, 14, 8, 251, 253]] # 1067 u'\u042b' 1067
[1068, [0, 2, 14, 8, 254, 250, 1, 48, 18, 20, 22, 56, 60, 100, 2, 96, 108, 14, 8, 252, 253]] # 1068 u'\u042c' 1068
[1069, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 36, 40, 32, 36, 22, 40, 26, 2, 96, 92, 14, 8, 252, 253]] # 1069 u'\u042d' 1069
[1070, [0, 2, 14, 8, 254, 250, 1, 100, 60, 16, 36, 18, 16, 30, 76, 26, 24, 22, 36, 2, 80, 60, 14, 8, 252, 253]] # 1070 u'\u042e' 1070
[1071, [0, 2, 14, 8, 254, 250, 1, 34, 32, 68, 56, 26, 44, 30, 48, 44, 2, 32, 14, 8, 252, 253]] # 1071 u'\u042f' 1071
[1072, [0, 2, 14, 8, 254, 250, 20, 1, 36, 18, 32, 30, 20, 76, 20, 26, 40, 22, 2, 96, 28, 14, 8, 252, 253]] # 1072 u'\u0430' 1072
[1073, [0, 2, 14, 8, 254, 250, 68, 48, 1, 56, 76, 48, 18, 22, 56, 2, 44, 96, 14, 8, 252, 253]] # 1073 u'\u0431' 1073
[1074, [0, 2, 14, 8, 254, 250, 1, 68, 32, 10, 1, 164, 40, 48, 10, 1, 164, 56, 2, 96, 14, 8, 252, 253]] # 1074 u'\u0432' 1074
[1075, [0, 2, 14, 8, 254, 250, 1, 68, 48, 2, 76, 32, 14, 8, 253, 253]] # 1075 u'\u0433' 1075
[1076, [0, 2, 14, 8, 254, 250, 28, 1, 20, 16, 52, 18, 16, 76, 40, 48, 28, 2, 20, 32, 14, 8, 252, 253]] # 1076 u'\u0434' 1076
[1077, [0, 2, 14, 75, 36, 1, 48, 18, 22, 40, 26, 44, 30, 32, 2, 48, 14, 8, 252, 253]] # 1077 u'\u0435' 1077
[1078, [0, 2, 14, 8, 254, 250, 1, 66, 2, 72, 1, 78, 2, 40, 1, 68, 2, 64, 76, 14, 8, 252, 253]] # 1078 u'\u0436' 1078
[1079, [0, 2, 14, 8, 254, 250, 52, 1, 18, 32, 30, 26, 24, 16, 30, 26, 40, 22, 2, 96, 28, 14, 8, 252, 253]] # 1079 u'\u0437' 1079
[1080, [0, 2, 14, 8, 254, 250, 68, 1, 76, 66, 76, 2, 32, 14, 8, 252, 253]] # 1080 u'\u0438' 1080
[1081, [0, 2, 14, 8, 254, 250, 68, 1, 76, 66, 76, 2, 68, 24, 1, 40, 2, 80, 76, 14, 8, 252, 253]] # 1081 u'\u0439' 1081
[1082, [0, 2, 14, 8, 254, 250, 1, 68, 44, 32, 34, 42, 46, 2, 32, 14, 8, 252, 253]] # 1082 u'\u043a' 1082
[1083, [0, 2, 14, 8, 254, 250, 1, 67, 32, 76, 2, 32, 14, 8, 252, 253]] # 1083 u'\u043b' 1083
[1084, [0, 2, 14, 8, 254, 250, 1, 68, 46, 34, 76, 2, 32, 14, 8, 252, 253]] # 1084 u'\u043c' 1084
[1085, [0, 2, 14, 8, 254, 250, 1, 68, 44, 64, 36, 76, 2, 32, 14, 8, 252, 253]] # 1085 u'\u043d' 1085
[1086, [0, 2, 14, 75, 14, 8, 0, 254, 20, 1, 36, 18, 32, 30, 44, 26, 40, 22, 2, 96, 28, 14, 8, 252, 253]] # 1086 u'\u043e' 1086
[1087, [0, 2, 14, 8, 254, 250, 1, 68, 64, 76, 2, 32, 14, 8, 252, 253]] # 1087 u'\u043f' 1087
[1088, [0, 2, 14, 8, 254, 250, 1, 68, 48, 30, 28, 26, 56, 2, 96, 28, 14, 8, 252, 253]] # 1088 u'\u0440' 1088
[1089, [0, 2, 14, 8, 254, 250, 64, 20, 1, 26, 40, 22, 36, 18, 32, 30, 2, 32, 60, 14, 8, 252, 253]] # 1089 u'\u0441' 1089
[1090, [0, 2, 14, 8, 254, 250, 32, 1, 68, 40, 64, 2, 32, 76, 14, 8, 252, 253]] # 1090 u'\u0442' 1090
[1091, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 52, 44, 40, 38, 2, 96, 76, 14, 8, 252, 253]] # 1091 u'\u0443' 1091
[1092, [0, 2, 14, 8, 254, 250, 32, 1, 68, 24, 26, 28, 30, 32, 18, 20, 22, 24, 2, 64, 76, 14, 8, 252, 253]] # 1092 u'\u0444' 1092
[1093, [0, 2, 14, 75, 14, 8, 0, 254, 1, 66, 2, 72, 1, 78, 2, 32, 14, 8, 252, 253]] # 1093 u'\u0445' 1093
[1094, [0, 2, 14, 8, 254, 250, 68, 1, 76, 48, 68, 76, 16, 28, 2, 20, 32, 14, 8, 252, 253]] # 1094 u'\u0446' 1094
[1095, [0, 2, 14, 8, 254, 250, 68, 1, 60, 48, 52, 76, 2, 32, 14, 8, 253, 253]] # 1095 u'\u0447' 1095
[1096, [0, 2, 14, 8, 254, 250, 1, 68, 76, 32, 36, 44, 32, 68, 2, 76, 32, 14, 8, 252, 253]] # 1096 u'\u0448' 1096
[1097, [0, 2, 14, 8, 254, 250, 1, 68, 76, 32, 36, 44, 32, 68, 76, 16, 28, 2, 20, 32, 14, 8, 251, 253]] # 1097 u'\u0449' 1097
[1098, [0, 2, 14, 8, 254, 250, 68, 1, 16, 76, 32, 18, 22, 40, 2, 80, 44, 14, 8, 252, 253]] # 1098 u'\u044a' 1098
[1099, [0, 2, 14, 8, 254, 250, 1, 68, 44, 32, 30, 26, 40, 2, 64, 1, 68, 2, 76, 32, 14, 8, 252, 253]] # 1099 u'\u044b' 1099
[1100, [0, 2, 14, 8, 254, 250, 1, 68, 44, 32, 30, 26, 40, 2, 80, 14, 8, 253, 253]] # 1100 u'\u044c' 1100
[1101, [0, 2, 14, 8, 254, 250, 20, 1, 30, 32, 18, 20, 40, 32, 20, 22, 40, 26, 2, 96, 60, 14, 8, 252, 253]] # 1101 u'\u044d' 1101
[1102, [0, 2, 14, 8, 254, 250, 1, 68, 44, 16, 20, 18, 16, 30, 44, 26, 24, 22, 20, 2, 80, 44, 14, 8, 252, 253]] # 1102 u'\u044e' 1102
[1103, [0, 2, 14, 8, 254, 250, 1, 34, 24, 22, 18, 48, 44, 40, 32, 44, 2, 32, 14, 8, 252, 253]] # 1103 u'\u044f' 1103
[8352, [0, 7, 32, 172]] # 8352 u'\u20a0' 8352
[8359, [0, 2, 14, 107, 14, 16, 1, 100, 32, 30, 28, 26, 40, 2, 8, 4, 3, 1, 92, 30, 18, 2, 37, 1, 40, 2, 62, 32, 14, 8, 250, 253]] # 8359 u'\u20a7' 8359
[8364, [0, 3, 2, 2, 14, 8, 252, 244, 128, 36, 1, 28, 26, 72, 38, 68, 5, 68, 34, 64, 30, 28, 6, 2, 8, 255, 255, 5, 1, 80, 6, 2, 36, 1, 96, 2, 8, 6, 249, 14, 8, 248, 250, 4, 2]] # 8364 u'\u20ac' 8364
[8486, [0, 2, 14, 8, 254, 250, 1, 16, 20, 37, 36, 18, 32, 30, 44, 43, 28, 16, 2, 32, 14, 8, 252, 253]] # 8486 u'\u2126' 8486
[8709, [0, 2, 14, 8, 254, 250, 18, 1, 22, 36, 18, 32, 30, 44, 26, 40, 2, 27, 1, 99, 2, 16, 61, 60, 14, 8, 252, 253]] # 8709 u'\u2205' 8709
[8734, [0, 2, 14, 75, 52, 1, 30, 34, 30, 26, 38, 26, 2, 111, 14, 8, 252, 255]] # 8734 u'\u221e' 8734
[8804, [0, 2, 14, 8, 254, 250, 20, 1, 64, 2, 84, 1, 73, 79, 2, 46, 14, 8, 252, 254]] # 8804 u'\u2264' 8804
[8962, [0, 2, 14, 75, 1, 36, 34, 46, 44, 72, 2, 96, 14, 8, 252, 253]] # 8962 u'\u2302' 8962 | zecruel/dxf_le | mini_cad/fonte_padrao.py | Python | gpl-3.0 | 34,230 |
"""Colorful worry-free console applications for Linux, Mac OS X, and Windows.
Supported natively on Linux and Mac OSX (Just Works), and on Windows it works the same if Windows.enable() is called.
Gives you expected and sane results from methods like len() and .capitalize().
https://github.com/Robpol86/colorclass
https://pypi.python.org/pypi/colorclass
"""
import atexit
from collections import Mapping
import ctypes
import os
import re
import sys
if os.name == 'nt':
import ctypes.wintypes
__author__ = '@Robpol86'
__license__ = 'MIT'
__version__ = '1.1.1'
_BASE_CODES = {
'/all': 0, 'b': 1, 'f': 2, 'i': 3, 'u': 4, 'flash': 5, 'outline': 6, 'negative': 7, 'invis': 8, 'strike': 9,
'/b': 22, '/f': 22, '/i': 23, '/u': 24, '/flash': 25, '/outline': 26, '/negative': 27, '/invis': 28,
'/strike': 29, '/fg': 39, '/bg': 49,
'black': 30, 'red': 31, 'green': 32, 'yellow': 33, 'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37,
'bgblack': 40, 'bgred': 41, 'bggreen': 42, 'bgyellow': 43, 'bgblue': 44, 'bgmagenta': 45, 'bgcyan': 46,
'bgwhite': 47,
'hiblack': 90, 'hired': 91, 'higreen': 92, 'hiyellow': 93, 'hiblue': 94, 'himagenta': 95, 'hicyan': 96,
'hiwhite': 97,
'hibgblack': 100, 'hibgred': 101, 'hibggreen': 102, 'hibgyellow': 103, 'hibgblue': 104, 'hibgmagenta': 105,
'hibgcyan': 106, 'hibgwhite': 107,
'autored': None, 'autoblack': None, 'automagenta': None, 'autowhite': None, 'autoblue': None, 'autoyellow': None,
'autogreen': None, 'autocyan': None,
'autobgred': None, 'autobgblack': None, 'autobgmagenta': None, 'autobgwhite': None, 'autobgblue': None,
'autobgyellow': None, 'autobggreen': None, 'autobgcyan': None,
'/black': 39, '/red': 39, '/green': 39, '/yellow': 39, '/blue': 39, '/magenta': 39, '/cyan': 39, '/white': 39,
'/hiblack': 39, '/hired': 39, '/higreen': 39, '/hiyellow': 39, '/hiblue': 39, '/himagenta': 39, '/hicyan': 39,
'/hiwhite': 39,
'/bgblack': 49, '/bgred': 49, '/bggreen': 49, '/bgyellow': 49, '/bgblue': 49, '/bgmagenta': 49, '/bgcyan': 49,
'/bgwhite': 49, '/hibgblack': 49, '/hibgred': 49, '/hibggreen': 49, '/hibgyellow': 49, '/hibgblue': 49,
'/hibgmagenta': 49, '/hibgcyan': 49, '/hibgwhite': 49,
'/autored': 39, '/autoblack': 39, '/automagenta': 39, '/autowhite': 39, '/autoblue': 39, '/autoyellow': 39,
'/autogreen': 39, '/autocyan': 39,
'/autobgred': 49, '/autobgblack': 49, '/autobgmagenta': 49, '/autobgwhite': 49, '/autobgblue': 49,
'/autobgyellow': 49, '/autobggreen': 49, '/autobgcyan': 49,
}
_WINDOWS_CODES = {
'/all': -33, '/fg': -39, '/bg': -49,
'black': 0, 'red': 4, 'green': 2, 'yellow': 6, 'blue': 1, 'magenta': 5, 'cyan': 3, 'white': 7,
'bgblack': -8, 'bgred': 64, 'bggreen': 32, 'bgyellow': 96, 'bgblue': 16, 'bgmagenta': 80, 'bgcyan': 48,
'bgwhite': 112,
'hiblack': 8, 'hired': 12, 'higreen': 10, 'hiyellow': 14, 'hiblue': 9, 'himagenta': 13, 'hicyan': 11, 'hiwhite': 15,
'hibgblack': 128, 'hibgred': 192, 'hibggreen': 160, 'hibgyellow': 224, 'hibgblue': 144, 'hibgmagenta': 208,
'hibgcyan': 176, 'hibgwhite': 240,
'/black': -39, '/red': -39, '/green': -39, '/yellow': -39, '/blue': -39, '/magenta': -39, '/cyan': -39,
'/white': -39, '/hiblack': -39, '/hired': -39, '/higreen': -39, '/hiyellow': -39, '/hiblue': -39, '/himagenta': -39,
'/hicyan': -39, '/hiwhite': -39,
'/bgblack': -49, '/bgred': -49, '/bggreen': -49, '/bgyellow': -49, '/bgblue': -49, '/bgmagenta': -49,
'/bgcyan': -49, '/bgwhite': -49, '/hibgblack': -49, '/hibgred': -49, '/hibggreen': -49, '/hibgyellow': -49,
'/hibgblue': -49, '/hibgmagenta': -49, '/hibgcyan': -49, '/hibgwhite': -49,
}
_RE_GROUP_SEARCH = re.compile(r'(?:\033\[[\d;]+m)+')
_RE_NUMBER_SEARCH = re.compile(r'\033\[([\d;]+)m')
_RE_SPLIT = re.compile(r'(\033\[[\d;]+m)')
PARENT_CLASS = unicode if sys.version_info[0] == 2 else str
class _AutoCodes(Mapping):
"""Read-only subclass of dict, resolves closing tags (based on colorclass.CODES) and automatic colors."""
DISABLE_COLORS = False
LIGHT_BACKGROUND = False
def __init__(self):
self.__dict = _BASE_CODES.copy()
def __getitem__(self, item):
        # All 'auto'-prefixed tags resolve dynamically through the matching
        # property defined below; every other tag is a static dict lookup.
        if item.startswith('auto'):
            return getattr(self, item)
        return self.__dict[item]
def __iter__(self):
return iter(self.__dict)
def __len__(self):
return len(self.__dict)
@property
def autoblack(self):
"""Returns automatic black foreground color depending on background color."""
return self.__dict['black' if _AutoCodes.LIGHT_BACKGROUND else 'hiblack']
@property
def autored(self):
"""Returns automatic red foreground color depending on background color."""
return self.__dict['red' if _AutoCodes.LIGHT_BACKGROUND else 'hired']
@property
def autogreen(self):
"""Returns automatic green foreground color depending on background color."""
return self.__dict['green' if _AutoCodes.LIGHT_BACKGROUND else 'higreen']
@property
def autoyellow(self):
"""Returns automatic yellow foreground color depending on background color."""
return self.__dict['yellow' if _AutoCodes.LIGHT_BACKGROUND else 'hiyellow']
@property
def autoblue(self):
"""Returns automatic blue foreground color depending on background color."""
return self.__dict['blue' if _AutoCodes.LIGHT_BACKGROUND else 'hiblue']
@property
def automagenta(self):
"""Returns automatic magenta foreground color depending on background color."""
return self.__dict['magenta' if _AutoCodes.LIGHT_BACKGROUND else 'himagenta']
@property
def autocyan(self):
"""Returns automatic cyan foreground color depending on background color."""
return self.__dict['cyan' if _AutoCodes.LIGHT_BACKGROUND else 'hicyan']
@property
def autowhite(self):
"""Returns automatic white foreground color depending on background color."""
return self.__dict['white' if _AutoCodes.LIGHT_BACKGROUND else 'hiwhite']
@property
def autobgblack(self):
"""Returns automatic black background color depending on background color."""
return self.__dict['bgblack' if _AutoCodes.LIGHT_BACKGROUND else 'hibgblack']
@property
def autobgred(self):
"""Returns automatic red background color depending on background color."""
return self.__dict['bgred' if _AutoCodes.LIGHT_BACKGROUND else 'hibgred']
@property
def autobggreen(self):
"""Returns automatic green background color depending on background color."""
return self.__dict['bggreen' if _AutoCodes.LIGHT_BACKGROUND else 'hibggreen']
@property
def autobgyellow(self):
"""Returns automatic yellow background color depending on background color."""
return self.__dict['bgyellow' if _AutoCodes.LIGHT_BACKGROUND else 'hibgyellow']
@property
def autobgblue(self):
"""Returns automatic blue background color depending on background color."""
return self.__dict['bgblue' if _AutoCodes.LIGHT_BACKGROUND else 'hibgblue']
@property
def autobgmagenta(self):
"""Returns automatic magenta background color depending on background color."""
return self.__dict['bgmagenta' if _AutoCodes.LIGHT_BACKGROUND else 'hibgmagenta']
@property
def autobgcyan(self):
"""Returns automatic cyan background color depending on background color."""
return self.__dict['bgcyan' if _AutoCodes.LIGHT_BACKGROUND else 'hibgcyan']
@property
def autobgwhite(self):
"""Returns automatic white background color depending on background color."""
return self.__dict['bgwhite' if _AutoCodes.LIGHT_BACKGROUND else 'hibgwhite']
def _pad_input(incoming):
"""Avoid IndexError and KeyError by ignoring un-related fields.
Example: '{0}{autored}' becomes '{{0}}{autored}'.
Positional arguments:
incoming -- the input unicode value.
Returns:
Padded unicode value.
"""
incoming_expanded = incoming.replace('{', '{{').replace('}', '}}')
for key in _BASE_CODES:
before, after = '{{%s}}' % key, '{%s}' % key
if before in incoming_expanded:
incoming_expanded = incoming_expanded.replace(before, after)
return incoming_expanded
def _parse_input(incoming):
"""Performs the actual conversion of tags to ANSI escaped codes.
Provides a version of the input without any colors for len() and other methods.
Positional arguments:
incoming -- the input unicode value.
Returns:
2-item tuple. First item is the parsed output. Second item is a version of the input without any colors.
"""
codes = dict((k, v) for k, v in _AutoCodes().items() if '{%s}' % k in incoming)
color_codes = dict((k, '' if _AutoCodes.DISABLE_COLORS else '\033[{0}m'.format(v)) for k, v in codes.items())
incoming_padded = _pad_input(incoming)
output_colors = incoming_padded.format(**color_codes)
# Simplify: '{b}{red}' -> '\033[1m\033[31m' -> '\033[1;31m'
groups = sorted(set(_RE_GROUP_SEARCH.findall(output_colors)), key=len, reverse=True) # Get codes, grouped adjacent.
groups_simplified = [[x for n in _RE_NUMBER_SEARCH.findall(i) for x in n.split(';')] for i in groups]
groups_compiled = ['\033[{0}m'.format(';'.join(g)) for g in groups_simplified] # Final codes.
assert len(groups_compiled) == len(groups) # For testing.
output_colors_simplified = output_colors
for i in range(len(groups)):
output_colors_simplified = output_colors_simplified.replace(groups[i], groups_compiled[i])
output_no_colors = _RE_SPLIT.sub('', output_colors_simplified)
# Strip any remaining color codes.
if _AutoCodes.DISABLE_COLORS:
output_colors_simplified = _RE_NUMBER_SEARCH.sub('', output_colors_simplified)
return output_colors_simplified, output_no_colors
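# Hedged illustration (added; not part of the original library) of what
# _parse_input() yields for a simple tagged string when colors are enabled:
#
#   colored, plain = _parse_input('{red}hi{/red}')
#   colored == '\033[31mhi\033[39m'
#   plain == 'hi'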
def disable_all_colors():
"""Disable all colors. Strips any color tags or codes."""
_AutoCodes.DISABLE_COLORS = True
def set_light_background():
"""Chooses dark colors for all 'auto'-prefixed codes for readability on light backgrounds."""
_AutoCodes.DISABLE_COLORS = False
_AutoCodes.LIGHT_BACKGROUND = True
def set_dark_background():
"""Chooses dark colors for all 'auto'-prefixed codes for readability on light backgrounds."""
_AutoCodes.DISABLE_COLORS = False
_AutoCodes.LIGHT_BACKGROUND = False
def list_tags():
"""Lists the available tags.
Returns:
Tuple of tuples. Child tuples are four items: ('opening tag', 'closing tag', main ansi value, closing ansi value).
"""
codes = _AutoCodes()
grouped = set([(k, '/{0}'.format(k), codes[k], codes['/{0}'.format(k)]) for k in codes if not k.startswith('/')])
# Add half-tags like /all.
found = [c for r in grouped for c in r[:2]]
missing = set([('', r[0], None, r[1]) if r[0].startswith('/') else (r[0], '', r[1], None)
for r in _AutoCodes().items() if r[0] not in found])
grouped |= missing
# Sort.
payload = sorted([i for i in grouped if i[2] is None], key=lambda x: x[3]) # /all /fg /bg
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if i[2] < 10], key=lambda x: x[2])) # b i u flash
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if i[0].startswith('auto')], key=lambda x: x[2])) # auto colors
grouped -= set(payload)
payload.extend(sorted([i for i in grouped if not i[0].startswith('hi')], key=lambda x: x[2])) # dark colors
grouped -= set(payload)
payload.extend(sorted(grouped, key=lambda x: x[2])) # light colors
return tuple(payload)
class Color(PARENT_CLASS):
"""Unicode (str in Python3) subclass with ANSI terminal text color support.
Example syntax: Color('{red}Sample Text{/red}')
For a list of codes, call: colorclass.list_tags()
"""
def __new__(cls, *args, **kwargs):
parent_class = cls.__bases__[0]
value_markup = args[0] if args else parent_class()
value_colors, value_no_colors = _parse_input(value_markup)
if args:
args = [value_colors] + list(args[1:])
obj = parent_class.__new__(cls, *args, **kwargs)
obj.value_colors, obj.value_no_colors = value_colors, value_no_colors
obj.has_colors = bool(_RE_NUMBER_SEARCH.match(value_colors))
return obj
def __len__(self):
return self.value_no_colors.__len__()
def capitalize(self):
split = _RE_SPLIT.split(self.value_colors)
for i in range(len(split)):
if _RE_SPLIT.match(split[i]):
continue
split[i] = PARENT_CLASS(split[i]).capitalize()
return Color().join(split)
def center(self, width, fillchar=None):
if fillchar is not None:
result = PARENT_CLASS(self.value_no_colors).center(width, fillchar)
else:
result = PARENT_CLASS(self.value_no_colors).center(width)
return result.replace(self.value_no_colors, self.value_colors)
def count(self, *args, **kwargs):
return PARENT_CLASS(self.value_no_colors).count(*args, **kwargs)
def endswith(self, *args, **kwargs):
return PARENT_CLASS(self.value_no_colors).endswith(*args, **kwargs)
def find(self, *args, **kwargs):
return PARENT_CLASS(self.value_no_colors).find(*args, **kwargs)
def format(*args, **kwargs):
return Color(super(Color, args[0]).format(*args[1:], **kwargs))
def index(self, *args, **kwargs):
return PARENT_CLASS(self.value_no_colors).index(*args, **kwargs)
def isalnum(self):
return PARENT_CLASS(self.value_no_colors).isalnum()
def isalpha(self):
return PARENT_CLASS(self.value_no_colors).isalpha()
def isdecimal(self):
return PARENT_CLASS(self.value_no_colors).isdecimal()
def isdigit(self):
return PARENT_CLASS(self.value_no_colors).isdigit()
def isnumeric(self):
return PARENT_CLASS(self.value_no_colors).isnumeric()
def isspace(self):
return PARENT_CLASS(self.value_no_colors).isspace()
def istitle(self):
return PARENT_CLASS(self.value_no_colors).istitle()
def isupper(self):
return PARENT_CLASS(self.value_no_colors).isupper()
def ljust(self, width, fillchar=None):
if fillchar is not None:
result = PARENT_CLASS(self.value_no_colors).ljust(width, fillchar)
else:
result = PARENT_CLASS(self.value_no_colors).ljust(width)
return result.replace(self.value_no_colors, self.value_colors)
def rfind(self, *args, **kwargs):
return PARENT_CLASS(self.value_no_colors).rfind(*args, **kwargs)
def rindex(self, *args, **kwargs):
return PARENT_CLASS(self.value_no_colors).rindex(*args, **kwargs)
def rjust(self, width, fillchar=None):
if fillchar is not None:
result = PARENT_CLASS(self.value_no_colors).rjust(width, fillchar)
else:
result = PARENT_CLASS(self.value_no_colors).rjust(width)
return result.replace(self.value_no_colors, self.value_colors)
def splitlines(self):
return [Color(l) for l in PARENT_CLASS(self.value_colors).splitlines()]
def startswith(self, *args, **kwargs):
return PARENT_CLASS(self.value_no_colors).startswith(*args, **kwargs)
def swapcase(self):
split = _RE_SPLIT.split(self.value_colors)
for i in range(len(split)):
if _RE_SPLIT.match(split[i]):
continue
split[i] = PARENT_CLASS(split[i]).swapcase()
return Color().join(split)
def title(self):
split = _RE_SPLIT.split(self.value_colors)
for i in range(len(split)):
if _RE_SPLIT.match(split[i]):
continue
split[i] = PARENT_CLASS(split[i]).title()
return Color().join(split)
def translate(self, table):
split = _RE_SPLIT.split(self.value_colors)
for i in range(len(split)):
if _RE_SPLIT.match(split[i]):
continue
split[i] = PARENT_CLASS(split[i]).translate(table)
return Color().join(split)
def upper(self):
split = _RE_SPLIT.split(self.value_colors)
for i in range(len(split)):
if _RE_SPLIT.match(split[i]):
continue
split[i] = PARENT_CLASS(split[i]).upper()
return Color().join(split)
def zfill(self, width):
if not self.value_no_colors:
return PARENT_CLASS().zfill(width)
split = _RE_SPLIT.split(self.value_colors)
filled = PARENT_CLASS(self.value_no_colors).zfill(width)
if len(split) == 1:
return filled
padding = filled.replace(self.value_no_colors, '')
if not split[0]:
split[2] = padding + split[2]
else:
split[0] = padding + split[0]
return Color().join(split)
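# Hedged usage sketch (added for illustration): Color is a drop-in
# str/unicode whose length and alignment ignore the invisible escape codes.
#
#   text = Color('{red}Warning{/red}')
#   len(text) == 7      # counts only 'Warning'
#   text.center(21)     # pads based on the visible width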
class Windows(object):
"""Enable and disable Windows support for ANSI color character codes.
Call static method Windows.enable() to enable color support for the remainder of the process' lifetime.
This class is also a context manager. You can do this:
with Windows():
print(Color('{autored}Test{/autored}'))
Or this:
with Windows(auto_colors=True):
print(Color('{autored}Test{/autored}'))
"""
@staticmethod
def disable():
"""Restore sys.stderr and sys.stdout to their original objects. Resets colors to their original values."""
if os.name != 'nt' or not Windows.is_enabled():
return False
getattr(sys.stderr, '_reset_colors', lambda: False)()
getattr(sys.stdout, '_reset_colors', lambda: False)()
if isinstance(sys.stderr, _WindowsStream):
sys.stderr = getattr(sys.stderr, 'original_stream')
        if isinstance(sys.stdout, _WindowsStream):
sys.stdout = getattr(sys.stdout, 'original_stream')
return True
@staticmethod
def is_enabled():
"""Returns True if either stderr or stdout has colors enabled."""
return isinstance(sys.stderr, _WindowsStream) or isinstance(sys.stdout, _WindowsStream)
@staticmethod
def enable(auto_colors=False, reset_atexit=False):
"""Enables color text with print() or sys.stdout.write() (stderr too).
Keyword arguments:
auto_colors -- automatically selects dark or light colors based on current terminal's background color. Only
works with {autored} and related tags.
reset_atexit -- resets original colors upon Python exit (in case you forget to reset it yourself with a closing
tag).
"""
if os.name != 'nt':
return False
# Overwrite stream references.
if not isinstance(sys.stderr, _WindowsStream):
sys.stderr.flush()
sys.stderr = _WindowsStream(stderr=True)
if not isinstance(sys.stdout, _WindowsStream):
sys.stdout.flush()
sys.stdout = _WindowsStream(stderr=False)
if not isinstance(sys.stderr, _WindowsStream) and not isinstance(sys.stdout, _WindowsStream):
return False
# Automatically select which colors to display.
bg_color = getattr(sys.stdout, 'default_bg', getattr(sys.stderr, 'default_bg', None))
if auto_colors and bg_color is not None:
set_light_background() if bg_color in (112, 96, 240, 176, 224, 208, 160) else set_dark_background()
# Reset on exit if requested.
if reset_atexit:
atexit.register(lambda: Windows.disable())
return True
def __init__(self, auto_colors=False):
self.auto_colors = auto_colors
def __enter__(self):
Windows.enable(auto_colors=self.auto_colors)
def __exit__(self, *_):
Windows.disable()
class _WindowsCSBI(object):
"""Interfaces with Windows CONSOLE_SCREEN_BUFFER_INFO API/DLL calls. Gets info for stderr and stdout.
References:
https://code.google.com/p/colorama/issues/detail?id=47.
pytest's py project: py/_io/terminalwriter.py.
Class variables:
CSBI -- ConsoleScreenBufferInfo class/struct (not instance, the class definition itself) defined in _define_csbi().
HANDLE_STDERR -- GetStdHandle() return integer for stderr.
HANDLE_STDOUT -- GetStdHandle() return integer for stdout.
WINDLL -- my own loaded instance of ctypes.WinDLL.
"""
CSBI = None
HANDLE_STDERR = None
HANDLE_STDOUT = None
WINDLL = ctypes.LibraryLoader(getattr(ctypes, 'WinDLL', None))
@staticmethod
def _define_csbi():
"""Defines structs and populates _WindowsCSBI.CSBI."""
if _WindowsCSBI.CSBI is not None:
return
class COORD(ctypes.Structure):
"""Windows COORD structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119"""
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRECT(ctypes.Structure):
"""Windows SMALL_RECT structure. http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311"""
_fields_ = [('Left', ctypes.c_short), ('Top', ctypes.c_short), ('Right', ctypes.c_short),
('Bottom', ctypes.c_short)]
class ConsoleScreenBufferInfo(ctypes.Structure):
"""Windows CONSOLE_SCREEN_BUFFER_INFO structure.
http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093
"""
_fields_ = [
('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', ctypes.wintypes.WORD),
('srWindow', SmallRECT),
('dwMaximumWindowSize', COORD)
]
_WindowsCSBI.CSBI = ConsoleScreenBufferInfo
@staticmethod
def initialize():
"""Initializes the WINDLL resource and populated the CSBI class variable."""
_WindowsCSBI._define_csbi()
_WindowsCSBI.HANDLE_STDERR = _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-12)
_WindowsCSBI.HANDLE_STDOUT = _WindowsCSBI.HANDLE_STDOUT or _WindowsCSBI.WINDLL.kernel32.GetStdHandle(-11)
if _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes:
return
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.argtypes = [ctypes.wintypes.DWORD]
_WindowsCSBI.WINDLL.kernel32.GetStdHandle.restype = ctypes.wintypes.HANDLE
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.restype = ctypes.wintypes.BOOL
_WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo.argtypes = [
ctypes.wintypes.HANDLE, ctypes.POINTER(_WindowsCSBI.CSBI)
]
@staticmethod
def get_info(handle):
"""Get information about this current console window (for Microsoft Windows only).
Raises IOError if attempt to get information fails (if there is no console window).
Don't forget to call _WindowsCSBI.initialize() once in your application before calling this method.
Positional arguments:
handle -- either _WindowsCSBI.HANDLE_STDERR or _WindowsCSBI.HANDLE_STDOUT.
Returns:
Dictionary with different integer values. Keys are:
buffer_width -- width of the buffer (Screen Buffer Size in cmd.exe layout tab).
buffer_height -- height of the buffer (Screen Buffer Size in cmd.exe layout tab).
terminal_width -- width of the terminal window.
terminal_height -- height of the terminal window.
bg_color -- current background color (http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088).
fg_color -- current text color code.
"""
# Query Win32 API.
csbi = _WindowsCSBI.CSBI()
try:
if not _WindowsCSBI.WINDLL.kernel32.GetConsoleScreenBufferInfo(handle, ctypes.byref(csbi)):
raise IOError('Unable to get console screen buffer info from win32 API.')
except ctypes.ArgumentError:
raise IOError('Unable to get console screen buffer info from win32 API.')
# Parse data.
result = dict(
buffer_width=int(csbi.dwSize.X - 1),
buffer_height=int(csbi.dwSize.Y),
terminal_width=int(csbi.srWindow.Right - csbi.srWindow.Left),
terminal_height=int(csbi.srWindow.Bottom - csbi.srWindow.Top),
bg_color=int(csbi.wAttributes & 240),
fg_color=int(csbi.wAttributes % 16),
)
return result
class _WindowsStream(object):
"""Replacement stream (overwrites sys.stdout and sys.stderr). When writing or printing, ANSI codes are converted.
ANSI (Linux/Unix) color codes are converted into win32 system calls, changing the next character's color before
printing it. Resources referenced:
https://github.com/tartley/colorama
http://www.cplusplus.com/articles/2ywTURfi/
http://thomasfischer.biz/python-and-windows-terminal-colors/
http://stackoverflow.com/questions/17125440/c-win32-console-color
http://www.tysos.org/svn/trunk/mono/corlib/System/WindowsConsoleDriver.cs
http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088#_win32_character_attributes
Class variables:
ALL_BG_CODES -- list of background Windows codes. Used to determine if requested color is foreground or background.
COMPILED_CODES -- 'translation' dictionary. Keys are ANSI codes (values of _BASE_CODES), values are Windows codes.
STD_ERROR_HANDLE -- http://msdn.microsoft.com/en-us/library/windows/desktop/ms683231
STD_OUTPUT_HANDLE -- http://msdn.microsoft.com/en-us/library/windows/desktop/ms683231
Instance variables:
original_stream -- the original stream to write non-code text to.
win32_stream_handle -- handle to the Windows stderr or stdout device. Used by other Windows functions.
default_fg -- the foreground Windows color code at the time of instantiation.
default_bg -- the background Windows color code at the time of instantiation.
"""
ALL_BG_CODES = [v for k, v in _WINDOWS_CODES.items() if k.startswith('bg') or k.startswith('hibg')]
COMPILED_CODES = dict((v, _WINDOWS_CODES[k]) for k, v in _BASE_CODES.items() if k in _WINDOWS_CODES)
def __init__(self, stderr=False):
_WindowsCSBI.initialize()
self.original_stream = sys.stderr if stderr else sys.stdout
self.win32_stream_handle = _WindowsCSBI.HANDLE_STDERR if stderr else _WindowsCSBI.HANDLE_STDOUT
self.default_fg, self.default_bg = self._get_colors()
def __getattr__(self, item):
"""If an attribute/function/etc is not defined in this function, retrieve the one from the original stream.
Fixes ipython arrow key presses.
"""
return getattr(self.original_stream, item)
def _get_colors(self):
"""Returns a tuple of two integers representing current colors: (foreground, background)."""
try:
csbi = _WindowsCSBI.get_info(self.win32_stream_handle)
return csbi['fg_color'], csbi['bg_color']
except IOError:
return 7, 0
def _reset_colors(self):
"""Sets the foreground and background colors to their original values (when class was instantiated)."""
self._set_color(-33)
def _set_color(self, color_code):
"""Changes the foreground and background colors for subsequently printed characters.
Since setting a color requires including both foreground and background codes (merged), setting just the
foreground color resets the background color to black, and vice versa.
This function first gets the current background and foreground colors, merges in the requested color code, and
sets the result.
However if we need to remove just the foreground color but leave the background color the same (or vice versa)
such as when {/red} is used, we must merge the default foreground color with the current background color. This
is the reason for those negative values.
Positional arguments:
color_code -- integer color code from _WINDOWS_CODES.
"""
# Get current color code.
current_fg, current_bg = self._get_colors()
# Handle special negative codes. Also determine the final color code.
if color_code == -39:
final_color_code = self.default_fg | current_bg # Reset the foreground only.
elif color_code == -49:
final_color_code = current_fg | self.default_bg # Reset the background only.
elif color_code == -33:
final_color_code = self.default_fg | self.default_bg # Reset both.
elif color_code == -8:
final_color_code = current_fg # Black background.
else:
new_is_bg = color_code in self.ALL_BG_CODES
final_color_code = color_code | (current_fg if new_is_bg else current_bg)
# Set new code.
_WindowsCSBI.WINDLL.kernel32.SetConsoleTextAttribute(self.win32_stream_handle, final_color_code)
def write(self, p_str):
for segment in _RE_SPLIT.split(p_str):
if not segment:
# Empty string. p_str probably starts with colors so the first item is always ''.
continue
if not _RE_SPLIT.match(segment):
# No color codes, print regular text.
self.original_stream.write(segment)
self.original_stream.flush()
continue
for color_code in (int(c) for c in _RE_NUMBER_SEARCH.findall(segment)[0].split(';')):
if color_code in self.COMPILED_CODES:
                    self._set_color(self.COMPILED_CODES[color_code])
| hkff/AccLab | pyAAL/tools/color.py | Python | gpl-3.0 | 31,055 |
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
from trytond.model.fields.field import Field
from trytond.transaction import Transaction
class Binary(Field):
'''
Define a binary field (``str``).
'''
_type = 'binary'
def __init__(self, string='', help='', required=False, readonly=False,
domain=None, states=None, select=False, on_change=None,
on_change_with=None, depends=None, filename=None, order_field=None,
context=None, loading='lazy'):
if filename is not None:
self.filename = filename
if depends is None:
depends = [filename]
else:
depends.append(filename)
super(Binary, self).__init__(string=string, help=help,
required=required, readonly=readonly, domain=domain, states=states,
select=select, on_change=on_change, on_change_with=on_change_with,
depends=depends, order_field=order_field, context=context,
loading=loading)
@staticmethod
def get(ids, model, name, values=None):
'''
Convert the binary value into ``str``
:param ids: a list of ids
:param model: a string with the name of the model
:param name: a string with the name of the field
:param values: a dictionary with the read values
:return: a dictionary with ids as key and values as value
'''
if values is None:
values = {}
res = {}
converter = buffer
default = False
format_ = Transaction().context.pop('%s.%s' % (model, name), '')
if format_ == 'size':
converter = len
default = 0
for i in values:
res[i['id']] = converter(i[name]) if i[name] else default
for i in ids:
res.setdefault(i, default)
return res
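# Illustrative usage sketch (the model and field names below are
# hypothetical):
#
#     class Attachment(ModelSQL, ModelView):
#         "Attachment"
#         __name__ = 'attachment.attachment'
#         data = fields.Binary('Data', filename='data_name')
#         data_name = fields.Char('File Name')
#
# Reading with the context key '<model>.<field>' set to 'size' makes get()
# return the byte length instead of the contents:
#
#     with Transaction().set_context({'attachment.attachment.data': 'size'}):
#         sizes = Attachment.read(ids, ['data'])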
| mediafactory/tryton_core_daemon | trytond/model/fields/binary.py | Python | gpl-3.0 | 1,971 |
# Author: echel0n <[email protected]>
# URL: https://sickrage.ca
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
import threading
import sickrage
from sickrage.core.common import Quality, DOWNLOADED, SNATCHED, SNATCHED_PROPER, WANTED
from sickrage.core.queues.search import BacklogQueueItem
from sickrage.core.searchers import new_episode_finder
class BacklogSearcher(object):
def __init__(self, *args, **kwargs):
self.name = "BACKLOG"
self.lock = threading.Lock()
self._last_backlog_search = None
self.cycleTime = 21 / 60 / 24
self.amActive = False
self.amPaused = False
self.amWaiting = False
self.forced = False
def run(self, force=False):
if self.amActive or sickrage.app.developer and not force:
return
# set thread name
threading.currentThread().setName(self.name)
# set cycle time
self.cycleTime = sickrage.app.config.backlog_searcher_freq / 60 / 24
try:
self.forced = force
self.search_backlog()
finally:
self.amActive = False
def next_run(self):
if self._last_backlog_search <= 1:
return datetime.date.today()
else:
return datetime.date.fromordinal(self._last_backlog_search + self.cycleTime)
def am_running(self):
sickrage.app.log.debug("amWaiting: " + str(self.amWaiting) + ", amActive: " + str(self.amActive))
return (not self.amWaiting) and self.amActive
def search_backlog(self, which_shows=None):
if self.amActive:
sickrage.app.log.debug("Backlog is still running, not starting it again")
return
self.amActive = True
self.amPaused = False
show_list = sickrage.app.showlist
if which_shows:
show_list = which_shows
cur_date = datetime.date.today().toordinal()
from_date = datetime.date.fromordinal(1)
if not which_shows and self.forced:
sickrage.app.log.info("Running limited backlog on missed episodes " + str(
sickrage.app.config.backlog_days) + " day(s) old")
from_date = datetime.date.today() - datetime.timedelta(days=sickrage.app.config.backlog_days)
else:
sickrage.app.log.info('Running full backlog search on missed episodes for selected shows')
# find new released episodes and update their statuses
new_episode_finder()
# go through non air-by-date shows and see if they need any episodes
for curShow in show_list:
if curShow.paused:
sickrage.app.log.debug("Skipping search for {} because the show is paused".format(curShow.name))
continue
self._last_backlog_search = self._get_last_backlog_search(curShow.indexerid)
segments = self._get_segments(curShow, from_date)
if segments:
sickrage.app.search_queue.put(BacklogQueueItem(curShow, segments))
else:
sickrage.app.log.debug("Nothing needs to be downloaded for {}, skipping".format(curShow.name))
# don't consider this an actual backlog search if we only did recent eps
# or if we only did certain shows
if from_date == datetime.date.fromordinal(1) and not which_shows:
self._set_last_backlog_search(curShow.indexerid, cur_date)
self.amActive = False
@staticmethod
def _get_segments(show, from_date):
anyQualities, bestQualities = Quality.splitQuality(show.quality)
sickrage.app.log.debug("Seeing if we need anything from {}".format(show.name))
# check through the list of statuses to see if we want any
wanted = []
for result in (x for x in sickrage.app.main_db.get_many('tv_episodes', show.indexerid) if
x['season'] > 0 and datetime.date.today().toordinal() > x['airdate'] >= from_date.toordinal()):
curStatus, curQuality = Quality.splitCompositeStatus(int(result["status"] or -1))
# if we need a better one then say yes
if curStatus not in {WANTED, DOWNLOADED, SNATCHED, SNATCHED_PROPER}:
continue
if curStatus != WANTED:
if bestQualities:
if curQuality in bestQualities:
continue
elif curQuality != Quality.UNKNOWN and curQuality > max(bestQualities):
continue
else:
if curQuality in anyQualities:
continue
elif curQuality != Quality.UNKNOWN and curQuality > max(anyQualities):
continue
# skip upgrading quality of downloaded episodes if enabled
if curStatus == DOWNLOADED and show.skip_downloaded:
continue
epObj = show.get_episode(int(result["season"]), int(result["episode"]))
wanted.append(epObj)
return wanted
@staticmethod
def _get_last_backlog_search(showid):
sickrage.app.log.debug("Retrieving the last check time from the DB")
try:
dbData = sickrage.app.main_db.get('tv_shows', showid)
return int(dbData["last_backlog_search"])
except:
return 1
@staticmethod
def _set_last_backlog_search(showid, when):
sickrage.app.log.debug("Setting the last backlog in the DB to {}".format(when))
dbData = sickrage.app.main_db.get('tv_shows', showid)
if dbData:
dbData['last_backlog_search'] = when
            sickrage.app.main_db.update(dbData)
| SiCKRAGETV/SickRage | sickrage/core/searchers/backlog_searcher.py | Python | gpl-3.0 | 6,370 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: edgeos_facts
version_added: "2.5"
author:
- Nathaniel Case (@qalthos)
- Sam Doran (@samdoran)
short_description: Collect facts from remote devices running EdgeOS
description:
- Collects a base set of device facts from a remote device that
is running EdgeOS. This module prepends all of the
base network fact keys with U(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
notes:
- Tested against EdgeOS 1.9.7
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, default, config, and neighbors. Can specify a list of
values to include a larger subset. Values can also be used
        with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: "!config"
"""
EXAMPLES = """
- name: collect all facts from the device
edgeos_facts:
gather_subset: all
- name: collect only the config and default facts
edgeos_facts:
gather_subset: config
- name: collect everything except the config
edgeos_facts:
gather_subset: "!config"
"""
RETURN = """
ansible_net_config:
description: The running-config from the device
returned: when config is configured
type: str
ansible_net_commits:
description: The set of available configuration revisions
returned: when present
type: list
ansible_net_hostname:
description: The configured system hostname
returned: always
type: str
ansible_net_model:
description: The device model string
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the device
returned: always
type: str
ansible_net_version:
description: The version of the software running
returned: always
type: str
ansible_net_neighbors:
description: The set of LLDP neighbors
returned: when interface is configured
type: list
ansible_net_gather_subset:
description: The list of subsets gathered by the module
returned: always
type: list
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.edgeos.edgeos import run_commands
class FactsBase(object):
COMMANDS = frozenset()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, list(self.COMMANDS))
class Default(FactsBase):
COMMANDS = [
'show version',
'show host name',
]
def populate(self):
super(Default, self).populate()
data = self.responses[0]
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
self.facts['model'] = self.parse_model(data)
self.facts['hostname'] = self.responses[1]
def parse_version(self, data):
match = re.search(r'Version:\s*v(\S+)', data)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'HW model:\s*([A-Za-z0-9- ]+)', data)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'HW S/N:\s+(\S+)', data)
if match:
return match.group(1)
class Config(FactsBase):
COMMANDS = [
'show configuration commands',
'show system commit',
]
def populate(self):
super(Config, self).populate()
self.facts['config'] = self.responses
commits = self.responses[1]
entries = list()
entry = None
for line in commits.split('\n'):
match = re.match(r'(\d+)\s+(.+)by(.+)via(.+)', line)
if match:
if entry:
entries.append(entry)
entry = dict(revision=match.group(1),
datetime=match.group(2),
by=str(match.group(3)).strip(),
via=str(match.group(4)).strip(),
comment=None)
elif entry:
entry['comment'] = line.strip()
self.facts['commits'] = entries
class Neighbors(FactsBase):
COMMANDS = [
'show lldp neighbors',
'show lldp neighbors detail',
]
def populate(self):
super(Neighbors, self).populate()
all_neighbors = self.responses[0]
if 'LLDP not configured' not in all_neighbors:
neighbors = self.parse(
self.responses[1]
)
self.facts['neighbors'] = self.parse_neighbors(neighbors)
def parse(self, data):
parsed = list()
values = None
for line in data.split('\n'):
if not line:
continue
elif line[0] == ' ':
values += '\n%s' % line
elif line.startswith('Interface'):
if values:
parsed.append(values)
values = line
if values:
parsed.append(values)
return parsed
def parse_neighbors(self, data):
facts = dict()
for item in data:
interface = self.parse_interface(item)
host = self.parse_host(item)
port = self.parse_port(item)
if interface not in facts:
facts[interface] = list()
facts[interface].append(dict(host=host, port=port))
return facts
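    # The returned mapping keys local interfaces to neighbor dicts, e.g.
    # (illustrative values only):
    #     {'eth0': [{'host': 'switch01', 'port': 'ge-0/0/1'}]}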
def parse_interface(self, data):
match = re.search(r'^Interface:\s+(\S+),', data)
return match.group(1)
def parse_host(self, data):
match = re.search(r'SysName:\s+(.+)$', data, re.M)
if match:
return match.group(1)
def parse_port(self, data):
match = re.search(r'PortDescr:\s+(.+)$', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
neighbors=Neighbors,
config=Config
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
module = AnsibleModule(argument_spec=spec,
supports_check_mode=True)
warnings = list()
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Subset must be one of [%s], got %s' %
(', '.join(VALID_SUBSETS), subset))
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
| hkariti/ansible | lib/ansible/modules/network/edgeos/edgeos_facts.py | Python | gpl-3.0 | 8,335 |
#
# Copyright (C) 2012 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Utillity functions for macwatch"""
import re
LEGAL_MAC_ADDRESS = re.compile('^[a-fA-F0-9]{12}$')
# Max number of nybbles in a mac-address.
MAC_ADDR_MAX_LEN = 12
# Minimum number of nybbles for a mac-address prefix
MAC_ADDR_MIN_LEN = 6
# Minimum value for a mac-address, used for appending
# zeroes to prefix.
MAC_ADDR_MIN_VAL = '000000000000'
def strip_delimiters(mac_address):
"""Strip delimiters from mac-address. Legal delimiters
are '-' and ':'"""
return re.sub('-', '', re.sub(':', '', mac_address))
def has_legal_values(mac_address):
"""Check if the given mac-addres consists for legal
hex-numbers. The mac-address must be stripped for
delimiters before calling this functiom."""
if not LEGAL_MAC_ADDRESS.match(mac_address):
return False
return True
def add_zeros_to_mac_addr(mac_address):
"""Add zeroes at the end of a mac-address if the given
mac-address has less than 6 octets.
The mac-address given as parameter will not get checked
if it only contains legal hex-numbers."""
prefix_len = len(mac_address)
if prefix_len < MAC_ADDR_MAX_LEN:
mac_address += MAC_ADDR_MIN_VAL[prefix_len:]
return mac_address
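# Illustrative examples (values are hypothetical):
#
#     >>> strip_delimiters('00:11:22:33:44:55')
#     '001122334455'
#     >>> has_legal_values('001122334455')
#     True
#     >>> add_zeros_to_mac_addr('001122')
#     '001122000000'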
| hmpf/nav | python/nav/web/macwatch/utils.py | Python | gpl-3.0 | 1,854 |
# -*- test-case-name: twisted.python.test.test_util -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import, print_function
import os, sys, errno, warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
try:
from os import setgroups, getgroups
except ImportError:
setgroups = getgroups = None
from functools import wraps
from twisted.python.compat import _PY3, unicode
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
# For backwards compatibility, some things import this, so just link it
from collections import OrderedDict
deprecatedModuleAttribute(
Version("Twisted", 15, 5, 0),
"Use collections.OrderedDict instead.",
"twisted.python.util",
"OrderedDict")
class InsensitiveDict:
"""Dictionary, that has case-insensitive keys.
Normally keys are retained in their original form when queried with
.keys() or .items(). If initialized with preserveCase=0, keys are both
looked up in lowercase and returned in lowercase by .keys() and .items().
"""
"""
Modified recipe at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66315 originally
contributed by Sami Hangaslammi.
"""
def __init__(self, dict=None, preserve=1):
"""Create an empty dictionary, or update from 'dict'."""
self.data = {}
self.preserve=preserve
if dict:
self.update(dict)
def __delitem__(self, key):
k=self._lowerOrReturn(key)
del self.data[k]
def _lowerOrReturn(self, key):
if isinstance(key, bytes) or isinstance(key, unicode):
return key.lower()
else:
return key
def __getitem__(self, key):
"""Retrieve the value associated with 'key' (in any case)."""
k = self._lowerOrReturn(key)
return self.data[k][1]
def __setitem__(self, key, value):
"""Associate 'value' with 'key'. If 'key' already exists, but
in different case, it will be replaced."""
k = self._lowerOrReturn(key)
self.data[k] = (key, value)
def has_key(self, key):
"""Case insensitive test whether 'key' exists."""
k = self._lowerOrReturn(key)
return k in self.data
__contains__ = has_key
def _doPreserve(self, key):
if not self.preserve and (isinstance(key, bytes)
or isinstance(key, unicode)):
return key.lower()
else:
return key
def keys(self):
"""List of keys in their original case."""
return list(self.iterkeys())
def values(self):
"""List of values."""
return list(self.itervalues())
def items(self):
"""List of (key,value) pairs."""
return list(self.iteritems())
def get(self, key, default=None):
"""Retrieve value associated with 'key' or return default value
if 'key' doesn't exist."""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
"""If 'key' doesn't exists, associate it with the 'default' value.
Return value associated with 'key'."""
if not self.has_key(key):
self[key] = default
return self[key]
def update(self, dict):
"""Copy (key,value) pairs from 'dict'."""
for k,v in dict.items():
self[k] = v
def __repr__(self):
"""String representation of the dictionary."""
items = ", ".join([("%r: %r" % (k,v)) for k,v in self.items()])
return "InsensitiveDict({%s})" % items
def iterkeys(self):
for v in self.data.values():
yield self._doPreserve(v[0])
def itervalues(self):
for v in self.data.values():
yield v[1]
def iteritems(self):
for (k, v) in self.data.values():
yield self._doPreserve(k), v
def popitem(self):
i=self.items()[0]
del self[i[0]]
return i
def clear(self):
for k in self.keys():
del self[k]
def copy(self):
return InsensitiveDict(self, self.preserve)
def __len__(self):
return len(self.data)
def __eq__(self, other):
for k,v in self.items():
if not (k in other) or not (other[k]==v):
return 0
return len(self)==len(other)
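# Illustrative example of InsensitiveDict (values are hypothetical):
#
#     >>> d = InsensitiveDict()
#     >>> d['Content-Type'] = 'text/plain'
#     >>> d['content-type']
#     'text/plain'
#     >>> d.keys()
#     ['Content-Type']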
def uniquify(lst):
"""Make the elements of a list unique by inserting them into a dictionary.
This must not change the order of the input lst.
"""
dct = {}
result = []
for k in lst:
if k not in dct:
result.append(k)
dct[k] = 1
return result
def padTo(n, seq, default=None):
"""
Pads a sequence out to n elements,
filling in with a default value if it is not long enough.
If the input sequence is longer than n, raises ValueError.
Details, details:
This returns a new list; it does not extend the original sequence.
The new list contains the values of the original sequence, not copies.
"""
if len(seq) > n:
raise ValueError("%d elements is more than %d." % (len(seq), n))
blank = [default] * n
blank[:len(seq)] = list(seq)
return blank
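# Illustrative examples (values are hypothetical):
#
#     >>> uniquify([3, 1, 3, 2, 1])
#     [3, 1, 2]
#     >>> padTo(4, [1, 2])
#     [1, 2, None, None]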
def getPluginDirs():
warnings.warn(
"twisted.python.util.getPluginDirs is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
import twisted
systemPlugins = os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(twisted.__file__))), 'plugins')
userPlugins = os.path.expanduser("~/TwistedPlugins")
confPlugins = os.path.expanduser("~/.twisted")
allPlugins = filter(os.path.isdir, [systemPlugins, userPlugins, confPlugins])
return allPlugins
def addPluginDir():
warnings.warn(
"twisted.python.util.addPluginDir is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
sys.path.extend(getPluginDirs())
def sibpath(path, sibling):
"""
Return the path to a sibling of a file in the filesystem.
This is useful in conjunction with the special C{__file__} attribute
that Python provides for modules, so modules can load associated
resource files.
"""
return os.path.join(os.path.dirname(os.path.abspath(path)), sibling)
def _getpass(prompt):
"""
Helper to turn IOErrors into KeyboardInterrupts.
"""
import getpass
try:
return getpass.getpass(prompt)
except IOError as e:
if e.errno == errno.EINTR:
raise KeyboardInterrupt
raise
except EOFError:
raise KeyboardInterrupt
def getPassword(prompt = 'Password: ', confirm = 0, forceTTY = 0,
confirmPrompt = 'Confirm password: ',
mismatchMessage = "Passwords don't match."):
"""Obtain a password by prompting or from stdin.
If stdin is a terminal, prompt for a new password, and confirm (if
C{confirm} is true) by asking again to make sure the user typed the same
thing, as keystrokes will not be echoed.
If stdin is not a terminal, and C{forceTTY} is not true, read in a line
and use it as the password, less the trailing newline, if any. If
C{forceTTY} is true, attempt to open a tty and prompt for the password
using it. Raise a RuntimeError if this is not possible.
@returns: C{str}
"""
isaTTY = hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
old = None
try:
if not isaTTY:
if forceTTY:
try:
old = sys.stdin, sys.stdout
sys.stdin = sys.stdout = open('/dev/tty', 'r+')
except:
raise RuntimeError("Cannot obtain a TTY")
else:
password = sys.stdin.readline()
                if password.endswith('\n'):
password = password[:-1]
return password
while 1:
try1 = _getpass(prompt)
if not confirm:
return try1
try2 = _getpass(confirmPrompt)
if try1 == try2:
return try1
else:
sys.stderr.write(mismatchMessage + "\n")
finally:
if old:
sys.stdin.close()
sys.stdin, sys.stdout = old
def println(*a):
sys.stdout.write(' '.join(map(str, a))+'\n')
# XXX
# This does not belong here
# But where does it belong?
def str_xor(s, b):
return ''.join([chr(ord(c) ^ b) for c in s])
def makeStatBar(width, maxPosition, doneChar = '=', undoneChar = '-', currentChar = '>'):
"""
Creates a function that will return a string representing a progress bar.
"""
aValue = width / float(maxPosition)
def statBar(position, force = 0, last = ['']):
assert len(last) == 1, "Don't mess with the last parameter."
done = int(aValue * position)
toDo = width - done - 2
result = "[%s%s%s]" % (doneChar * done, currentChar, undoneChar * toDo)
if force:
last[0] = result
return result
if result == last[0]:
return ''
last[0] = result
return result
statBar.__doc__ = """statBar(position, force = 0) -> '[%s%s%s]'-style progress bar
returned string is %d characters long, and the range goes from 0..%d.
The 'position' argument is where the '%s' will be drawn. If force is false,
'' will be returned instead if the resulting progress bar is identical to the
previously returned progress bar.
""" % (doneChar * 3, currentChar, undoneChar * 3, width, maxPosition, currentChar)
return statBar
def spewer(frame, s, ignored):
"""
A trace function for sys.settrace that prints every function or method call.
"""
from twisted.python import reflect
if 'self' in frame.f_locals:
se = frame.f_locals['self']
if hasattr(se, '__class__'):
k = reflect.qual(se.__class__)
else:
k = reflect.qual(type(se))
print('method %s of %s at %s' % (
frame.f_code.co_name, k, id(se)))
else:
print('function %s in %s, line %s' % (
frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno))
def searchupwards(start, files=[], dirs=[]):
"""
Walk upwards from start, looking for a directory containing
all files and directories given as arguments::
>>> searchupwards('.', ['foo.txt'], ['bar', 'bam'])
If not found, return None
"""
start=os.path.abspath(start)
parents=start.split(os.sep)
exists=os.path.exists; join=os.sep.join; isdir=os.path.isdir
while len(parents):
candidate=join(parents)+os.sep
allpresent=1
for f in files:
if not exists("%s%s" % (candidate, f)):
allpresent=0
break
if allpresent:
for d in dirs:
if not isdir("%s%s" % (candidate, d)):
allpresent=0
break
if allpresent: return candidate
parents.pop(-1)
return None
class LineLog:
"""
A limited-size line-based log, useful for logging line-based
protocols such as SMTP.
When the log fills up, old entries drop off the end.
"""
def __init__(self, size=10):
"""
Create a new log, with size lines of storage (default 10).
A log size of 0 (or less) means an infinite log.
"""
if size < 0:
size = 0
self.log = [None]*size
self.size = size
def append(self,line):
if self.size:
self.log[:-1] = self.log[1:]
self.log[-1] = line
else:
self.log.append(line)
def str(self):
return '\n'.join(filter(None,self.log))
def __getitem__(self, item):
return filter(None,self.log)[item]
def clear(self):
"""Empty the log"""
self.log = [None]*self.size
def raises(exception, f, *args, **kwargs):
"""
Determine whether the given call raises the given exception.
"""
try:
f(*args, **kwargs)
except exception:
return 1
return 0
class IntervalDifferential(object):
"""
Given a list of intervals, generate the amount of time to sleep between
"instants".
For example, given 7, 11 and 13, the three (infinite) sequences::
7 14 21 28 35 ...
11 22 33 44 ...
13 26 39 52 ...
will be generated, merged, and used to produce::
(7, 0) (4, 1) (2, 2) (1, 0) (7, 0) (1, 1) (4, 2) (2, 0) (5, 1) (2, 0)
New intervals may be added or removed as iteration proceeds using the
proper methods.
"""
def __init__(self, intervals, default=60):
"""
@type intervals: C{list} of C{int}, C{long}, or C{float} param
@param intervals: The intervals between instants.
@type default: C{int}, C{long}, or C{float}
@param default: The duration to generate if the intervals list
becomes empty.
"""
self.intervals = intervals[:]
self.default = default
def __iter__(self):
return _IntervalDifferentialIterator(self.intervals, self.default)
class _IntervalDifferentialIterator(object):
def __init__(self, i, d):
self.intervals = [[e, e, n] for (e, n) in zip(i, range(len(i)))]
self.default = d
self.last = 0
def __next__(self):
if not self.intervals:
return (self.default, None)
last, index = self.intervals[0][0], self.intervals[0][2]
self.intervals[0][0] += self.intervals[0][1]
self.intervals.sort()
result = last - self.last
self.last = last
return result, index
# Iterators on Python 2 use next(), not __next__()
next = __next__
def addInterval(self, i):
if self.intervals:
delay = self.intervals[0][0] - self.intervals[0][1]
self.intervals.append([delay + i, i, len(self.intervals)])
self.intervals.sort()
else:
self.intervals.append([i, i, 0])
def removeInterval(self, interval):
for i in range(len(self.intervals)):
if self.intervals[i][1] == interval:
index = self.intervals[i][2]
del self.intervals[i]
for i in self.intervals:
if i[2] > index:
i[2] -= 1
return
raise ValueError("Specified interval not in IntervalDifferential")
class FancyStrMixin:
"""
Mixin providing a flexible implementation of C{__str__}.
C{__str__} output will begin with the name of the class, or the contents
of the attribute C{fancybasename} if it is set.
The body of C{__str__} can be controlled by overriding C{showAttributes} in
a subclass. Set C{showAttributes} to a sequence of strings naming
attributes, or sequences of C{(attributeName, callable)}, or sequences of
C{(attributeName, displayName, formatCharacter)}. In the second case, the
callable is passed the value of the attribute and its return value used in
the output of C{__str__}. In the final case, the attribute is looked up
using C{attributeName}, but the output uses C{displayName} instead, and
renders the value of the attribute using C{formatCharacter}, e.g. C{"%.3f"}
might be used for a float.
"""
# Override in subclasses:
showAttributes = ()
def __str__(self):
r = ['<', (hasattr(self, 'fancybasename') and self.fancybasename)
or self.__class__.__name__]
for attr in self.showAttributes:
if isinstance(attr, str):
r.append(' %s=%r' % (attr, getattr(self, attr)))
elif len(attr) == 2:
r.append((' %s=' % (attr[0],)) + attr[1](getattr(self, attr[0])))
else:
r.append((' %s=' + attr[2]) % (attr[1], getattr(self, attr[0])))
r.append('>')
return ''.join(r)
__repr__ = __str__
class FancyEqMixin:
"""
Mixin that implements C{__eq__} and C{__ne__}.
Comparison is done using the list of attributes defined in
C{compareAttributes}.
"""
compareAttributes = ()
def __eq__(self, other):
if not self.compareAttributes:
return self is other
if isinstance(self, other.__class__):
return (
[getattr(self, name) for name in self.compareAttributes] ==
[getattr(other, name) for name in self.compareAttributes])
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
try:
# initgroups is available in Python 2.7+ on UNIX-likes
from os import initgroups as _initgroups
except ImportError:
_initgroups = None
if _initgroups is None:
def initgroups(uid, primaryGid):
"""
Do nothing.
        The underlying platform support required to manipulate groups is missing.
"""
else:
def initgroups(uid, primaryGid):
"""
Initializes the group access list.
This uses the stdlib support which calls initgroups(3) under the hood.
If the given user is a member of more than C{NGROUPS}, arbitrary
groups will be silently discarded to bring the number below that
limit.
@type uid: C{int}
@param uid: The UID for which to look up group information.
@type primaryGid: C{int} or C{NoneType}
@param primaryGid: If provided, an additional GID to include when
setting the groups.
"""
return _initgroups(pwd.getpwuid(uid)[0], primaryGid)
def switchUID(uid, gid, euid=False):
"""
Attempts to switch the uid/euid and gid/egid for the current process.
If C{uid} is the same value as L{os.getuid} (or L{os.geteuid}),
this function will issue a L{UserWarning} and not raise an exception.
@type uid: C{int} or C{NoneType}
@param uid: the UID (or EUID) to switch the current process to. This
parameter will be ignored if the value is C{None}.
@type gid: C{int} or C{NoneType}
@param gid: the GID (or EGID) to switch the current process to. This
parameter will be ignored if the value is C{None}.
@type euid: C{bool}
@param euid: if True, set only effective user-id rather than real user-id.
(This option has no effect unless the process is running
as root, in which case it means not to shed all
privileges, retaining the option to regain privileges
in cases such as spawning processes. Use with caution.)
"""
if euid:
setuid = os.seteuid
setgid = os.setegid
getuid = os.geteuid
else:
setuid = os.setuid
setgid = os.setgid
getuid = os.getuid
if gid is not None:
setgid(gid)
if uid is not None:
if uid == getuid():
uidText = (euid and "euid" or "uid")
actionText = "tried to drop privileges and set%s %s" % (uidText, uid)
problemText = "%s is already %s" % (uidText, getuid())
warnings.warn("%s but %s; should we be root? Continuing."
% (actionText, problemText))
else:
initgroups(uid, gid)
setuid(uid)
class SubclassableCStringIO(object):
"""
A wrapper around cStringIO to allow for subclassing.
"""
__csio = None
def __init__(self, *a, **kw):
from cStringIO import StringIO
self.__csio = StringIO(*a, **kw)
def __iter__(self):
return self.__csio.__iter__()
def next(self):
return self.__csio.next()
def close(self):
return self.__csio.close()
def isatty(self):
return self.__csio.isatty()
def seek(self, pos, mode=0):
return self.__csio.seek(pos, mode)
def tell(self):
return self.__csio.tell()
def read(self, n=-1):
return self.__csio.read(n)
def readline(self, length=None):
return self.__csio.readline(length)
def readlines(self, sizehint=0):
return self.__csio.readlines(sizehint)
def truncate(self, size=None):
return self.__csio.truncate(size)
def write(self, s):
return self.__csio.write(s)
def writelines(self, list):
return self.__csio.writelines(list)
def flush(self):
return self.__csio.flush()
def getvalue(self):
return self.__csio.getvalue()
def untilConcludes(f, *a, **kw):
"""
Call C{f} with the given arguments, handling C{EINTR} by retrying.
@param f: A function to call.
@param *a: Positional arguments to pass to C{f}.
@param **kw: Keyword arguments to pass to C{f}.
@return: Whatever C{f} returns.
@raise: Whatever C{f} raises, except for C{IOError} or C{OSError} with
C{errno} set to C{EINTR}.
"""
while True:
try:
return f(*a, **kw)
except (IOError, OSError) as e:
if e.args[0] == errno.EINTR:
continue
raise
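# Illustrative usage (the file descriptor fd is hypothetical): retry an
# interruptible syscall until it completes instead of failing on EINTR:
#
#     data = untilConcludes(os.read, fd, 4096)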
def mergeFunctionMetadata(f, g):
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
pass
try:
g.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
g.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
try:
g.__module__ = f.__module__
except TypeError:
pass
return g
def nameToLabel(mname):
"""
Convert a string like a variable name into a slightly more human-friendly
string with spaces and capitalized letters.
@type mname: C{str}
@param mname: The name to convert to a label. This must be a string
which could be used as a Python identifier. Strings which do not take
this form will result in unpredictable behavior.
@rtype: C{str}
"""
labelList = []
word = ''
lastWasUpper = False
for letter in mname:
if letter.isupper() == lastWasUpper:
# Continuing a word.
word += letter
else:
# breaking a word OR beginning a word
if lastWasUpper:
# could be either
if len(word) == 1:
# keep going
word += letter
else:
# acronym
# we're processing the lowercase letter after the acronym-then-capital
lastWord = word[:-1]
firstLetter = word[-1]
labelList.append(lastWord)
word = firstLetter + letter
else:
# definitely breaking: lower to upper
labelList.append(word)
word = letter
lastWasUpper = letter.isupper()
if labelList:
labelList[0] = labelList[0].capitalize()
else:
return mname.capitalize()
labelList.append(word)
return ' '.join(labelList)
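# Illustrative examples (values are hypothetical):
#
#     >>> nameToLabel('aVariableName')
#     'A Variable Name'
#     >>> nameToLabel('hello')
#     'Hello'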
def uidFromString(uidString):
"""
Convert a user identifier, as a string, into an integer UID.
    @type uidString: C{str}
    @param uidString: A string giving the base-ten representation of a UID or
        the name of a user which can be converted to a UID via L{pwd.getpwnam}.
@rtype: C{int}
@return: The integer UID corresponding to the given string.
@raise ValueError: If the user name is supplied and L{pwd} is not
available.
"""
try:
return int(uidString)
except ValueError:
if pwd is None:
raise
return pwd.getpwnam(uidString)[2]
def gidFromString(gidString):
"""
Convert a group identifier, as a string, into an integer GID.
    @type gidString: C{str}
    @param gidString: A string giving the base-ten representation of a GID or
        the name of a group which can be converted to a GID via L{grp.getgrnam}.
@rtype: C{int}
@return: The integer GID corresponding to the given string.
@raise ValueError: If the group name is supplied and L{grp} is not
available.
"""
try:
return int(gidString)
except ValueError:
if grp is None:
raise
return grp.getgrnam(gidString)[2]
def runAsEffectiveUser(euid, egid, function, *args, **kwargs):
"""
Run the given function wrapped with seteuid/setegid calls.
    This will try to minimize the number of seteuid/setegid calls, comparing
    current and wanted permissions.
    @param euid: effective UID used to call the function.
    @type euid: C{int}
    @param egid: effective GID used to call the function.
    @type egid: C{int}
@param function: the function run with the specific permission.
@type function: any callable
@param *args: arguments passed to C{function}
@param **kwargs: keyword arguments passed to C{function}
"""
uid, gid = os.geteuid(), os.getegid()
if uid == euid and gid == egid:
return function(*args, **kwargs)
else:
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(egid)
if euid != 0 and (euid != uid or gid != egid):
os.seteuid(euid)
try:
return function(*args, **kwargs)
finally:
if euid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(gid)
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(uid)
def runWithWarningsSuppressed(suppressedWarnings, f, *args, **kwargs):
"""
Run C{f(*args, **kwargs)}, but with some warnings suppressed.
Unlike L{twisted.internet.utils.runWithWarningsSuppressed}, it has no
special support for L{twisted.internet.defer.Deferred}.
@param suppressedWarnings: A list of arguments to pass to filterwarnings.
Must be a sequence of 2-tuples (args, kwargs).
@param f: A callable.
@param args: Arguments for C{f}.
@param kwargs: Keyword arguments for C{f}
@return: The result of C{f(*args, **kwargs)}.
"""
with warnings.catch_warnings():
for a, kw in suppressedWarnings:
warnings.filterwarnings(*a, **kw)
return f(*args, **kwargs)
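# Illustrative usage (someNoisyFunction is hypothetical): each entry in
# suppressedWarnings is an (args, kwargs) pair for warnings.filterwarnings:
#
#     result = runWithWarningsSuppressed(
#         [(('ignore',), dict(category=DeprecationWarning))],
#         someNoisyFunction)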
def _replaceIf(condition, alternative):
"""
If C{condition}, replace this function with C{alternative}.
@param condition: A L{bool} which says whether this should be replaced.
@param alternative: An alternative function that will be swapped in instead
of the original, if C{condition} is truthy.
@return: A decorator.
"""
def decorator(func):
if condition is True:
call = alternative
elif condition is False:
call = func
else:
raise ValueError(("condition argument to _replaceIf requires a "
"bool, not {}").format(repr(condition)))
@wraps(func)
def wrapped(*args, **kwargs):
return call(*args, **kwargs)
return wrapped
return decorator
__all__ = [
"uniquify", "padTo", "getPluginDirs", "addPluginDir", "sibpath",
"getPassword", "println", "makeStatBar", "OrderedDict",
"InsensitiveDict", "spewer", "searchupwards", "LineLog",
"raises", "IntervalDifferential", "FancyStrMixin", "FancyEqMixin",
"switchUID", "SubclassableCStringIO", "mergeFunctionMetadata",
"nameToLabel", "uidFromString", "gidFromString", "runAsEffectiveUser",
"untilConcludes", "runWithWarningsSuppressed",
]
if _PY3:
__notported__ = ["SubclassableCStringIO", "LineLog", "makeStatBar"]
for name in __all__[:]:
if name in __notported__:
__all__.remove(name)
del globals()[name]
del name, __notported__
| Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/python/util.py | Python | gpl-3.0 | 28,267 |
from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
class AircoxConfig(AppConfig):
name = 'aircox'
verbose_name = 'Aircox'
class AircoxAdminConfig(AdminConfig):
default_site = 'aircox.admin_site.AdminSite'
| lordblackfox/aircox | aircox/apps.py | Python | gpl-3.0 | 256 |
# telepathy-mixer - a MXit connection manager for Telepathy
#
# Copyright (C) 2008 Ralf Kistner <[email protected]>
#
# Adapted from telepathy-butterfly,
# Copyright (C) 2006-2007 Ali Sabil <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import telepathy
import gobject
import dbus
import logging
from mixer.connection import MixerConnection
__all__ = ['MixerConnectionManager']
logger = logging.getLogger('Mixer.ConnectionManager')
class MixerConnectionManager(telepathy.server.ConnectionManager):
"""Mixer connection manager
Implements the org.freedesktop.Telepathy.ConnectionManager interface"""
def __init__(self, shutdown_func=None):
"Initializer"
telepathy.server.ConnectionManager.__init__(self, 'mixer')
self._protos['mxit'] = MixerConnection
self._shutdown = shutdown_func
logger.info("Connection manager created")
def GetParameters(self, proto):
"Returns the mandatory and optional parameters for the given proto."
if proto not in self._protos:
raise telepathy.NotImplemented('unknown protocol %s' % proto)
result = []
connection_class = self._protos[proto]
mandatory_parameters = connection_class._mandatory_parameters
optional_parameters = connection_class._optional_parameters
default_parameters = connection_class._parameter_defaults
for parameter_name, parameter_type in mandatory_parameters.iteritems():
param = (parameter_name,
telepathy.CONN_MGR_PARAM_FLAG_REQUIRED,
parameter_type,
'')
result.append(param)
for parameter_name, parameter_type in optional_parameters.iteritems():
if parameter_name in default_parameters:
                param = (parameter_name,
                         telepathy.CONN_MGR_PARAM_FLAG_HAS_DEFAULT,
                         parameter_type,
                         default_parameters[parameter_name])
else:
                param = (parameter_name, 0, parameter_type, '')
result.append(param)
return result
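    # Each entry returned above is a (name, flags, signature, default) tuple;
    # e.g. a required string parameter might look like (illustrative only):
    #     ('account', telepathy.CONN_MGR_PARAM_FLAG_REQUIRED, 's', '')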
def disconnected(self, conn):
logger.info("disconnected")
def shutdown():
if self._shutdown is not None and \
len(self._connections) == 0:
self._shutdown()
return False
result = telepathy.server.ConnectionManager.disconnected(self, conn)
gobject.timeout_add(5000, shutdown)
def quit(self):
"Terminates all connections. Must be called upon quit"
logger.info("quit")
for connection in self._connections:
connection.Disconnect()
logger.info("Connection manager quitting")
| detrout/telepathy-mixer | mixer/connection_manager.py | Python | gpl-3.0 | 3,436 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main tabbed browser widget."""
import functools
import collections
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QSize, QTimer, QUrl
from PyQt5.QtGui import QIcon
from PyQt5.QtWebKitWidgets import QWebPage
from qutebrowser.config import config
from qutebrowser.keyinput import modeman
from qutebrowser.mainwindow import tabwidget
from qutebrowser.browser import signalfilter, commands, webview
from qutebrowser.utils import (log, message, usertypes, utils, qtutils, objreg,
urlutils)
UndoEntry = collections.namedtuple('UndoEntry', ['url', 'history'])
class TabbedBrowser(tabwidget.TabWidget):
"""A TabWidget with QWebViews inside.
Provides methods to manage tabs, convenience methods to interact with the
    current tab (cur_*) and filters signals to re-emit them when they occurred
in the currently visible tab.
For all tab-specific signals (cur_*) emitted by a tab, this happens:
- the signal gets filtered with _filter_signals and self.cur_* gets
      emitted if the signal occurred in the current tab.
Attributes:
_win_id: The window ID this tabbedbrowser is associated with.
_filter: A SignalFilter instance.
_now_focused: The tab which is focused now.
_tab_insert_idx_left: Where to insert a new tab with
tabbar -> new-tab-position set to 'left'.
_tab_insert_idx_right: Same as above, for 'right'.
_undo_stack: List of UndoEntry namedtuples of closed tabs.
Signals:
cur_progress: Progress of the current tab changed (loadProgress).
cur_load_started: Current tab started loading (loadStarted)
cur_load_finished: Current tab finished loading (loadFinished)
cur_statusbar_message: Current tab got a statusbar message
(statusBarMessage)
cur_url_text_changed: Current URL text changed.
cur_link_hovered: Link hovered in current tab (linkHovered)
cur_scroll_perc_changed: Scroll percentage of current tab changed.
arg 1: x-position in %.
arg 2: y-position in %.
cur_load_status_changed: Loading status of current tab changed.
close_window: The last tab was closed, close this window.
resized: Emitted when the browser window has resized, so the completion
widget can adjust its size to it.
arg: The new size.
current_tab_changed: The current tab changed to the emitted WebView.
"""
cur_progress = pyqtSignal(int)
cur_load_started = pyqtSignal()
cur_load_finished = pyqtSignal(bool)
cur_statusbar_message = pyqtSignal(str)
cur_url_text_changed = pyqtSignal(str)
cur_link_hovered = pyqtSignal(str, str, str)
cur_scroll_perc_changed = pyqtSignal(int, int)
cur_load_status_changed = pyqtSignal(str)
close_window = pyqtSignal()
resized = pyqtSignal('QRect')
got_cmd = pyqtSignal(str)
current_tab_changed = pyqtSignal(webview.WebView)
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent)
self._win_id = win_id
self._tab_insert_idx_left = 0
self._tab_insert_idx_right = -1
self.tabCloseRequested.connect(self.on_tab_close_requested)
self.currentChanged.connect(self.on_current_changed)
self.cur_load_started.connect(self.on_cur_load_started)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self._undo_stack = []
self._filter = signalfilter.SignalFilter(win_id, self)
dispatcher = commands.CommandDispatcher(win_id)
objreg.register('command-dispatcher', dispatcher, scope='window',
window=win_id)
self.destroyed.connect(
functools.partial(objreg.delete, 'command-dispatcher',
scope='window', window=win_id))
self._now_focused = None
# FIXME adjust this to font size
# https://github.com/The-Compiler/qutebrowser/issues/119
self.setIconSize(QSize(12, 12))
objreg.get('config').changed.connect(self.update_favicons)
def __repr__(self):
return utils.get_repr(self, count=self.count())
def widgets(self):
"""Get a list of open tab widgets.
We don't implement this as generator so we can delete tabs while
iterating over the list.
"""
w = []
for i in range(self.count()):
w.append(self.widget(i))
return w
def _change_app_title(self, text):
"""Change the window title based on the tab text."""
if not text:
title = 'qutebrowser'
else:
title = '{} - qutebrowser'.format(text)
self.window().setWindowTitle(title)
def _connect_tab_signals(self, tab):
"""Set up the needed signals for tab."""
page = tab.page()
frame = page.mainFrame()
# filtered signals
tab.linkHovered.connect(
self._filter.create(self.cur_link_hovered, tab))
tab.loadProgress.connect(
self._filter.create(self.cur_progress, tab))
frame.loadFinished.connect(
self._filter.create(self.cur_load_finished, tab))
frame.loadStarted.connect(
self._filter.create(self.cur_load_started, tab))
tab.statusBarMessage.connect(
self._filter.create(self.cur_statusbar_message, tab))
tab.scroll_pos_changed.connect(
self._filter.create(self.cur_scroll_perc_changed, tab))
tab.url_text_changed.connect(
self._filter.create(self.cur_url_text_changed, tab))
tab.load_status_changed.connect(
self._filter.create(self.cur_load_status_changed, tab))
tab.url_text_changed.connect(
functools.partial(self.on_url_text_changed, tab))
# misc
tab.titleChanged.connect(
functools.partial(self.on_title_changed, tab))
tab.iconChanged.connect(
functools.partial(self.on_icon_changed, tab))
tab.loadProgress.connect(
functools.partial(self.on_load_progress, tab))
frame.loadFinished.connect(
functools.partial(self.on_load_finished, tab))
frame.loadStarted.connect(
functools.partial(self.on_load_started, tab))
page.windowCloseRequested.connect(
functools.partial(self.on_window_close_requested, tab))
def current_url(self):
"""Get the URL of the current tab.
Intended to be used from command handlers.
Return:
The current URL as QUrl.
"""
widget = self.currentWidget()
if widget is None:
url = QUrl()
else:
url = widget.cur_url
# It's possible for url to be invalid, but the caller will handle that.
qtutils.ensure_valid(url)
return url
def shutdown(self):
"""Try to shut down all tabs cleanly."""
try:
self.currentChanged.disconnect()
except TypeError:
log.destroy.exception("Error while shutting down tabs")
for tab in self.widgets():
self._remove_tab(tab)
def close_tab(self, tab):
"""Close a tab.
Args:
tab: The QWebView to be closed.
"""
last_close = config.get('tabs', 'last-close')
if self.count() > 1:
self._remove_tab(tab)
elif last_close == 'close':
self._remove_tab(tab)
self.close_window.emit()
elif last_close == 'blank':
tab.openurl(QUrl('about:blank'))
def _remove_tab(self, tab):
"""Remove a tab from the tab list and delete it properly.
Args:
tab: The QWebView to be closed.
"""
idx = self.indexOf(tab)
if idx == -1:
raise ValueError("tab {} is not contained in TabbedWidget!".format(
tab))
if tab is self._now_focused:
self._now_focused = None
if tab is objreg.get('last-focused-tab', None, scope='window',
window=self._win_id):
objreg.delete('last-focused-tab', scope='window',
window=self._win_id)
if tab.cur_url.isValid():
history_data = qtutils.serialize(tab.history())
entry = UndoEntry(tab.cur_url, history_data)
self._undo_stack.append(entry)
elif tab.cur_url.isEmpty():
            # There are some good reasons why a URL could be empty
# (target="_blank" with a download, see [1]), so we silently ignore
# this.
# [1] https://github.com/The-Compiler/qutebrowser/issues/163
pass
else:
            # We display a warning for URLs which are not empty but invalid -
# but we don't return here because we want the tab to close either
# way.
urlutils.invalid_url_error(self._win_id, tab.cur_url, "saving tab")
tab.shutdown()
self.removeTab(idx)
tab.deleteLater()
def undo(self):
"""Undo removing of a tab."""
url, history_data = self._undo_stack.pop()
newtab = self.tabopen(url, background=False)
qtutils.deserialize(history_data, newtab.history())
@pyqtSlot('QUrl', bool)
def openurl(self, url, newtab):
"""Open a URL, used as a slot.
Args:
url: The URL to open as QUrl.
newtab: True to open URL in a new tab, False otherwise.
"""
qtutils.ensure_valid(url)
if newtab:
self.tabopen(url, background=False)
else:
self.currentWidget().openurl(url)
@pyqtSlot(int)
def on_tab_close_requested(self, idx):
"""Close a tab via an index."""
tab = self.widget(idx)
if tab is None:
log.webview.debug("Got invalid tab {} for index {}!".format(
tab, idx))
return
self.close_tab(tab)
@pyqtSlot(webview.WebView)
def on_window_close_requested(self, widget):
"""Close a tab with a widget given."""
self.close_tab(widget)
@pyqtSlot('QUrl', bool)
def tabopen(self, url=None, background=None, explicit=False):
"""Open a new tab with a given URL.
Inner logic for open-tab and open-tab-bg.
Also connect all the signals we need to _filter_signals.
Args:
url: The URL to open as QUrl or None for an empty tab.
            background: Whether to open the tab in the background.
                        If None, the background-tabs setting decides.
            explicit: Whether the tab was opened explicitly.
                      If this is set, the new position might be different. With
                      the default settings we handle it like Chromium does:
                          - Tabs from clicked links etc. are to the right of
                            the current.
                          - Explicitly opened tabs are at the very right.
Return:
The opened WebView instance.
"""
if url is not None:
qtutils.ensure_valid(url)
log.webview.debug("Creating new tab with URL {}".format(url))
if config.get('tabs', 'tabs-are-windows') and self.count() > 0:
from qutebrowser.mainwindow import mainwindow
window = mainwindow.MainWindow.spawn()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=window)
return tabbed_browser.tabopen(url, background, explicit)
tab = webview.WebView(self._win_id, self)
self._connect_tab_signals(tab)
idx = self._get_new_tab_idx(explicit)
self.insertTab(idx, tab, "")
if url is not None:
tab.openurl(url)
if background is None:
background = config.get('tabs', 'background-tabs')
if not background:
self.setCurrentWidget(tab)
tab.show()
return tab
def _get_new_tab_idx(self, explicit):
"""Get the index of a tab to insert.
Args:
            explicit: Whether the tab was opened explicitly.
Return:
The index of the new tab.
"""
if explicit:
pos = config.get('tabs', 'new-tab-position-explicit')
else:
pos = config.get('tabs', 'new-tab-position')
if pos == 'left':
idx = self._tab_insert_idx_left
# On first sight, we'd think we have to decrement
# self._tab_insert_idx_left here, as we want the next tab to be
# *before* the one we just opened. However, since we opened a tab
# *to the left* of the currently focused tab, indices will shift by
# 1 automatically.
elif pos == 'right':
idx = self._tab_insert_idx_right
self._tab_insert_idx_right += 1
elif pos == 'first':
idx = 0
elif pos == 'last':
idx = -1
else:
raise ValueError("Invalid new-tab-position '{}'.".format(pos))
log.webview.debug("new-tab-position {} -> opening new tab at {}, "
"next left: {} / right: {}".format(
pos, idx, self._tab_insert_idx_left,
self._tab_insert_idx_right))
return idx
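    # Editor's worked example (hedged, not in the original file): with tabs
    # [A, B, C] and B focused, on_current_changed below sets
    # _tab_insert_idx_left == 1 and _tab_insert_idx_right == 2; a 'left'
    # insert at index 1 pushes B right, which keeps the next left-insert
    # index valid without decrementing it, while every 'right' insert bumps
    # _tab_insert_idx_right by one.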
@pyqtSlot(str, int)
def search(self, text, flags):
"""Search for text in the current page.
Args:
text: The text to search for.
flags: The QWebPage::FindFlags.
"""
log.webview.debug("Searching with text '{}' and flags "
"0x{:04x}.".format(text, int(flags)))
widget = self.currentWidget()
old_scroll_pos = widget.scroll_pos
found = widget.findText(text, flags)
if not found and not flags & QWebPage.HighlightAllOccurrences and text:
message.error(self._win_id, "Text '{}' not found on "
"page!".format(text), immediately=True)
else:
backward = int(flags) & QWebPage.FindBackward
def check_scroll_pos():
"""Check if the scroll position got smaller and show info."""
if not backward and widget.scroll_pos < old_scroll_pos:
message.info(self._win_id, "Search hit BOTTOM, continuing "
"at TOP", immediately=True)
elif backward and widget.scroll_pos > old_scroll_pos:
message.info(self._win_id, "Search hit TOP, continuing at "
"BOTTOM", immediately=True)
# We first want QWebPage to refresh.
QTimer.singleShot(0, check_scroll_pos)
@config.change_filter('tabs', 'show-favicons')
def update_favicons(self):
"""Update favicons when config was changed."""
show = config.get('tabs', 'show-favicons')
for i, tab in enumerate(self.widgets()):
if show:
self.setTabIcon(i, tab.icon())
else:
self.setTabIcon(i, QIcon())
@pyqtSlot()
def on_load_started(self, tab):
"""Clear icon when a tab started loading.
Args:
tab: The tab where the signal belongs to.
"""
try:
idx = self.indexOf(tab)
except RuntimeError:
# We can get signals for tabs we already deleted...
return
if idx == -1:
# We can get signals for tabs we already deleted...
log.webview.debug("Got invalid tab {}!".format(tab))
return
self.setTabIcon(idx, QIcon())
@pyqtSlot()
def on_cur_load_started(self):
"""Leave insert/hint mode when loading started."""
modeman.maybe_leave(self._win_id, usertypes.KeyMode.insert,
'load started')
modeman.maybe_leave(self._win_id, usertypes.KeyMode.hint,
'load started')
@pyqtSlot(webview.WebView, str)
def on_title_changed(self, tab, text):
"""Set the title of a tab.
Slot for the titleChanged signal of any tab.
Args:
tab: The WebView where the title was changed.
text: The text to set.
"""
if not text:
log.webview.debug("Ignoring title change to '{}'.".format(text))
return
try:
idx = self.indexOf(tab)
except RuntimeError:
# We can get signals for tabs we already deleted...
return
log.webview.debug("Changing title for idx {} to '{}'".format(
idx, text))
if idx == -1:
# We can get signals for tabs we already deleted...
log.webview.debug("Got invalid tab {}!".format(tab))
return
self.setTabText(idx, text.replace('&', '&&'))
if idx == self.currentIndex():
self._change_app_title(text)
@pyqtSlot(webview.WebView, str)
def on_url_text_changed(self, tab, url):
"""Set the new URL as title if there's no title yet.
Args:
tab: The WebView where the title was changed.
url: The new URL.
"""
try:
idx = self.indexOf(tab)
except RuntimeError:
# We can get signals for tabs we already deleted...
return
if idx == -1:
# We can get signals for tabs we already deleted...
log.webview.debug("Got invalid tab {}!".format(tab))
return
if not self.tabText(idx):
self.setTabText(idx, url)
@pyqtSlot(webview.WebView)
def on_icon_changed(self, tab):
"""Set the icon of a tab.
Slot for the iconChanged signal of any tab.
Args:
tab: The WebView where the title was changed.
"""
if not config.get('tabs', 'show-favicons'):
return
try:
idx = self.indexOf(tab)
except RuntimeError:
# We can get signals for tabs we already deleted...
return
if idx == -1:
# We can get *_changed signals for tabs we already deleted...
log.webview.debug("Got invalid tab {}!".format(tab))
return
self.setTabIcon(idx, tab.icon())
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Give focus to current tab if command mode was left."""
if mode in (usertypes.KeyMode.command, usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno):
widget = self.currentWidget()
log.modes.debug("Left status-input mode, focusing {!r}".format(
widget))
if widget is None:
return
widget.setFocus()
@pyqtSlot(int)
def on_current_changed(self, idx):
"""Set last-focused-tab and leave hinting mode when focus changed."""
if idx == -1:
# closing the last tab (before quitting)
return
tab = self.widget(idx)
log.modes.debug("Current tab changed, focusing {!r}".format(tab))
tab.setFocus()
for mode in (usertypes.KeyMode.hint, usertypes.KeyMode.insert):
modeman.maybe_leave(self._win_id, mode, 'tab changed')
if self._now_focused is not None:
objreg.register('last-focused-tab', self._now_focused, update=True,
scope='window', window=self._win_id)
self._now_focused = tab
self.current_tab_changed.emit(tab)
self._change_app_title(self.tabText(idx))
self._tab_insert_idx_left = self.currentIndex()
self._tab_insert_idx_right = self.currentIndex() + 1
@pyqtSlot()
def on_cmd_return_pressed(self):
"""Set focus when the commandline closes."""
log.modes.debug("Commandline closed, focusing {!r}".format(self))
def on_load_progress(self, tab, perc):
"""Adjust tab indicator on load progress."""
try:
idx = self.indexOf(tab)
except RuntimeError:
# We can get signals for tabs we already deleted...
return
start = config.get('colors', 'tabs.indicator.start')
stop = config.get('colors', 'tabs.indicator.stop')
system = config.get('colors', 'tabs.indicator.system')
color = utils.interpolate_color(start, stop, perc, system)
self.tabBar().set_tab_indicator_color(idx, color)
def on_load_finished(self, tab):
"""Adjust tab indicator when loading finished.
We don't take loadFinished's ok argument here as it always seems to be
true when the QWebPage has an ErrorPageExtension implemented.
See https://github.com/The-Compiler/qutebrowser/issues/84
"""
try:
idx = self.indexOf(tab)
except RuntimeError:
# We can get signals for tabs we already deleted...
return
if tab.page().error_occured:
color = config.get('colors', 'tabs.indicator.error')
else:
start = config.get('colors', 'tabs.indicator.start')
stop = config.get('colors', 'tabs.indicator.stop')
system = config.get('colors', 'tabs.indicator.system')
color = utils.interpolate_color(start, stop, 100, system)
self.tabBar().set_tab_indicator_color(idx, color)
def resizeEvent(self, e):
"""Extend resizeEvent of QWidget to emit a resized signal afterwards.
Args:
e: The QResizeEvent
"""
super().resizeEvent(e)
self.resized.emit(self.geometry())
| larryhynes/qutebrowser | qutebrowser/mainwindow/tabbedbrowser.py | Python | gpl-3.0 | 22,640 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.tests.menu_page_viewperm import ViewPermissionTests
from django.contrib.auth.models import User
class ViewPermissionComplexMenuStaffNodeTests(ViewPermissionTests):
"""
Test CMS_PUBLIC_FOR=staff group access and menu nodes rendering
"""
settings_overrides = {
'CMS_MODERATOR': False,
'CMS_PERMISSION': True,
'CMS_PUBLIC_FOR': 'staff',
}
def test_public_pages_anonymous_norestrictions(self):
"""
All pages are INVISIBLE to an anonymous user
"""
all_pages = self._setup_tree_pages()
granted = []
self.assertGrantedVisibility(all_pages, granted)
def test_public_menu_anonymous_user(self):
"""
        Anonymous users see nothing, as they are not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = []
self.assertGrantedVisibility(all_pages, granted)
def test_node_staff_access_page_and_children_group_1(self):
"""
simulate behaviour of group b member
group_b_ACCESS_PAGE_AND_CHILDREN to page_b
staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b',
'page_b_a',
'page_b_b',
'page_b_c',
'page_b_d',
'page_c',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_1')
# user 1 is member of group_b_access_page_and_children
user = User.objects.get(username='user_1')
urls = self.get_url_dict(all_pages)
# call /
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_and_children_group_1_no_staff(self):
"""
simulate behaviour of group b member
group_b_ACCESS_PAGE_AND_CHILDREN to page_b
        non-staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = [
'page_b',
'page_b_a',
'page_b_b',
'page_b_c',
'page_b_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_1_nostaff')
user = User.objects.get(username='user_1_nostaff')
urls = self.get_url_dict(all_pages)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_children_group_2(self):
"""
simulate behaviour of group 2 member
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
to page_b_b and user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b_b_a',
'page_b_b_b',
'page_b_b_c',
'page_c',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_2')
user = User.objects.get(username='user_2')
urls = self.get_url_dict(all_pages)
self.assertViewNotAllowed(urls['/en/page_b/'], user)
self.assertViewNotAllowed(urls['/en/page_b/page_b_b/'], user)
self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_a/'], user)
self.assertViewAllowed(urls['/en/page_b/page_b_b/page_b_b_b/'], user)
self.assertViewNotAllowed(urls['/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/'], user)
self.assertViewNotAllowed(urls['/en/page_d/'], user)
self.assertViewAllowed(urls['/en/page_d/page_d_a/'], user)
#
def test_node_staff_access_children_group_2_nostaff(self):
"""
simulate behaviour of group 2 member
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
        to page_b_b and the user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_b_b_a',
'page_b_b_b',
'page_b_b_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_2_nostaff')
user = User.objects.get(username='user_2_nostaff')
urls = self.get_url_dict(all_pages)
# member of group that has access to this page
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_and_descendants_group_3(self):
"""
simulate behaviour of group 3 member
group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
and user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b',
'page_b_a',
'page_b_b',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_b_c',
'page_b_d',
'page_b_d_a',
'page_b_d_b',
'page_b_d_c',
'page_c',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_3')
user = User.objects.get(username='user_3')
urls = self.get_url_dict(all_pages)
url = '/en' + self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
def test_node_staff_access_page_and_descendants_group_3_nostaff(self):
"""
simulate behaviour of group 3 member
group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_b',
'page_b_a',
'page_b_b',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_b_c',
'page_b_d',
'page_b_d_a',
'page_b_d_b',
'page_b_d_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_3_nostaff')
user = User.objects.get(username='user_3_nostaff')
urls = self.get_url_dict(all_pages)
# call /
url = '/en' + self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
def test_node_staff_access_descendants_group_4(self):
"""
simulate behaviour of group 4 member
group_b_b_ACCESS_DESCENDANTS to page_b_b
user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
'page_c',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_4')
user = User.objects.get(username='user_4')
urls = self.get_url_dict(all_pages)
# call /
url = '/en' + self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
# not a direct child
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_d/"], user)
def test_node_staff_access_descendants_group_4_nostaff(self):
"""
simulate behaviour of group 4 member
group_b_b_ACCESS_DESCENDANTS to page_b_b
        user is not staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = [
'page_b_b_a',
'page_b_b_a_a',
'page_b_b_b',
'page_b_b_c',
]
self.assertGrantedVisibility(all_pages, granted, username='user_4_nostaff')
user = User.objects.get(username='user_4_nostaff')
urls = self.get_url_dict(all_pages)
url = '/en' + self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
def test_node_staff_access_page_group_5(self):
"""
simulate behaviour of group b member
group_d_ACCESS_PAGE to page_d
user is staff
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_a',
'page_c',
'page_d',
'page_d_a',
'page_d_b',
'page_d_c',
'page_d_d',
]
self.assertGrantedVisibility(all_pages, granted, username='user_5')
user = User.objects.get(username='user_5')
urls = self.get_url_dict(all_pages)
url = '/en' + self.get_pages_root()
self.assertViewAllowed(urls[url], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
self.assertViewAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
def test_node_staff_access_page_group_5_nostaff(self):
"""
simulate behaviour of group b member
group_d_ACCESS_PAGE to page_d
        non-staff user
"""
self._setup_user_groups()
all_pages = self._setup_tree_pages()
self._setup_view_restrictions()
granted = ['page_d',]
self.assertGrantedVisibility(all_pages, granted, username='user_5_nostaff')
user = User.objects.get(username='user_5_nostaff')
urls = self.get_url_dict(all_pages)
url = '/en' + self.get_pages_root()
self.assertViewNotAllowed(urls[url], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_c/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_d/"], user)
self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
self.assertViewNotAllowed(urls["/en/page_c/"], user)
self.assertViewAllowed(urls["/en/page_d/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_a/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_b/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_c/"], user)
self.assertViewNotAllowed(urls["/en/page_d/page_d_d/"], user)
| hzlf/openbroadcast | website/cms/tests/menu_page_viewperm_staff.py | Python | gpl-3.0 | 16,554 |
from openanalysis.string_matching import StringMatchingAlgorithm, StringMatchingAnalyzer
class BruteForceMatch(StringMatchingAlgorithm):
"""
Class to implement Brute Force String Matching Algorithm
"""
def __init__(self):
StringMatchingAlgorithm.__init__(self, "Brute Force String Matching")
    def match(self, text: str, pattern: str):
        # Note: a __call__ method could be implemented to use the object as a
        # function, e.g.:
        #
        #     class X:
        #         def __call__(self, params):  # instances of X are callable
        #             print(params)
        #
        #     y = X()
        #     y("Hello")  # Output: Hello
        StringMatchingAlgorithm.match(self, text, pattern)
        # the + 1 ensures a match ending at the last character is also found
        for i in range(0, len(text) - len(pattern) + 1):
j = 0
while j < len(pattern) and pattern[j] == text[i + j]:
j += 1
self.count += 1
if j == len(pattern):
return True
return False
class Horspool(StringMatchingAlgorithm):
def __init__(self):
StringMatchingAlgorithm.__init__(self, "Hosrpool String Matching")
self.shift_table = {}
self.pattern = ''
def generate_shift_table(self, pattern):
self.pattern = pattern
for i in range(0, len(pattern) - 1):
self.shift_table.update({pattern[i]: len(pattern) - 1 - i})
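        # Editor's worked example (hedged, not in the original file): for
        # pattern "BAOBAB" this builds {'B': 2, 'A': 1, 'O': 3} -- for each
        # character except the last, the distance from its rightmost such
        # occurrence to the final position of the pattern.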
def match(self, text: str, pattern: str):
StringMatchingAlgorithm.match(self, text, pattern)
self.generate_shift_table(pattern)
i = len(self.pattern) - 1
while i < len(text):
j = 0
while j < len(self.pattern) and text[i - j] == self.pattern[len(self.pattern) - 1 - j]:
j += 1
self.count += j
if j == len(self.pattern):
return i - len(self.pattern) + 1
if text[i] in self.shift_table:
i += self.shift_table[text[i]]
else:
i += len(self.pattern)
return -1
class BoyerMoore(StringMatchingAlgorithm):
def __init__(self):
StringMatchingAlgorithm.__init__(self, "Boyer-Moore String Matching")
self.gdsuf_shift_table = []
self.bdsym_shift_table = {}
self.pattern = ''
def generate_bdsym_shift_table(self, pattern):
self.pattern = pattern
for i in range(len(pattern) - 1):
self.bdsym_shift_table[pattern[i]] = len(pattern) - i - 1
def generate_gdsuf_shift_table(self, pattern):
lp = len(pattern)
for i in range(lp - 1):
suff = pattern[lp - i - 1:]
# find highest index where suffix 'suff' is present in pattern
targ = pattern.rfind(suff, 0, lp - i - 1)
while pattern[targ - 1] == pattern[lp - i - 2] and targ > 0:
targ = pattern.rfind(suff, 0, targ)
if targ > 0:
self.gdsuf_shift_table.append((lp - i - 1) - targ)
elif targ == 0:
self.gdsuf_shift_table.append(lp - i - 1)
else:
lis = []
                # the suffix does not recur: find every position j where a
                # suffix of the pattern is also a prefix, and shift by the
                # largest such j (or by the full pattern length if none exists)
for j in range(lp - 1, lp - i - 2, -1):
if pattern[j:] == pattern[0:lp - j]:
lis.append(j)
if len(lis) == 0:
self.gdsuf_shift_table.append(lp)
else:
self.gdsuf_shift_table.append(max(lis))
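        # Editor's worked example (hedged, not in the original file): for
        # pattern "BAOBAB" the table comes out as [2, 5, 5, 5, 5]; with k
        # matched characters the shift is gdsuf_shift_table[k - 1], e.g. a
        # matched suffix "B" (k=1) recurs two positions to the left, so the
        # shift is 2.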
def match(self, text: str, pattern: str):
StringMatchingAlgorithm.match(self, text, pattern)
# pattern = "BAOBAB"
self.generate_bdsym_shift_table(pattern)
self.generate_gdsuf_shift_table(pattern)
# print(self.gdsuf_shift_table, self.bdsym_shift_table)
i = len(self.pattern) - 1
while i < len(text):
j = 0
k = 0 # matched characters
while j < len(self.pattern) and text[i - j] == self.pattern[len(self.pattern) - j - 1]:
j += 1
k += 1
self.count += j
if j == len(self.pattern):
return i - len(self.pattern) + 1
if text[i - j] in self.bdsym_shift_table:
t1 = self.bdsym_shift_table[text[i - j]]
else:
t1 = len(self.pattern)
d1 = max(t1 - k, 1)
d = d1
if k > 0:
d2 = self.gdsuf_shift_table[k - 1] # 0 based index table
d = max(d1, d2)
i += d
return -1
if __name__ == "__main__":
StringMatchingAnalyzer(Horspool).analyze(max_text_length=5000)
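    # Editor's sketch (hedged addition): quick sanity checks using only the
    # classes defined in this module; expected values were verified by hand.
    print(BruteForceMatch().match("abracadabra", "cad"))  # True (returns a bool)
    print(Horspool().match("abracadabra", "cad"))         # 4 (offset of the match)
    print(BoyerMoore().match("abracadabra", "cad"))       # 4 (offset of the match)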
| OpenWeavers/openanalysis | analysistest/string_matching.py | Python | gpl-3.0 | 4,632 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import cstr, cint
from frappe.contacts.doctype.address.address import get_default_address
from erpnext.setup.doctype.customer_group.customer_group import get_parent_customer_groups
class IncorrectCustomerGroup(frappe.ValidationError): pass
class IncorrectSupplierType(frappe.ValidationError): pass
class ConflictingTaxRule(frappe.ValidationError): pass
class TaxRule(Document):
def __setup__(self):
self.flags.ignore_these_exceptions_in_test = [ConflictingTaxRule]
def validate(self):
self.validate_tax_template()
self.validate_date()
self.validate_filters()
self.validate_use_for_shopping_cart()
def validate_tax_template(self):
if self.tax_type== "Sales":
self.purchase_tax_template = self.supplier = self.supplier_type = None
if self.customer:
self.customer_group = None
else:
self.sales_tax_template = self.customer = self.customer_group = None
if self.supplier:
self.supplier_type = None
if not (self.sales_tax_template or self.purchase_tax_template):
frappe.throw(_("Tax Template is mandatory."))
def validate_date(self):
if self.from_date and self.to_date and self.from_date > self.to_date:
frappe.throw(_("From Date cannot be greater than To Date"))
def validate_filters(self):
filters = {
"tax_type": self.tax_type,
"customer": self.customer,
"customer_group": self.customer_group,
"supplier": self.supplier,
"supplier_type": self.supplier_type,
"billing_city": self.billing_city,
"billing_county": self.billing_county,
"billing_state": self.billing_state,
"billing_country": self.billing_country,
"shipping_city": self.shipping_city,
"shipping_county": self.shipping_county,
"shipping_state": self.shipping_state,
"shipping_country": self.shipping_country,
"company": self.company
}
conds=""
for d in filters:
if conds:
conds += " and "
conds += """ifnull({0}, '') = '{1}'""".format(d, frappe.db.escape(cstr(filters[d])))
if self.from_date and self.to_date:
conds += """ and ((from_date > '{from_date}' and from_date < '{to_date}') or
(to_date > '{from_date}' and to_date < '{to_date}') or
('{from_date}' > from_date and '{from_date}' < to_date) or
('{from_date}' = from_date and '{to_date}' = to_date))""".format(from_date=self.from_date, to_date=self.to_date)
elif self.from_date and not self.to_date:
conds += """ and to_date > '{from_date}'""".format(from_date = self.from_date)
elif self.to_date and not self.from_date:
conds += """ and from_date < '{to_date}'""".format(to_date = self.to_date)
tax_rule = frappe.db.sql("select name, priority \
from `tabTax Rule` where {0} and name != '{1}'".format(conds, self.name), as_dict=1)
if tax_rule:
if tax_rule[0].priority == self.priority:
frappe.throw(_("Tax Rule Conflicts with {0}".format(tax_rule[0].name)), ConflictingTaxRule)
def validate_use_for_shopping_cart(self):
'''If shopping cart is enabled and no tax rule exists for shopping cart, enable this one'''
if (not self.use_for_shopping_cart
and cint(frappe.db.get_single_value('Shopping Cart Settings', 'enabled'))
and not frappe.db.get_value('Tax Rule', {'use_for_shopping_cart': 1, 'name': ['!=', self.name]})):
self.use_for_shopping_cart = 1
frappe.msgprint(_("Enabling 'Use for Shopping Cart', as Shopping Cart is enabled and there should be at least one Tax Rule for Shopping Cart"))
@frappe.whitelist()
def get_party_details(party, party_type, args=None):
out = {}
billing_address, shipping_address = None, None
if args:
if args.get('billing_address'):
billing_address = frappe.get_doc('Address', args.get('billing_address'))
if args.get('shipping_address'):
shipping_address = frappe.get_doc('Address', args.get('shipping_address'))
else:
billing_address_name = get_default_address(party_type, party)
shipping_address_name = get_default_address(party_type, party, 'is_shipping_address')
if billing_address_name:
billing_address = frappe.get_doc('Address', billing_address_name)
if shipping_address_name:
shipping_address = frappe.get_doc('Address', shipping_address_name)
if billing_address:
out["billing_city"]= billing_address.city
out["billing_county"]= billing_address.county
out["billing_state"]= billing_address.state
out["billing_country"]= billing_address.country
if shipping_address:
out["shipping_city"]= shipping_address.city
out["shipping_county"]= shipping_address.county
out["shipping_state"]= shipping_address.state
out["shipping_country"]= shipping_address.country
return out
def get_tax_template(posting_date, args):
"""Get matching tax rule"""
args = frappe._dict(args)
conditions = ["""(from_date is null or from_date = '' or from_date <= '{0}')
and (to_date is null or to_date = '' or to_date >= '{0}')""".format(posting_date)]
for key, value in args.iteritems():
if key=="use_for_shopping_cart":
conditions.append("use_for_shopping_cart = {0}".format(1 if value else 0))
if key == 'customer_group' and value:
customer_group_condition = get_customer_group_condition(value)
conditions.append("ifnull({0}, '') in ('', {1})".format(key, customer_group_condition))
else:
conditions.append("ifnull({0}, '') in ('', '{1}')".format(key, frappe.db.escape(cstr(value))))
tax_rule = frappe.db.sql("""select * from `tabTax Rule`
where {0}""".format(" and ".join(conditions)), as_dict = True)
if not tax_rule:
return None
for rule in tax_rule:
rule.no_of_keys_matched = 0
for key in args:
if rule.get(key): rule.no_of_keys_matched += 1
rule = sorted(tax_rule, lambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched) or cmp(a.priority, b.priority))[0]
tax_template = rule.sales_tax_template or rule.purchase_tax_template
doctype = "{0} Taxes and Charges Template".format(rule.tax_type)
if frappe.db.get_value(doctype, tax_template, 'disabled')==1:
return None
return tax_template
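# Editor's note (hedged illustration, not in the original file): given two
# applicable rules -- one filtered only on billing_country, one on
# billing_country plus customer_group -- the scoring loop above counts 1 vs 2
# matched keys, so the more specific rule wins; priority only breaks ties
# between rules that match equally many keys.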
def get_customer_group_condition(customer_group):
condition = ""
customer_groups = ["'%s'"%(frappe.db.escape(d.name)) for d in get_parent_customer_groups(customer_group)]
if customer_groups:
condition = ",".join(['%s'] * len(customer_groups))%(tuple(customer_groups))
	return condition
| mbauskar/erpnext | erpnext/accounts/doctype/tax_rule/tax_rule.py | Python | gpl-3.0 | 6,495 |
__kupfer_name__ = _("Search the Web")
__kupfer_sources__ = ("OpenSearchSource", )
__kupfer_text_sources__ = ()
__kupfer_actions__ = (
"SearchFor",
"SearchWithEngine",
)
__description__ = _("Search the web with OpenSearch search engines")
__version__ = "2020-04-19"
__author__ = "Ulrik Sverdrup <[email protected]>"
import locale
import os
import urllib.parse
import xml.etree.cElementTree as ElementTree
from kupfer.objects import Action, Source, Leaf
from kupfer.objects import TextLeaf
from kupfer import utils, config
from kupfer.plugin import firefox
def _noescape_urlencode(items):
"""Assemble an url param string from @items, without
using any url encoding.
"""
return "?" + "&".join("%s=%s" % (n, v) for n, v in items)
def _urlencode(word):
"""Urlencode a single string of bytes @word"""
return urllib.parse.urlencode({"q": word})[2:]
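# Editor's sketch (hedged, not in the original file), illustrating the two
# helpers above:
#   _urlencode("foo bar")                         -> "foo+bar"
#   _noescape_urlencode([("q", "{searchTerms}")]) -> "?q={searchTerms}"
# the second deliberately skips escaping so OpenSearch template placeholders
# like {searchTerms} survive intact.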
def _do_search_engine(terms, search_url, encoding="UTF-8"):
"""Show an url searching for @search_url with @terms"""
query_url = search_url.replace("{searchTerms}", _urlencode(terms))
utils.show_url(query_url)
class SearchWithEngine (Action):
"""TextLeaf -> SearchWithEngine -> SearchEngine"""
def __init__(self):
Action.__init__(self, _("Search With..."))
def activate(self, leaf, iobj):
coding = iobj.object.get("InputEncoding")
url = iobj.object["Url"]
_do_search_engine(leaf.object, url, encoding=coding)
def item_types(self):
yield TextLeaf
def requires_object(self):
return True
def object_types(self):
yield SearchEngine
def object_source(self, for_item=None):
return OpenSearchSource()
def get_description(self):
return _("Search the web with OpenSearch search engines")
def get_icon_name(self):
return "edit-find"
class SearchFor (Action):
"""SearchEngine -> SearchFor -> TextLeaf
This is the opposite action to SearchWithEngine
"""
def __init__(self):
Action.__init__(self, _("Search For..."))
def activate(self, leaf, iobj):
coding = leaf.object.get("InputEncoding")
url = leaf.object["Url"]
terms = iobj.object
_do_search_engine(terms, url, encoding=coding)
def item_types(self):
yield SearchEngine
def requires_object(self):
return True
def object_types(self):
yield TextLeaf
def get_description(self):
return _("Search the web with OpenSearch search engines")
def get_icon_name(self):
return "edit-find"
class SearchEngine (Leaf):
def get_description(self):
desc = self.object.get("Description")
return desc if desc != str(self) else None
def get_icon_name(self):
return "text-html"
def coroutine(func):
"""Coroutine decorator: Start the coroutine"""
def startcr(*ar, **kw):
cr = func(*ar, **kw)
next(cr)
return cr
return startcr
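# Editor's usage sketch (hedged, not in the original file): the decorator
# merely primes the generator, so a consumer is usable immediately:
#
#   @coroutine
#   def printer():
#       while True:
#           print((yield))
#
#   p = printer()
#   p.send("hi")  # no explicit next(p) is needed before the first send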
class OpenSearchParseError (Exception):
pass
def gettagname(tag):
return tag.rsplit("}", 1)[-1]
class OpenSearchSource (Source):
def __init__(self):
Source.__init__(self, _("Search Engines"))
@coroutine
def _parse_opensearch(self, target):
"""This is a coroutine to parse OpenSearch files"""
vital_keys = set(["Url", "ShortName"])
keys = set(["Description", "Url", "ShortName", "InputEncoding"])
roots = ('OpenSearchDescription', 'SearchPlugin')
def parse_etree(etree, name=None):
if not gettagname(etree.getroot().tag) in roots:
raise OpenSearchParseError("Search %s has wrong type" % name)
search = {}
for child in etree.getroot():
tagname = gettagname(child.tag)
if tagname not in keys:
continue
# Only pick up Url tags with type="text/html"
if tagname == "Url":
if (child.get("type") == "text/html" and
child.get("template")):
text = child.get("template")
params = {}
for ch in child:
if gettagname(ch.tag) == "Param":
params[ch.get("name")] = ch.get("value")
if params:
text += _noescape_urlencode(list(params.items()))
else:
continue
else:
text = (child.text or "").strip()
search[tagname] = text
if not vital_keys.issubset(list(search.keys())):
raise OpenSearchParseError("Search %s missing keys" % name)
return search
while True:
try:
path = (yield)
etree = ElementTree.parse(path)
target.send(parse_etree(etree, name=path))
except Exception as exc:
self.output_debug("%s: %s" % (type(exc).__name__, exc))
def get_items(self):
plugin_dirs = []
# accept in kupfer data dirs
plugin_dirs.extend(config.get_data_dirs("searchplugins"))
# firefox in home directory
ffx_home = firefox.get_firefox_home_file("searchplugins")
if ffx_home and os.path.isdir(ffx_home):
plugin_dirs.append(ffx_home)
plugin_dirs.extend(config.get_data_dirs("searchplugins",
package="firefox"))
plugin_dirs.extend(config.get_data_dirs("searchplugins",
package="iceweasel"))
addon_dir = "/usr/lib/firefox-addons/searchplugins"
cur_lang, _ignored = locale.getlocale(locale.LC_MESSAGES)
suffixes = ["en-US"]
if cur_lang:
suffixes = [cur_lang.replace("_", "-"), cur_lang[:2]] + suffixes
for suffix in suffixes:
addon_lang_dir = os.path.join(addon_dir, suffix)
if os.path.exists(addon_lang_dir):
plugin_dirs.append(addon_lang_dir)
break
# debian iceweasel
if os.path.isdir("/etc/iceweasel/searchplugins/common"):
plugin_dirs.append("/etc/iceweasel/searchplugins/common")
for suffix in suffixes:
addon_dir = os.path.join("/etc/iceweasel/searchplugins/locale",
suffix)
if os.path.isdir(addon_dir):
plugin_dirs.append(addon_dir)
# try to find all versions of firefox
for prefix in ('/usr/lib', '/usr/share'):
for dirname in os.listdir(prefix):
if dirname.startswith("firefox") or \
dirname.startswith("iceweasel"):
addon_dir = os.path.join(prefix, dirname,
"searchplugins")
if os.path.isdir(addon_dir):
plugin_dirs.append(addon_dir)
addon_dir = os.path.join(prefix, dirname,
"distribution", "searchplugins",
"common")
if os.path.isdir(addon_dir):
plugin_dirs.append(addon_dir)
self.output_debug("Found following searchplugins directories",
sep="\n", *plugin_dirs)
@coroutine
def collect(seq):
"""Collect items in list @seq"""
while True:
seq.append((yield))
searches = []
collector = collect(searches)
parser = self._parse_opensearch(collector)
# files are unique by filename to allow override
visited_files = set()
for pdir in plugin_dirs:
try:
for f in os.listdir(pdir):
if f in visited_files:
continue
fpath = os.path.join(pdir, f)
if os.path.isdir(fpath):
continue
parser.send(fpath)
visited_files.add(f)
except EnvironmentError as exc:
self.output_error(exc)
for s in searches:
yield SearchEngine(s, s["ShortName"])
def should_sort_lexically(self):
return True
def provides(self):
yield SearchEngine
def get_icon_name(self):
return "applications-internet"
| kupferlauncher/kupfer | kupfer/plugin/websearch.py | Python | gpl-3.0 | 8,532 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import piw
from pi import const,agent,atom,domain,bundles,upgrade,policy
from . import rectangle_oscillator_version as version,synth_native
class Agent(agent.Agent):
def __init__(self, address, ordinal):
agent.Agent.__init__(self, signature=version,names='rectangle oscillator',ordinal=ordinal)
self.domain = piw.clockdomain_ctl()
self[1] = bundles.Output(1,True,names="audio output")
self.output = bundles.Splitter(self.domain, self[1])
self.osc = synth_native.rect(self.output.cookie(),self.domain)
self.input = bundles.VectorInput(self.osc.cookie(), self.domain, signals=(1,2,3,4))
self[2]=atom.Atom(domain=domain.BoundedFloat(0,1),names="volume input",policy=self.input.local_policy(1,policy.IsoStreamPolicy(1,0,0)))
self[3]=atom.Atom(domain=domain.BoundedFloat(0,96000),names="frequency input",policy=self.input.merge_policy(2,False))
self[4]=atom.Atom(domain=domain.BoundedFloat(0.1,0.9,rest=0.5), names="pulse width input",policy=self.input.merge_policy(3,False))
self[5]=atom.Atom(domain=domain.BoundedFloat(-1200,1200), names='detune input',policy=self.input.merge_policy(4,False))
agent.main(Agent)
| barnone/EigenD | plg_synth/rectangle_oscillator_plg.py | Python | gpl-3.0 | 1,915 |
# -*- coding: utf-8 -*-
"""Tests for various magic functions.
Needs to be run by nose (to make ipython session available).
"""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import io
import os
import sys
from unittest import TestCase
try:
from importlib import invalidate_caches # Required from Python 3.3
except ImportError:
def invalidate_caches():
pass
import nose.tools as nt
from IPython.core import magic
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic,
register_line_magic, register_cell_magic,
register_line_cell_magic)
from IPython.core.magics import execution, script, code
from IPython.nbformat.v3.tests.nbexamples import nb0
from IPython.nbformat import current
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
from IPython.utils import py3compat
from IPython.utils.io import capture_output
from IPython.utils.tempdir import TemporaryDirectory
from IPython.utils.process import find_cmd
if py3compat.PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Test functions begin
#-----------------------------------------------------------------------------
@magic.magics_class
class DummyMagics(magic.Magics): pass
def test_extract_code_ranges():
instr = "1 3 5-6 7-9 10:15 17: :10 10- -13 :"
expected = [(0, 1),
(2, 3),
(4, 6),
(6, 9),
(9, 14),
(16, None),
(None, 9),
(9, None),
(None, 13),
(None, None)]
actual = list(code.extract_code_ranges(instr))
nt.assert_equal(actual, expected)
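# Editor's note (hedged, not in the original file): the expected list above
# reflects the grammar -- a bare "N" selects one line, "A-B" is inclusive of
# both ends, "A:B" is Python-style (end-exclusive), and open or empty bounds
# ("17:", "-13", ":") become None after converting to 0-based indices.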
def test_extract_symbols():
source = """import foo\na = 10\ndef b():\n return 42\n\n\nclass A: pass\n\n\n"""
symbols_args = ["a", "b", "A", "A,b", "A,a", "z"]
expected = [([], ['a']),
(["def b():\n return 42\n"], []),
(["class A: pass\n"], []),
(["class A: pass\n", "def b():\n return 42\n"], []),
(["class A: pass\n"], ['a']),
([], ['z'])]
for symbols, exp in zip(symbols_args, expected):
nt.assert_equal(code.extract_symbols(source, symbols), exp)
def test_extract_symbols_raises_exception_with_non_python_code():
source = ("=begin A Ruby program :)=end\n"
"def hello\n"
"puts 'Hello world'\n"
"end")
with nt.assert_raises(SyntaxError):
code.extract_symbols(source, "hello")
def test_config():
""" test that config magic does not raise
can happen if Configurable init is moved too early into
    Magics.__init__ as then a Config object will be registered as a
magic.
"""
## should not raise.
_ip.magic('config')
def test_rehashx():
# clear up everything
_ip = get_ipython()
_ip.alias_manager.clear_aliases()
del _ip.db['syscmdlist']
_ip.magic('rehashx')
# Practically ALL ipython development systems will have more than 10 aliases
nt.assert_true(len(_ip.alias_manager.aliases) > 10)
for name, cmd in _ip.alias_manager.aliases:
# we must strip dots from alias names
nt.assert_not_in('.', name)
# rehashx must fill up syscmdlist
scoms = _ip.db['syscmdlist']
nt.assert_true(len(scoms) > 10)
def test_magic_parse_options():
"""Test that we don't mangle paths when parsing magic options."""
ip = get_ipython()
path = 'c:\\x'
m = DummyMagics(ip)
opts = m.parse_options('-f %s' % path,'f:')[0]
# argv splitting is os-dependent
if os.name == 'posix':
expected = 'c:x'
else:
expected = path
nt.assert_equal(opts['f'], expected)
def test_magic_parse_long_options():
"""Magic.parse_options can handle --foo=bar long options"""
ip = get_ipython()
m = DummyMagics(ip)
opts, _ = m.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=')
nt.assert_in('foo', opts)
nt.assert_in('bar', opts)
nt.assert_equal(opts['bar'], "bubble")
@dec.skip_without('sqlite3')
def doctest_hist_f():
"""Test %hist -f with temporary filename.
In [9]: import tempfile
In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
In [11]: %hist -nl -f $tfile 3
In [13]: import os; os.unlink(tfile)
"""
@dec.skip_without('sqlite3')
def doctest_hist_r():
"""Test %hist -r
XXX - This test is not recording the output correctly. For some reason, in
testing mode the raw history isn't getting populated. No idea why.
Disabling the output checking for now, though at least we do run it.
In [1]: 'hist' in _ip.lsmagic()
Out[1]: True
In [2]: x=1
In [3]: %hist -rl 2
x=1 # random
%hist -r 2
"""
@dec.skip_without('sqlite3')
def doctest_hist_op():
"""Test %hist -op
In [1]: class b(float):
...: pass
...:
In [2]: class s(object):
...: def __str__(self):
...: return 's'
...:
In [3]:
In [4]: class r(b):
...: def __repr__(self):
...: return 'r'
...:
In [5]: class sr(s,r): pass
...:
In [6]:
In [7]: bb=b()
In [8]: ss=s()
In [9]: rr=r()
In [10]: ssrr=sr()
In [11]: 4.5
Out[11]: 4.5
In [12]: str(ss)
Out[12]: 's'
In [13]:
In [14]: %hist -op
>>> class b:
... pass
...
>>> class s(b):
... def __str__(self):
... return 's'
...
>>>
>>> class r(b):
... def __repr__(self):
... return 'r'
...
>>> class sr(s,r): pass
>>>
>>> bb=b()
>>> ss=s()
>>> rr=r()
>>> ssrr=sr()
>>> 4.5
4.5
>>> str(ss)
's'
>>>
"""
@dec.skip_without('sqlite3')
def test_macro():
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = ["a=1", "def b():\n return a**2", "print(a,b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
ip.magic("macro test 1-3")
nt.assert_equal(ip.user_ns["test"].value, "\n".join(cmds)+"\n")
# List macros
nt.assert_in("test", ip.magic("macro"))
@dec.skip_without('sqlite3')
def test_macro_run():
"""Test that we can run a multi-line macro successfully."""
ip = get_ipython()
ip.history_manager.reset()
cmds = ["a=10", "a+=1", py3compat.doctest_refactor_print("print a"),
"%macro test 2-3"]
for cmd in cmds:
ip.run_cell(cmd, store_history=True)
nt.assert_equal(ip.user_ns["test"].value,
py3compat.doctest_refactor_print("a+=1\nprint a\n"))
with tt.AssertPrints("12"):
ip.run_cell("test")
with tt.AssertPrints("13"):
ip.run_cell("test")
def test_magic_magic():
"""Test %magic"""
ip = get_ipython()
with capture_output() as captured:
ip.magic("magic")
stdout = captured.stdout
nt.assert_in('%magic', stdout)
nt.assert_in('IPython', stdout)
nt.assert_in('Available', stdout)
@dec.skipif_not_numpy
def test_numpy_reset_array_undec():
"Test '%reset array' functionality"
_ip.ex('import numpy as np')
_ip.ex('a = np.empty(2)')
nt.assert_in('a', _ip.user_ns)
_ip.magic('reset -f array')
nt.assert_not_in('a', _ip.user_ns)
def test_reset_out():
"Test '%reset out' magic"
_ip.run_cell("parrot = 'dead'", store_history=True)
# test '%reset -f out', make an Out prompt
_ip.run_cell("parrot", store_history=True)
nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
_ip.magic('reset -f out')
nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
nt.assert_equal(len(_ip.user_ns['Out']), 0)
def test_reset_in():
"Test '%reset in' magic"
# test '%reset -f in'
_ip.run_cell("parrot", store_history=True)
nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
_ip.magic('%reset -f in')
nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
nt.assert_equal(len(set(_ip.user_ns['In'])), 1)
def test_reset_dhist():
"Test '%reset dhist' magic"
_ip.run_cell("tmp = [d for d in _dh]") # copy before clearing
_ip.magic('cd ' + os.path.dirname(nt.__file__))
_ip.magic('cd -')
nt.assert_true(len(_ip.user_ns['_dh']) > 0)
_ip.magic('reset -f dhist')
nt.assert_equal(len(_ip.user_ns['_dh']), 0)
_ip.run_cell("_dh = [d for d in tmp]") #restore
def test_reset_in_length():
"Test that '%reset in' preserves In[] length"
_ip.run_cell("print 'foo'")
_ip.run_cell("reset -f in")
nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1)
def test_tb_syntaxerror():
"""test %tb after a SyntaxError"""
ip = get_ipython()
ip.run_cell("for")
# trap and validate stdout
save_stdout = sys.stdout
try:
sys.stdout = StringIO()
ip.run_cell("%tb")
out = sys.stdout.getvalue()
finally:
sys.stdout = save_stdout
# trim output, and only check the last line
last_line = out.rstrip().splitlines()[-1].strip()
nt.assert_equal(last_line, "SyntaxError: invalid syntax")
def test_time():
ip = get_ipython()
with tt.AssertPrints("Wall time: "):
ip.run_cell("%time None")
ip.run_cell("def f(kmjy):\n"
" %time print (2*kmjy)")
with tt.AssertPrints("Wall time: "):
with tt.AssertPrints("hihi", suppress=False):
ip.run_cell("f('hi')")
@dec.skip_win32
def test_time2():
ip = get_ipython()
with tt.AssertPrints("CPU times: user "):
ip.run_cell("%time None")
def test_time3():
"""Erroneous magic function calls, issue gh-3334"""
ip = get_ipython()
ip.user_ns.pop('run', None)
with tt.AssertNotPrints("not found", channel='stderr'):
ip.run_cell("%%time\n"
"run = 0\n"
"run += 1")
def test_doctest_mode():
"Toggle doctest_mode twice, it should be a no-op and run without error"
_ip.magic('doctest_mode')
_ip.magic('doctest_mode')
def test_parse_options():
"""Tests for basic options parsing in magics."""
# These are only the most minimal of tests, more should be added later. At
# the very least we check that basic text/unicode calls work OK.
m = DummyMagics(_ip)
nt.assert_equal(m.parse_options('foo', '')[1], 'foo')
nt.assert_equal(m.parse_options(u'foo', '')[1], u'foo')
def test_dirops():
"""Test various directory handling operations."""
# curpath = lambda :os.path.splitdrive(py3compat.getcwd())[1].replace('\\','/')
curpath = py3compat.getcwd
startdir = py3compat.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
_ip.magic('cd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('cd -')
nt.assert_equal(curpath(), startdir)
_ip.magic('pushd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('popd')
nt.assert_equal(curpath(), startdir)
finally:
os.chdir(startdir)
def test_xmode():
# Calling xmode three times should be a no-op
xmode = _ip.InteractiveTB.mode
for i in range(3):
_ip.magic("xmode")
nt.assert_equal(_ip.InteractiveTB.mode, xmode)
def test_reset_hard():
monitor = []
class A(object):
def __del__(self):
monitor.append(1)
def __repr__(self):
return "<A instance>"
_ip.user_ns["a"] = A()
_ip.run_cell("a")
nt.assert_equal(monitor, [])
_ip.magic("reset -f")
nt.assert_equal(monitor, [1])
class TestXdel(tt.TempFileMixin):
def test_xdel(self):
"""Test that references from %run are cleared by xdel."""
src = ("class A(object):\n"
" monitor = []\n"
" def __del__(self):\n"
" self.monitor.append(1)\n"
"a = A()\n")
self.mktmp(src)
# %run creates some hidden references...
_ip.magic("run %s" % self.fname)
# ... as does the displayhook.
_ip.run_cell("a")
monitor = _ip.user_ns["A"].monitor
nt.assert_equal(monitor, [])
_ip.magic("xdel a")
# Check that a's __del__ method has been called.
nt.assert_equal(monitor, [1])
def doctest_who():
"""doctest for %who
In [1]: %reset -f
In [2]: alpha = 123
In [3]: beta = 'beta'
In [4]: %who int
alpha
In [5]: %who str
beta
In [6]: %whos
Variable Type Data/Info
----------------------------
alpha int 123
beta str beta
In [7]: %who_ls
Out[7]: ['alpha', 'beta']
"""
def test_whos():
"""Check that whos is protected against objects where repr() fails."""
class A(object):
def __repr__(self):
raise Exception()
_ip.user_ns['a'] = A()
_ip.magic("whos")
@py3compat.u_format
def doctest_precision():
"""doctest for %precision
In [1]: f = get_ipython().display_formatter.formatters['text/plain']
In [2]: %precision 5
Out[2]: {u}'%.5f'
In [3]: f.float_format
Out[3]: {u}'%.5f'
In [4]: %precision %e
Out[4]: {u}'%e'
In [5]: f(3.1415927)
Out[5]: {u}'3.141593e+00'
"""
def test_psearch():
with tt.AssertPrints("dict.fromkeys"):
_ip.run_cell("dict.fr*?")
def test_timeit_shlex():
"""test shlex issues with timeit (#1109)"""
_ip.ex("def f(*a,**kw): pass")
_ip.magic('timeit -n1 "this is a bug".count(" ")')
_ip.magic('timeit -r1 -n1 f(" ", 1)')
_ip.magic('timeit -r1 -n1 f(" ", 1, " ", 2, " ")')
_ip.magic('timeit -r1 -n1 ("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b ")')
def test_timeit_arguments():
"Test valid timeit arguments, should not cause SyntaxError (GH #1269)"
_ip.magic("timeit ('#')")
def test_timeit_special_syntax():
"Test %%timeit with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
def test_timeit_return():
"""
    test whether timeit -o returns an object
"""
res = _ip.run_line_magic('timeit','-n10 -r10 -o 1')
assert(res is not None)
def test_timeit_quiet():
"""
test quiet option of timeit magic
"""
with tt.AssertNotPrints("loops"):
_ip.run_cell("%timeit -n1 -r1 -q 1")
@dec.skipif(execution.profile is None)
def test_prun_special_syntax():
"Test %%prun with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('prun', '-q %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('prun', '-q', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
@dec.skipif(execution.profile is None)
def test_prun_quotes():
"Test that prun does not clobber string escapes (GH #1302)"
_ip.magic(r"prun -q x = '\t'")
nt.assert_equal(_ip.user_ns['x'], '\t')
def test_extension():
tmpdir = TemporaryDirectory()
orig_ipython_dir = _ip.ipython_dir
try:
_ip.ipython_dir = tmpdir.name
nt.assert_raises(ImportError, _ip.magic, "load_ext daft_extension")
url = os.path.join(os.path.dirname(__file__), "daft_extension.py")
_ip.magic("install_ext %s" % url)
_ip.user_ns.pop('arq', None)
invalidate_caches() # Clear import caches
_ip.magic("load_ext daft_extension")
nt.assert_equal(_ip.user_ns['arq'], 185)
_ip.magic("unload_ext daft_extension")
assert 'arq' not in _ip.user_ns
finally:
_ip.ipython_dir = orig_ipython_dir
tmpdir.cleanup()
def test_notebook_export_json():
with TemporaryDirectory() as td:
outfile = os.path.join(td, "nb.ipynb")
_ip.ex(py3compat.u_format(u"u = {u}'héllo'"))
_ip.magic("notebook -e %s" % outfile)
def test_notebook_export_py():
with TemporaryDirectory() as td:
outfile = os.path.join(td, "nb.py")
_ip.ex(py3compat.u_format(u"u = {u}'héllo'"))
_ip.magic("notebook -e %s" % outfile)
def test_notebook_reformat_py():
with TemporaryDirectory() as td:
infile = os.path.join(td, "nb.ipynb")
with io.open(infile, 'w', encoding='utf-8') as f:
current.write(nb0, f, 'json')
_ip.ex(py3compat.u_format(u"u = {u}'héllo'"))
_ip.magic("notebook -f py %s" % infile)
def test_notebook_reformat_json():
with TemporaryDirectory() as td:
infile = os.path.join(td, "nb.py")
with io.open(infile, 'w', encoding='utf-8') as f:
current.write(nb0, f, 'py')
_ip.ex(py3compat.u_format(u"u = {u}'héllo'"))
_ip.magic("notebook -f ipynb %s" % infile)
_ip.magic("notebook -f json %s" % infile)
def test_env():
env = _ip.magic("env")
assert isinstance(env, dict), type(env)
class CellMagicTestCase(TestCase):
def check_ident(self, magic):
# Manually called, we get the result
out = _ip.run_cell_magic(magic, 'a', 'b')
nt.assert_equal(out, ('a','b'))
# Via run_cell, it goes into the user's namespace via displayhook
_ip.run_cell('%%' + magic +' c\nd')
nt.assert_equal(_ip.user_ns['_'], ('c','d'))
def test_cell_magic_func_deco(self):
"Cell magic using simple decorator"
@register_cell_magic
def cellm(line, cell):
return line, cell
self.check_ident('cellm')
def test_cell_magic_reg(self):
"Cell magic manually registered"
def cellm(line, cell):
return line, cell
_ip.register_magic_function(cellm, 'cell', 'cellm2')
self.check_ident('cellm2')
def test_cell_magic_class(self):
"Cell magics declared via a class"
@magics_class
class MyMagics(Magics):
@cell_magic
def cellm3(self, line, cell):
return line, cell
_ip.register_magics(MyMagics)
self.check_ident('cellm3')
def test_cell_magic_class2(self):
"Cell magics declared via a class, #2"
@magics_class
class MyMagics2(Magics):
@cell_magic('cellm4')
def cellm33(self, line, cell):
return line, cell
_ip.register_magics(MyMagics2)
self.check_ident('cellm4')
# Check that nothing is registered as 'cellm33'
c33 = _ip.find_cell_magic('cellm33')
nt.assert_equal(c33, None)
def test_file():
"""Basic %%file"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("file", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_var_expand():
"""%%file $filename"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.user_ns['filename'] = fname
ip.run_cell_magic("file", '$filename', u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_unicode():
"""%%file with unicode cell"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("file", fname, u'\n'.join([
u'liné1',
u'liné2',
]))
with io.open(fname, encoding='utf-8') as f:
s = f.read()
nt.assert_in(u'liné1\n', s)
nt.assert_in(u'liné2', s)
def test_file_amend():
"""%%file -a amends files"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file2')
ip.run_cell_magic("file", fname, u'\n'.join([
'line1',
'line2',
]))
ip.run_cell_magic("file", "-a %s" % fname, u'\n'.join([
'line3',
'line4',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line3\n', s)
def test_script_config():
ip = get_ipython()
ip.config.ScriptMagics.script_magics = ['whoda']
sm = script.ScriptMagics(shell=ip)
nt.assert_in('whoda', sm.magics['cell'])
@dec.skip_win32
def test_script_out():
ip = get_ipython()
ip.run_cell_magic("script", "--out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
@dec.skip_win32
def test_script_err():
ip = get_ipython()
ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_bg_out():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
@dec.skip_win32
def test_script_bg_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
@dec.skip_win32
def test_script_bg_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
def test_script_defaults():
ip = get_ipython()
for cmd in ['sh', 'bash', 'perl', 'ruby']:
try:
find_cmd(cmd)
except Exception:
pass
else:
nt.assert_in(cmd, ip.magics_manager.magics['cell'])
@magics_class
class FooFoo(Magics):
"""class with both %foo and %%foo magics"""
@line_magic('foo')
def line_foo(self, line):
"I am line foo"
pass
@cell_magic("foo")
def cell_foo(self, line, cell):
"I am cell foo, not line foo"
pass
def test_line_cell_info():
"""%%foo and %foo magics are distinguishable to inspect"""
ip = get_ipython()
ip.magics_manager.register(FooFoo)
oinfo = ip.object_inspect('foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
oinfo = ip.object_inspect('%%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.cell_foo.__doc__)
oinfo = ip.object_inspect('%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__)
def test_multiple_magics():
ip = get_ipython()
foo1 = FooFoo(ip)
foo2 = FooFoo(ip)
mm = ip.magics_manager
mm.register(foo1)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo1)
mm.register(foo2)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo2)
def test_alias_magic():
"""Test %alias_magic."""
ip = get_ipython()
mm = ip.magics_manager
# Basic operation: both cell and line magics are created, if possible.
ip.run_line_magic('alias_magic', 'timeit_alias timeit')
nt.assert_in('timeit_alias', mm.magics['line'])
nt.assert_in('timeit_alias', mm.magics['cell'])
# --cell is specified, line magic not created.
ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit')
nt.assert_not_in('timeit_cell_alias', mm.magics['line'])
nt.assert_in('timeit_cell_alias', mm.magics['cell'])
# Test that line alias is created successfully.
ip.run_line_magic('alias_magic', '--line env_alias env')
nt.assert_equal(ip.run_line_magic('env', ''),
ip.run_line_magic('env_alias', ''))
def test_save():
"""Test %save."""
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print(a, b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "testsave.py")
ip.run_line_magic("save", "%s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 1)
nt.assert_in('coding: utf-8', content)
ip.run_line_magic("save", "-a %s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 2)
nt.assert_in('coding: utf-8', content)
def test_store():
"""Test %store."""
ip = get_ipython()
ip.run_line_magic('load_ext', 'storemagic')
# make sure the storage is empty
ip.run_line_magic('store', '-z')
ip.user_ns['var'] = 42
ip.run_line_magic('store', 'var')
ip.user_ns['var'] = 39
ip.run_line_magic('store', '-r')
nt.assert_equal(ip.user_ns['var'], 42)
ip.run_line_magic('store', '-d var')
ip.user_ns['var'] = 39
ip.run_line_magic('store' , '-r')
nt.assert_equal(ip.user_ns['var'], 39)
def _run_edit_test(arg_s, exp_filename=None,
exp_lineno=-1,
exp_contents=None,
exp_is_temp=None):
ip = get_ipython()
M = code.CodeMagics(ip)
last_call = ['','']
opts,args = M.parse_options(arg_s,'prxn:')
filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call)
if exp_filename is not None:
nt.assert_equal(exp_filename, filename)
if exp_contents is not None:
with io.open(filename, 'r', encoding='utf-8') as f:
contents = f.read()
nt.assert_equal(exp_contents, contents)
if exp_lineno != -1:
nt.assert_equal(exp_lineno, lineno)
if exp_is_temp is not None:
nt.assert_equal(exp_is_temp, is_temp)
def test_edit_interactive():
"""%edit on interactively defined objects"""
ip = get_ipython()
n = ip.execution_count
ip.run_cell(u"def foo(): return 1", store_history=True)
try:
_run_edit_test("foo")
except code.InteractivelyDefined as e:
nt.assert_equal(e.index, n)
else:
raise AssertionError("Should have raised InteractivelyDefined")
def test_edit_cell():
"""%edit [cell id]"""
ip = get_ipython()
ip.run_cell(u"def foo(): return 1", store_history=True)
    # %edit 1 should resolve to the stored contents of input cell 1, in a temp file
_run_edit_test("1", exp_contents=ip.user_ns['In'][1], exp_is_temp=True)
| alephu5/Soundbyte | environment/lib/python3.3/site-packages/IPython/core/tests/test_magic.py | Python | gpl-3.0 | 27,805 |
#!/usr/bin/env python
# coding: utf-8
# Copyright 2011 Álvaro Justen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from textwrap import dedent
import unittest
import tempfile
import os
from outputty import Table
class TestTableTxt(unittest.TestCase):
def test_should_save_data_into_text_file(self):
temp_fp = tempfile.NamedTemporaryFile(delete=False)
temp_fp.close()
my_table = Table(headers=['ham', 'spam', 'eggs'])
my_table.append({'ham': '', 'spam': '', 'eggs': ''})
my_table.append({'ham': 1, 'spam': 2, 'eggs': 3})
my_table.append({'ham': 11, 'spam': 22, 'eggs': 33})
my_table.write('text', temp_fp.name)
output = my_table.write('text')
fp = open(temp_fp.name, 'r')
contents = fp.read()
fp.close()
os.remove(temp_fp.name)
self.assertEqual(contents, dedent('''
+-----+------+------+
| ham | spam | eggs |
+-----+------+------+
|     |      |      |
|   1 |    2 |    3 |
|  11 |   22 |   33 |
+-----+------+------+
''').strip())
        self.assertEqual(contents, output)
def test_input_and_output_character_encoding_in_method_to_text_file(self):
temp_fp = tempfile.NamedTemporaryFile(delete=False)
temp_fp.close()
my_table = Table(headers=['Álvaro'.decode('utf8').encode('utf16')],
input_encoding='utf16', output_encoding='iso-8859-1')
my_table.append(['Píton'.decode('utf8').encode('utf16')])
my_table.write('text', temp_fp.name)
fp = open(temp_fp.name)
file_contents = fp.read()
fp.close()
os.remove(temp_fp.name)
output = dedent('''
+--------+
| Álvaro |
+--------+
| Píton |
+--------+
''').strip().decode('utf8').encode('iso-8859-1')
self.assertEqual(file_contents, output)
#TODO: test input and output encoding
| turicas/outputty | tests/test_Table_text.py | Python | gpl-3.0 | 2,601 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class EInvoiceUser(Document):
pass
| frappe/erpnext | erpnext/regional/doctype/e_invoice_user/e_invoice_user.py | Python | gpl-3.0 | 241 |
#
# Copyright © 2012–2022 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Tests for rendering quality checks."""
from weblate.checks.render import MaxSizeCheck
from weblate.fonts.models import FontGroup, FontOverride
from weblate.fonts.tests.utils import FontTestCase
from weblate.utils.state import STATE_TRANSLATED
class MaxSizeCheckTest(FontTestCase):
def setUp(self):
super().setUp()
self.check = MaxSizeCheck()
def perform_check(self, target, flags):
unit = self.get_unit()
unit.flags = flags
unit.target = target
unit.state = STATE_TRANSLATED
return self.check.check_target(["source"], [target], unit)
def test_good(self):
self.assertFalse(self.perform_check("short", "max-size:500"))
self.assertEqual(self.check.last_font, "sans")
def test_bad_long(self):
self.assertTrue(self.perform_check("long" * 50, "max-size:500"))
self.assertEqual(self.check.last_font, "sans")
def test_bad_multiline(self):
self.assertTrue(self.perform_check("long " * 50, "max-size:500"))
self.assertEqual(self.check.last_font, "sans")
def test_good_multiline(self):
self.assertFalse(self.perform_check("long " * 50, "max-size:500:50"))
self.assertEqual(self.check.last_font, "sans")
def add_font_group(self):
font = self.add_font()
return FontGroup.objects.create(name="droid", font=font, project=self.project)
def test_custom_font(self):
self.add_font_group()
self.assertFalse(self.perform_check("short", "max-size:500,font-family:droid"))
self.assertEqual(self.check.last_font, "Droid Sans Fallback Regular")
def test_custom_font_override(self):
group = self.add_font_group()
FontOverride.objects.create(
group=group, language=self.get_translation().language, font=group.font
)
self.assertFalse(self.perform_check("short", "max-size:500,font-family:droid"))
self.assertEqual(self.check.last_font, "Droid Sans Fallback Regular")
| nijel/weblate | weblate/checks/tests/test_render_checks.py | Python | gpl-3.0 | 2,761 |
# Copyright (C) 2012-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Additional tests for the `help` email command."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
]
import unittest
from mailman.app.lifecycle import create_list
from mailman.commands.eml_help import Help
from mailman.email.message import Message
from mailman.interfaces.command import ContinueProcessing
from mailman.runners.command import Results
from mailman.testing.layers import ConfigLayer
class TestHelp(unittest.TestCase):
"""Test email help."""
layer = ConfigLayer
def setUp(self):
self._mlist = create_list('[email protected]')
self._help = Help()
def test_too_many_arguments(self):
# Error message when too many help arguments are given.
results = Results()
status = self._help.process(self._mlist, Message(), {},
('more', 'than', 'one'),
results)
self.assertEqual(status, ContinueProcessing.no)
self.assertEqual(unicode(results), """\
The results of your email command are provided below.
help: too many arguments: more than one
""")
def test_no_such_command(self):
        # Error message when asking for help on a nonexistent command.
results = Results()
status = self._help.process(self._mlist, Message(), {},
('doesnotexist',), results)
self.assertEqual(status, ContinueProcessing.no)
self.assertEqual(unicode(results), """\
The results of your email command are provided below.
help: no such command: doesnotexist
""")
| trevor/mailman3 | src/mailman/commands/tests/test_help.py | Python | gpl-3.0 | 2,367 |
#!/usr/bin/env python3
'''
TYPE 1 - BEST. Has all of the following characteristics:
1. GM-ES prediction identical to a CEGMA prediction.
2. Identical to an AAT alignment of at least one C. muris or C. parvum homologue.
3. Intron-exon boundaries of model supported by assembled transcripts, with start
& stop coordinates contained within the first and last exons of the transcript,
respectively. Non-CDS portion does not have to be identical, i.e. allowing for UTRs.
Example: http://bit.ly/1irtqwZ
Example: http://bit.ly/1emZ39O # not the best example, because while technically true,
             the terminal exon of the Cufflinks transcript is ridiculously long.
Type 2 - BEST. Has all of the following characteristics:
1. GM-ES prediction identical to a CEGMA prediction.
2. NOT identical to an AAT alignment of at least one C. muris or C. parvum homologue.
3. Intron-exon boundaries of model supported by assembled transcripts, with start &
stop coordinates contained within the first and last exons of the transcript,
respectively. Non-CDS portion does not have to be identical, i.e. allowing for UTRs.
Example:
http://bit.ly/1g43oCO
TYPE 2 - BETTER. Has all of the following characteristics:
1. No match to CEGMA model. By no match, I mean either no CEGMA model exists OR one
does, but it does not have same coordinates.
2. GM-ES model identical to an AAT alignment of at least one C. muris or C. parvum
homologue.
3. Intron-exon boundaries of model supported by assembled transcripts, with start &
stop coordinates contained within the first and last exons of the transcript,
respectively.
Example:
http://bit.ly/1e8CsTG
TYPE 3 - STILL BETTER. Has all of the following characteristics:
1. CEGMA model (a case where we are using CEGMA model rather than GM-ES model)
2. Does not match GM-ES model (imperfect match or no GMES model)
3. CEGMA model identical to an AAT alignment of at least one C. muris or C. parvum
homologue.
4. Intron-exon boundaries of CEGMA model supported by assembled transcripts, with
start & stop coordinates contained within the first and last exons of the transcript,
respectively.
Example:
http://bit.ly/Q0ZG39
http://bit.ly/1gJ6Pe6
Author: Joshua Orvis
'''
import os
import biocodegff
import biocodeutils
def main():
gm_es_file = 'genemark_hmm.gff3'
cegma_file = 'output.cegma.gff3'
#aat_file = 'bail_training_genes.aat.1500maxintron.80percid.gff3'
aat_file = 'aat.bail_hominis_filtered_training.gff3'
#aat_file = 'aat.merged.gff3'
print("INFO: parsing Genemark-ES data")
(assemblies, gm_es_features) = biocodegff.get_gff3_features( gm_es_file )
gm_es_genes = get_genes_from_dict(gm_es_features)
print("\tINFO: Got {0} Genemark-ES genes".format(len(gm_es_genes)))
print("INFO: parsing CEGMA data")
(assemblies, cegma_features) = biocodegff.get_gff3_features( cegma_file, assemblies=assemblies )
cegma_genes = get_genes_from_dict(cegma_features)
print("\tINFO: Got {0} CEGMA genes".format(len(cegma_genes)))
print("INFO: parsing AAT results")
(assemblies, aat_muris_features) = biocodegff.get_gff3_features( aat_file, assemblies=assemblies)
aat_genes = get_genes_from_dict(aat_muris_features)
print("\tINFO: Got {0} AAT 'genes'".format(len(aat_genes)))
genemark_cegma_shared_genes = list()
gmes_cegma_fh = open('gmes_cegma.shared.ids', 'wt')
for gm_es_gene in gm_es_genes:
for cegma_gene in cegma_genes:
if gm_es_gene.has_same_coordinates_as( thing=cegma_gene ):
if gm_es_gene.shares_exon_structure_with( thing=cegma_gene ) == True:
genemark_cegma_shared_genes.append(gm_es_gene)
gmes_cegma_fh.write("{0}\n".format(gm_es_gene.id))
break
print("\n{0} genes were shared perfectly between Genemark-ES and CEGMA".format(len(genemark_cegma_shared_genes)) )
#############################################################################
genemark_aat_shared_genes = list()
gmes_aat_fh = open('gmes_aat.shared.ids', 'wt')
for gm_es_gene in gm_es_genes:
for aat_gene in aat_genes:
if gm_es_gene.shares_exon_structure_with( thing=aat_gene, stop_tolerant=True ) == True:
genemark_aat_shared_genes.append(gm_es_gene)
gmes_aat_fh.write("{0}\n".format(gm_es_gene.id))
break
print("{0} Genemark-ES genes had an exact AAT match".format(len(genemark_aat_shared_genes)) )
##############################################################################
cegma_matching_gm_es = list()
genemark_aat_cegma_shared_genes = list()
gmes_aat_cegma_fh = open('gmes_aat_cegma.shared.ids', 'wt')
for cegma_gene in cegma_genes:
match_found = False
for gm_es_gene in gm_es_genes:
if cegma_gene.has_same_coordinates_as( thing=gm_es_gene ):
if cegma_gene.shares_exon_structure_with( thing=gm_es_gene ) == True:
match_found = True
if gm_es_gene in genemark_aat_shared_genes and gm_es_gene not in genemark_aat_cegma_shared_genes:
genemark_aat_cegma_shared_genes.append(gm_es_gene)
gmes_aat_cegma_fh.write("{0}\n".format(gm_es_gene.id))
break
if match_found == True:
cegma_matching_gm_es.append(cegma_gene)
print("{0} genes with GeneMark-ES, CEGMA and AAT agreement".format(len(genemark_aat_cegma_shared_genes)) )
training_fh = open('training_gene.ids', 'wt')
for gene in genemark_aat_cegma_shared_genes:
training_fh.write("{0}\n".format(gene.id) )
##############################################################################
cegma_with_aat_not_gm_es = list()
cegma_aat_nogmes_fh = open('cegma_aat_nogmes.shared.ids', 'wt')
for cegma_gene in cegma_genes:
if cegma_gene in cegma_matching_gm_es:
continue
for aat_gene in aat_genes:
if cegma_gene.shares_exon_structure_with( thing=aat_gene, stop_tolerant=True ) == True:
cegma_with_aat_not_gm_es.append(cegma_gene)
cegma_aat_nogmes_fh.write("{0}\n".format(cegma_gene.id))
break
print("{0} CEGMA genes had no GeneMark-ES match but did have an AAT one".format(len(cegma_with_aat_not_gm_es)) )
def get_genes_from_dict( features ):
genes = list()
for feat_id in features:
feat = features[feat_id]
if feat.__class__.__name__ == 'Gene':
genes.append(feat)
return genes
if __name__ == '__main__':
main()
| zctea/biocode | sandbox/jorvis/custom.classify_mucor_tier3_genes.py | Python | gpl-3.0 | 6,864 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import os, shutil, errno
from calibre.customize.ui import run_plugins_on_import
from calibre.ebooks.metadata.meta import metadata_from_formats
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.utils.filenames import samefile
def serialize_metadata_for(paths, tdir, group_id):
mi = metadata_from_formats(paths)
mi.cover = None
cdata = None
if mi.cover_data:
cdata = mi.cover_data[-1]
mi.cover_data = (None, None)
if not mi.application_id:
mi.application_id = '__calibre_dummy__'
opf = metadata_to_opf(mi, default_lang='und')
has_cover = False
if cdata:
with open(os.path.join(tdir, '%s.cdata' % group_id), 'wb') as f:
f.write(cdata)
has_cover = True
return mi, opf, has_cover
def read_metadata_bulk(get_opf, get_cover, paths):
mi = metadata_from_formats(paths)
mi.cover = None
cdata = None
if mi.cover_data:
cdata = mi.cover_data[-1]
mi.cover_data = (None, None)
if not mi.application_id:
mi.application_id = '__calibre_dummy__'
ans = {'opf': None, 'cdata': None}
if get_opf:
ans['opf'] = metadata_to_opf(mi, default_lang='und')
if get_cover:
ans['cdata'] = cdata
return ans
def run_import_plugins(paths, group_id, tdir):
final_paths = []
for path in paths:
if not os.access(path, os.R_OK):
continue
try:
nfp = run_plugins_on_import(path)
except Exception:
nfp = None
import traceback
traceback.print_exc()
if nfp and os.access(nfp, os.R_OK) and not samefile(nfp, path):
# Ensure that the filename is preserved so that
# reading metadata from filename is not broken
name = os.path.splitext(os.path.basename(path))[0]
ext = os.path.splitext(nfp)[1]
path = os.path.join(tdir, '%s' % group_id, name + ext)
try:
os.mkdir(os.path.dirname(path))
except EnvironmentError as err:
if err.errno != errno.EEXIST:
raise
try:
os.rename(nfp, path)
except EnvironmentError:
shutil.copyfile(nfp, path)
final_paths.append(path)
return final_paths
def has_book(mi, data_for_has_book):
return mi.title and icu_lower(mi.title.strip()) in data_for_has_book
def read_metadata(paths, group_id, tdir, common_data=None):
paths = run_import_plugins(paths, group_id, tdir)
mi, opf, has_cover = serialize_metadata_for(paths, tdir, group_id)
duplicate_info = None
if isinstance(common_data, (set, frozenset)):
duplicate_info = has_book(mi, common_data)
return paths, opf, has_cover, duplicate_info
| jelly/calibre | src/calibre/ebooks/metadata/worker.py | Python | gpl-3.0 | 3,102 |
#-*- coding: utf-8 -*-
#
#copyright 2010 Dominik "Socek" Długajczyk
#
#This file is part of Gadu History.
#
#Gadu History is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#Gadu History is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Gadu History; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import curses
from main import Colors, get_stdscr
from lib.gui.text import ROText
from lib.gui.locals import encode_string
class BaseView(object):
"""Base view."""
def __init__(self, title = None, bar = None):
def bar_init():
self._bar_info = bar
if self._bar_info == None:
return
(maxy,maxx) = get_stdscr().getmaxyx()
lines = self._bar_info['lines']
self._bar = curses.newwin( lines, maxx, maxy - lines , 0 )
#-----------------------------
self._title_text = title
self._title = None
self.title = title
self._main = None
bar_init()
def _get_title(self):
"""Title, shown at the top of the screen."""
return self._title
def _set_title(self, title):
(maxy,maxx) = get_stdscr().getmaxyx()
if title == None:
self._title = None
else:
if self._title != None:
self._title.clear()
self._title = curses.newwin( 1, maxx, 0, 0 )
title = encode_string(title.strip())
center = ( maxx / 2 ) - ( len(title) / 2 )
flags = curses.color_pair(Colors.title )
self._title.bkgd( ' ', flags )
self._title.addstr( 0, center, title )
title = property( _get_title, _set_title )
def refresh(self):
"""refresh(self) -> None
Refreshing of windows.
"""
if self._title_text != None:
self.title = self._title_text
self._title.refresh()
if self._main != None:
(maxy,maxx) = get_stdscr().getmaxyx()
#if we have the title, then we need to cut the upper line
if self._title == None:
y = 0
else:
y = 1
#if we have bottom bar, then we need to cut the bottom lines
if self._bar_info:
bottom = maxy - self._bar_info['lines'] - 1
else:
bottom = maxy-1
self._main.refresh( self._up, 0, y, 0, bottom, maxx-1 )
if self._bar_info:
self._bar.clear()
(maxy,maxx) = get_stdscr().getmaxyx()
lines = self._bar_info['lines']
self._bar.mvwin( maxy - lines , 0 )
flags = curses.color_pair(Colors.title )
self._bar.bkgd( ' ', flags )
loop = -1
for line in self._bar_info['text']:
loop += 1
self._bar.addstr( loop, 0, line )
self._bar.refresh()
def clear(self):
"""clear(self) -> None
Clearing of windows.
"""
if self._title != None:
self._title.clear()
self._title.refresh()
if self._main != None:
self._main.clear()
(maxy,maxx) = get_stdscr().getmaxyx()
if self._title == None:
y = 0
else:
y = 1
self._main.refresh( self._up, 0, y, 0, maxy-1, maxx-1 )
def please_wait(self):
        return ROText(1, 0, u'Proszę czekać')  # Polish for "Please wait"
| porridge/gaduhistory | lib/gui/base.py | Python | gpl-3.0 | 3,927 |
'''
Small class to handle a single body. The notation follows
loosely the paper Brownian dynamics of confined rigid
bodies, Steven Delong et al. The Journal of Chemical
Physics 143, 144107 (2015). doi: 10.1063/1.4932062
'''
import numpy as np
import copy
from quaternion_integrator.quaternion import Quaternion
import sys
class Body(object):
'''
Small class to handle a single body.
'''
def __init__(self, location, orientation, reference_configuration, blob_radius):
'''
    Constructor. Stores the body location, orientation, reference blob configuration and blob radius.
'''
# Location as np.array.shape = 3
self.location = np.copy(location)
self.location_new = np.copy(location)
self.location_old = np.copy(location)
# Orientation as Quaternion
self.orientation = copy.copy(orientation)
self.orientation_new = copy.copy(orientation)
self.orientation_old = copy.copy(orientation)
# Number of blobs
self.Nblobs = reference_configuration.shape[0]
# Reference configuration. Coordinates of blobs for quaternion [1, 0, 0, 0]
# and location = np.array[0, 0, 0]) as a np.array.shape = (Nblobs, 3)
# or np.array.shape = (Nblobs * 3)
self.reference_configuration = np.reshape(reference_configuration[:,0:3], (self.Nblobs, 3))
# Blob masses
self.blob_masses = np.ones(self.Nblobs)
# Blob radius
self.blob_radius = blob_radius
if reference_configuration.shape[1] == 4:
self.blobs_radius = reference_configuration[:,3]
else:
self.blobs_radius = np.ones(self.Nblobs) * blob_radius
# Body length
self.body_length = None
# Name of body and type of body. A string or number
self.name = None
self.type = None
self.mobility_blobs = None
self.mobility_body = None
    # Geometric matrix K (see paper Delong et al. 2015).
self.K = None
self.rotation_matrix = None
# Some default functions
self.function_slip = self.default_zero_blobs
self.function_force = self.default_none
self.function_torque = self.default_none
self.function_force_blobs = self.default_zero_blobs
self.prescribed_velocity = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
self.prescribed_kinematics = False
self.mobility_blobs_cholesky = None
self.ID = None
def get_r_vectors(self, location = None, orientation = None):
'''
Return the coordinates of the blobs.
'''
# Get location and orientation
if location is None:
location = self.location
if orientation is None:
orientation = self.orientation
# Compute blobs coordinates
rotation_matrix = orientation.rotation_matrix()
r_vectors = np.dot(self.reference_configuration, rotation_matrix.T)
r_vectors += location
return r_vectors
def calc_rot_matrix(self, location = None, orientation = None):
'''
Calculate the matrix R, where the i-th 3x3 block of R gives
    (R_i x) = -(r_i cross x).
R has shape (3*Nblobs, 3).
'''
r_vectors = self.get_r_vectors(location, orientation) - (self.location if location is None else location)
rot_matrix = np.zeros((r_vectors.shape[0], 3, 3))
rot_matrix[:,0,1] = r_vectors[:,2]
rot_matrix[:,0,2] = -r_vectors[:,1]
rot_matrix[:,1,0] = -r_vectors[:,2]
rot_matrix[:,1,2] = r_vectors[:,0]
rot_matrix[:,2,0] = r_vectors[:,1]
rot_matrix[:,2,1] = -r_vectors[:,0]
return np.reshape(rot_matrix, (3*self.Nblobs, 3))
def calc_J_matrix(self):
'''
Returns a block matrix with dimensions (Nblobs, 1)
with each block being a 3x3 identity matrix.
'''
J = np.zeros((3*self.Nblobs, 3))
J[0::3,0] = 1.0
J[1::3,1] = 1.0
J[2::3,2] = 1.0
return J
def calc_K_matrix(self, location = None, orientation = None):
'''
Return geometric matrix K = [J, rot] with shape (3*Nblobs, 6)
'''
return np.concatenate([self.calc_J_matrix(), self.calc_rot_matrix(location, orientation)], axis=1)
def check_function(self, location = None, orientation = None, distance = None):
'''
Function to check that the body didn't cross the wall,
i.e., all its blobs have z > distance. Default distance is 0.
'''
# Define distance
if not distance:
distance = 0.0
# Get location and orientation
if location is None:
location = self.location
if orientation is None:
orientation = self.orientation
# Get current configuration
r_vectors = self.get_r_vectors(location, orientation)
# Loop over blobs
for vec in r_vectors:
if vec[2] < distance:
return False
return True
def calc_slip(self):
'''
Return the slip on the blobs.
'''
return self.function_slip(self)
def calc_prescribed_velocity(self):
'''
Return the body prescribed velocity.
'''
return self.prescribed_velocity
def calc_force(self):
'''
Return the force on the body.
'''
return self.function_force()
def calc_torque(self):
'''
Return the torque on the body.
'''
return self.function_torque()
def calc_force_blobs(self):
'''
Return the force on the blobs.
'''
return self.function_force_blobs()
def default_zero_blobs(self, *args, **kwargs):
return np.zeros((self.Nblobs, 3))
def default_none(self, *args, **kwargs):
return None
def calc_mobility_blobs(self, eta, a):
'''
Calculate blobs mobility. Shape (3*Nblobs, 3*Nblobs).
'''
r_vectors = self.get_r_vectors()
return self.mobility_blobs(r_vectors, eta, a)
def calc_mobility_body(self, eta, a, M = None, M_inv = None):
'''
Calculate the 6x6 body mobility that maps
forces and torques to velocities and angular
    velocities.
'''
K = self.calc_K_matrix()
if M_inv is not None:
return np.linalg.pinv( np.dot(K.T, np.dot(M_inv, K)) )
if M is None:
M = self.calc_mobility_blobs(eta, a)
return np.linalg.pinv( np.dot(K.T, np.dot(np.linalg.inv(M), K)) )
def calc_mobility_blobs_cholesky(self, eta, a, M = None):
'''
Compute the Cholesky factorization L of the blobs mobility M=L*L.T.
L is a lower triangular matrix with shape (3*Nblobs, 3*Nblobs).
'''
if M is None:
M = self.calc_mobility_blobs(eta, a)
return np.linalg.cholesky(M)
def calc_body_length(self):
'''
It calculates, in one sense, the length of the body. Specifically, it
returns the distance between the two furthest apart blobs in the body.
'''
max_distance = 0.
    for i in range(self.Nblobs - 1):  # loop over blob rows; .size would count every scalar entry
for blob in self.reference_configuration[i+1:]:
blob_distance = np.linalg.norm(blob - self.reference_configuration[i])
if blob_distance > max_distance:
max_distance = blob_distance
self.body_length = max_distance + 2*self.blob_radius
return self.body_length
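# A minimal numeric sanity check, added for illustration; it is not part of
# the original module. It rebuilds one 3x3 block of calc_rot_matrix with
# plain numpy and verifies the documented sign convention
# (R_i x) = -(r_i cross x), plus the [J, R] layout used by calc_K_matrix.
if __name__ == '__main__':
  r = np.array([1.0, 2.0, 3.0])   # blob position relative to body location
  x = np.array([0.5, -1.0, 2.0])  # arbitrary test vector
  # Same entries as one block assembled in calc_rot_matrix
  R_i = np.array([[ 0.0,   r[2], -r[1]],
                  [-r[2],  0.0,   r[0]],
                  [ r[1], -r[0],  0.0]])
  assert np.allclose(np.dot(R_i, x), -np.cross(r, x))
  # For a single blob, K = [J, R_i] stacks a 3x3 identity next to R_i
  K = np.concatenate([np.eye(3), R_i], axis=1)
  assert K.shape == (3, 6)
  print('calc_rot_matrix sign convention verified')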
| stochasticHydroTools/RigidMultiblobsWall | body/body.py | Python | gpl-3.0 | 6,853 |
# -*- encoding: utf-8 -*-
from abjad import *
def test_timespantools_Timespan_stops_at_or_after_offset_01():
timespan = timespantools.Timespan(0, 10)
offset = durationtools.Offset(-5)
assert timespan.stops_at_or_after_offset(offset)
def test_timespantools_Timespan_stops_at_or_after_offset_02():
timespan = timespantools.Timespan(0, 10)
offset = durationtools.Offset(0)
assert timespan.stops_at_or_after_offset(offset)
def test_timespantools_Timespan_stops_at_or_after_offset_03():
timespan = timespantools.Timespan(0, 10)
offset = durationtools.Offset(5)
assert timespan.stops_at_or_after_offset(offset)
def test_timespantools_Timespan_stops_at_or_after_offset_04():
timespan = timespantools.Timespan(0, 10)
offset = durationtools.Offset(10)
assert timespan.stops_at_or_after_offset(offset)
def test_timespantools_Timespan_stops_at_or_after_offset_05():
timespan = timespantools.Timespan(0, 10)
offset = durationtools.Offset(15)
assert not timespan.stops_at_or_after_offset(offset) | mscuthbert/abjad | abjad/tools/timespantools/test/test_timespantools_Timespan_stops_at_or_after_offset.py | Python | gpl-3.0 | 1,048 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Various information about GTG. Should be updated for every release.
Contains the information displayed in GTG's Credits."""
from GTG import _
NAME = "Getting Things GNOME!"
URL = "http://gtgnome.net"
HELP_URI = "help:gtg"
TRANSLATE_URL = "https://translations.launchpad.net/gtg"
REPORT_BUG_URL = "https://bugs.launchpad.net/gtg/+filebug"
EMAIL = "[email protected]"
VERSION = '0.3.1'
SHORT_DESCRIPTION = \
    _('GTG is a personal tasks and TODO-list items organizer for the GNOME desktop.')
# CREDITS
AUTHORS = ["Main developers:",
"\tLionel Dricot <[email protected]>",
"\tBertrand Rousseau <[email protected]>",
"\tPaulo Cabido <[email protected]>",
"\tLuca Invernizzi <[email protected]>",
"\tKevin Mehall <[email protected]>",
"\tLuca Falavigna <[email protected]>",
"\tIzidor Matušov <[email protected]>",
"\tNimit Shah <[email protected]>",
"\tParin Porecha <[email protected]>",
"Contributors:",
"\tAbdul Rauf <[email protected]>",
"\tAbhiram <[email protected]>",
"\tAlayn Gortazar <[email protected]>",
"\tAlan Gomes <[email protected]>",
"\tAnant Gupta <[email protected]>",
"\tAndrew Starr-Bochicchio <[email protected]>",
"\tAntonio Roquentin <[email protected]>",
"\tAntons Rebguns <[email protected]>",
"\tBen Dowling <[email protected]>",
"\tBrian Kennedy <[email protected]>",
"\tBryce Harrington <[email protected]>",
"\tCarl Chenet <[email protected]>",
"\tChenxiong Qi <[email protected]>",
"\tChris Johnston <[email protected]>",
"\tCodee <[email protected]>",
"\tDaniel Neel <[email protected]>",
"\tdAnjou <[email protected]>",
"\tDavid Planella <[email protected]>",
"\tDmDr <[email protected]>",
"\tErin McLaughlin <[email protected]>",
"\tFabiano Fidencio <[email protected]>",
"\tFabio Prina <[email protected]>",
"\tFrançois",
"\tGérôme Fournier",
"\thuxuan <[email protected]>",
"\tIvan Evtukhovich <[email protected]>",
"\tJean-François Fortin Tam <[email protected]>",
"\tJeff Oliver <[email protected]>",
"\tJérôme Guelfucci <[email protected]>",
"\tJoão Ascenso <[email protected]>",
"\tJonathan Barnoud <[email protected]>",
"\tJonathan Lange <[email protected]>",
"\tKalle Persson <[email protected]>",
"\tKees Cook <[email protected]>",
"\tkpytang",
"\tMadhumitha Viswanathan <[email protected]>",
"\tMarcos Lenharo",
"\tMarko Kevac <[email protected]>",
"\tMarta Maria Casetti <[email protected]>",
"\tMatthew Rasmus <[email protected]>",
"\tMichael Vogt <[email protected]>",
"\tNicolas Maître <[email protected]>",
"\tPaul Kishimoto <[email protected]>",
"\tRichard Klein <[email protected]>",
"\tSaurabh Anand <[email protected]>",
"\tSebastian Dyroff <[email protected]>",
"\tSong Yangyu <[email protected]>",
"\tStefan Handschuh",
"\tSteve Scheel <[email protected]>",
"\tThibault Fevry <[email protected]>",
"\tTom Kadwill <[email protected]>",
"\tViktor Nagy <[email protected]>",
"\tVolodymyr Floreskul <[email protected]>",
"\tWolter Hellmund <[email protected]>",
"\tZach Shepherd <[email protected]>",
"\tZimin Huang <[email protected]>"]
ARTISTS = ["Kalle Persson <[email protected]>",
"Bertrand Rousseau <[email protected]>"]
ARTISTS.sort()
# Please, keep the width at max 80 characters wide because
# GtkAboutDialog in GTK3 can't wrap text :/
TRANSLATORS = \
"""
Afrikaans:Arthur Rilke, Walter Leibbrandt, Wesley Channon
Albanian: Vilson Gjeci
Arabic: Ahmed Kotb, Ali AlNoaimi, Anas Almzoghy, Basher Aobasher,
Islam Alshaikh, Kaïs Bejaoui
Asturian: Xuacu Saturio, ivarela
Basque: Asier Iturralde Sarasola, Oier Mees
Belarusian: Egor Kuryanovich, Iryna Nikanchuk
Bengali: XFACT, nasir khan saikat
Brazilian: AdirKuhn, André Gondim, Belenos Govannnon, Daniel Tiecher,
Davi da Silva Böger, Djavan Fagundes, Douglas Santos, Enrico Nicoletto,
Frederico Lopes, João Cruz Jr, João Santana, NeLaS, Nilton, Rafael Neri,
Teylo Laundos Aguiar, Waldir Leoncio, andre felix
Bulgarian: Damyan Ivanov, Svetoslav Stefanov, i.raynov
Catalan: Adolfo Jayme Barrientos, Carles Sala, David Planella,
Juanma Hernández, Marc Recasens, Radina Matic, Siegfried Gevatter,
anna marti
Chinese (simplified): Ang Gao, Anthony Fok, Eleanor Chen, Harold.luo,
Heling Yao, Joseph Lew, Kyle WANG, Wylmer Wang, Xuan Hu, chars, 百草谷居士,
somebodykiss
Chinese (traditional): Andrew Liu, Anthony Fok, Po-Jen Hsu, Rex Tsai,
Toomore, [email protected]
Croatian: Saša Teković, gogo
Czech: David Kovář, Dominik Janků, Jakub Kozisek, Jan Hrdina, Konki,
Kuvaly [LCT], Ladislav Prskavec, Mailo, Martin Rotter, Martin Volf,
Marv-CZ, Milos-kroulik-3, Roman Horník, Tadeáš Pařík, Viktor
Danish: Aputsiaq Niels Janussen, Ask Hjorth Larsen, Gamayun, Mogens Isager,
Peter Skov, mkjeldsen
Dutch: Emilien Klein, Foppe Benedictus, Hanssen, Heureka, Lionel Dricot,
Lucas Vieites, Nathan, Pieter J. Kersten (EduSense BV)Rachid,
Reinout van Schouwen, Tino Meinen, klap-in, puccha, rob
English (Canada): Itai Molenaar, Ken Sharp, Luca Falavigna, Paul Kishimoto
English (United Kingdom): Alexandre COLLIGNON, Alfredo Hernández,
Anthony Harrington, Bruce Cowan, Cyril, Jonathon Hodges, Lionel Dricot,
Luca Invernizzi, Michael Keppler, Olly Betts, Philipp Kleinhenz,
Robert Readman, Sid Roberts, ascenseur, lopho, Luca Falavigna
Finnish: Heikki Mattila, Jussi Tiira, Mika Tapojärvi
French: AkaiKen, Alexandre COLLIGNON, Bertrand Rousseau, Bruno Veilleux,
Cyril, David Coeurjolly, Dominick Rivard, Emilien Klein, FrancoisSchoubben,
François, Greizgh, Jibec, Lionel Dricot, Maxime Veroone, Nicolas Delvaux,
Perniflosse, Philippe Le Toquin, Pititjo, Ptitphysik, Quentin Pagès,
Rafik Ouerchefani, Raphaël, Sid Roberts, Simon Leblanc, Stanislas Michalak,
Thibault Févry, Timothee Bernard, Yves Masson, bsaleil, gaetanpralong,
sebz, Alexandre Germain, François Vogelweith, Jonathan Fromentin,
Loic Dreux, Nicolas Gaulard-Querol, Sebastien Moran, Sylvie Gallet,
[email protected]
Galician: Dario, Fran Diéguez, Jorge Álvarez, Miguel Anxo Bouzada, keko
Georgian: Giorgi Jvaridze
German: Abb, Alexandre COLLIGNON, Christoph-M. Lehmann, Daniel Dietrich,
Daniel Winzen, Fabian Affolter, Gerfried Fuchs, Henry78, Izidor Matušov,
Jan, Jens Maucher, Jonas Endersch, Marcel Stimberg, Michael Keppler,
Philip Stewart, Raphael J. Schmid, Scott, Seraphyn, Thomas Pitlik,
Tim Bordemann, gerber, schulz3000, brylli, Michael Odaischi,
Florian Bäuerle, Borim
Greek: Fotis Tsamis, George Chronis, Stathis Iosifidis
Hebrew: Erik, Yaron
Hindi: Abdul Rauf, Nimit Shah
Hungarian: András Kárász, Crcsz Tibcr, Gabor Kelemen, Gergely Szarka,
Krasznecz Zoltán, Muszela Balázs
Icelandic: Baldur, Palmar Thorsteinsson, Samúel Jón Gunnarsson
Indonesian: Andika Triwidada, Wayan Sudiarnata
Interlingua: Emilio Sepulveda
Italian: Antonio Roquentin, Claudio Arseni, Dario Bertini,
Davide Alberelli, Dennis Anfossi, Fabio Marconi, Francesco de Virgilio,
Gianfranco, Luca Falavigna, Luca Invernizzi, Milo Casagrande, Radina Matic
, jollyr0ger
Japanese: 9APPAT3CH, Chris Harrington, Hideki Yamane, Hiroshi Tagawa
Korean: khmirage
Lithuanian: Algimantas Margevičius, Mantas Kriaučiūnas, Rytis Ūsalis,
aurisc4, jaro
Lojban: David Futcher
Low German: Michael Odaischi, brylli
Malagasy: Thierry Andriamirado
Malay: Ibrahim Elias, abuyop, itiknila, melayubuntu
Norwegian Bokmal: Gamlerik, Runar Ingebrigtsen, Terje Andre Arnøy, Øystein
Occitan (post 1500): Cédric VALMARY (Tot en òc)
Persian: Sepehr Lajevardi
Polish: Jakub Kołakowski, Konrad, Piotr Hałas, Piotr Strębski, Radek Puła,
Tomasz 'Zen' Napierala, Tomasz Maciejewski, btomasz, szczym, tomlee,
Witek M., Xevaquor, Tomasz Woźniak
Portuguese: Emanuel Ângelo, Joel Patrão, Paulo Cabido, raul_pereira
Romanian: Adi Roiban, Jibec, Lucian Adrian Grijincu, Ovidiu
Russian: A. S. Popov, Alexander Semyonov, Alexey Ivanov, Alexey Kostyuk,
Alexey Nedilko, Andrej Surkov, DmDr, Dmitry "Divius" Tantsur,
Eugene Sysmanov, Nergal, Nikita Putko, Oleg Koptev, Peter Romov, Serfer,
Vadim Barsukov, Yevgeny Sysmanov, Yuriy Vidineev, a220, dueMiR, maxzda,
mrk, wiz, Олег Малахов
Serbian: Miloš Mandarić, Игор Миловановић, Мирослав Николић, Саша Петровић
Sinhalese: ජීවන්ත ලේකම්වසම්
Slovak: Izidor Matušov, Milan Slovák, Slavko
Slovenian: Andrej Znidarsic, Igor2x, grofaty
Spanish: Adolfo Jayme Barrientos, Alberto Caso, Carlos Alberto Ospina,
Christopher, Denis Fuenzalida, DiegoJ, Grillo, Javier García Díaz,
Javier Jardón, Javier Suárez, Jorge González, Jose R. Megia, Lionel Dricot,
Marcello Nicolas Manso, Nicolás M. Zahlut, Nukeador, Radina Matic,
Rafael Soler - Domatix, Sergio Redondo, Xuacu Saturio, ixzus, n_alex,
perriman
Swedish: Christian Widell, Daniel Nylander, David Bengtsson,
Erik Piehl Sylvén, Kalle Persson, Maxim Andersson, Nikke, Petri Rosenström,
jens persson
Telugu: Arjuna Rao Chavala, వీవెన్ (Veeven)
Thai: NullZer0, SiraNokyoongtong
Turkish: Aean, Ali KIBICI, Baris Ata, Emre Ayca, Murat Gunes,
Onur ALTINTAŞI, Yiğit Ateş, Volkan Gezer
Ukrainian: Alex Chmyr, Gontsa, Pavel Druzyak, alex, dueMiR, yvadim
Urdu: Abdul Rauf
"""
DOCUMENTERS = ["Radina Matic <[email protected]>"]
| partp/gtg-services | GTG/info.py | Python | gpl-3.0 | 11,510 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of MoaT, the Master of all Things.
##
## MoaT is Copyright © 2007-2016 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
## This header is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘scripts/_boilerplate.py’.
## Thus, do not remove the next line, or insert any blank lines above.
##BP
"""List of known Tasks"""
import os
from moat.script import Command as _Command
class Command(_Command):
name = "dummy"
usage = "[no options]"
summary = "A command that does not do anything"
description = """\
This command does not do anything. It is used for testing.
"""
foo = (
"I told you that this does not do anything.",
"Please listen to me next time.",
"Stop adding verbosity!",
"The error is intentional.",
)
async def do(self,args):
n = 0
if args:
if args[0] == "nope":
raise NotImplementedError("nope")
self.outputUsage()
return 1
while n < self.root.verbose:
print(self.foo[n], file=self.stdout)
n += 1
return self.root.verbose # test exit values
| smurfix/MoaT | moat/cmd/dummy.py | Python | gpl-3.0 | 1,858 |
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import httplib, zlib, json, binascii, time, os
from io import BytesIO
from calibre.ebooks.metadata.epub import get_metadata
from calibre.ebooks.metadata.opf2 import OPF
from calibre.srv.tests.base import LibraryBaseTest
from calibre.utils.imghdr import identify
from calibre.utils.shared_file import share_open
def setUpModule():
# Needed for cover generation
from calibre.gui2 import ensure_app, load_builtin_fonts
ensure_app(), load_builtin_fonts()
class ContentTest(LibraryBaseTest):
def test_static(self): # {{{
'Test serving of static content'
with self.create_server() as server:
conn = server.connect()
def missing(url, body=b''):
conn.request('GET', url)
r = conn.getresponse()
self.ae(r.status, httplib.NOT_FOUND)
self.ae(r.read(), body)
for prefix in ('static', 'icon'):
missing('/%s/missing.xxx' % prefix)
missing('/%s/../out.html' % prefix, b'Naughty, naughty!')
missing('/%s/C:/out.html' % prefix, b'Naughty, naughty!')
def test_response(r):
self.assertIn(b'max-age=', r.getheader('Cache-Control'))
self.assertIn(b'public', r.getheader('Cache-Control'))
self.assertIsNotNone(r.getheader('Expires'))
self.assertIsNotNone(r.getheader('ETag'))
self.assertIsNotNone(r.getheader('Content-Type'))
def test(src, url, sz=None):
raw = P(src, data=True)
conn.request('GET', url)
r = conn.getresponse()
self.ae(r.status, httplib.OK)
data = r.read()
if sz is None:
self.ae(data, raw)
else:
self.ae(sz, identify(data)[1])
test_response(r)
conn.request('GET', url, headers={'If-None-Match':r.getheader('ETag')})
r = conn.getresponse()
self.ae(r.status, httplib.NOT_MODIFIED)
self.ae(b'', r.read())
test('content-server/empty.html', '/static/empty.html')
test('images/lt.png', '/favicon.png')
test('images/lt.png', '/icon/lt.png?sz=full')
test('images/lt.png', '/icon/lt.png', sz=48)
test('images/lt.png', '/icon/lt.png?sz=16', sz=16)
# }}}
def test_get(self): # {{{
'Test /get'
with self.create_server() as server:
db = server.handler.router.ctx.library_broker.get(None)
conn = server.connect()
def get(what, book_id, library_id=None, q=''):
q = ('?' + q) if q else q
conn.request('GET', '/get/%s/%s' % (what, book_id) + (('/' + library_id) if library_id else '') + q)
r = conn.getresponse()
return r, r.read()
# Test various invalid parameters
def bad(*args):
r, data = get(*args)
self.ae(r.status, httplib.NOT_FOUND)
bad('xxx', 1)
bad('fmt1', 10)
bad('fmt1', 1, 'zzzz')
bad('fmt1', 'xx')
# Test simple fetching of format without metadata update
r, data = get('fmt1', 1, db.server_library_id)
self.ae(data, db.format(1, 'fmt1'))
self.assertIsNotNone(r.getheader('Content-Disposition'))
self.ae(r.getheader('Used-Cache'), 'no')
r, data = get('fmt1', 1)
self.ae(data, db.format(1, 'fmt1'))
self.ae(r.getheader('Used-Cache'), 'yes')
# Test fetching of format with metadata update
raw = P('quick_start/eng.epub', data=True)
r, data = get('epub', 1)
self.ae(r.status, httplib.OK)
etag = r.getheader('ETag')
self.assertIsNotNone(etag)
self.ae(r.getheader('Used-Cache'), 'no')
self.assertTrue(data.startswith(b'PK'))
self.assertGreaterEqual(len(data), len(raw))
db.set_field('title', {1:'changed'})
r, data = get('epub', 1)
self.assertNotEqual(r.getheader('ETag'), etag)
etag = r.getheader('ETag')
self.ae(r.getheader('Used-Cache'), 'no')
mi = get_metadata(BytesIO(data), extract_cover=False)
self.ae(mi.title, 'changed')
r, data = get('epub', 1)
self.ae(r.getheader('Used-Cache'), 'yes')
# Test plugboards
import calibre.library.save_to_disk as c
orig, c.DEBUG = c.DEBUG, False
try:
db.set_pref('plugboards', {u'epub': {u'content_server': [[u'changed, {title}', u'title']]}})
# this is needed as the cache is not invalidated for plugboard changes
db.set_field('title', {1:'again'})
r, data = get('epub', 1)
self.assertNotEqual(r.getheader('ETag'), etag)
etag = r.getheader('ETag')
self.ae(r.getheader('Used-Cache'), 'no')
mi = get_metadata(BytesIO(data), extract_cover=False)
self.ae(mi.title, 'changed, again')
finally:
c.DEBUG = orig
# Test the serving of covers
def change_cover(count, book_id=2):
cpath = db.format_abspath(book_id, '__COVER_INTERNAL__')
                db.set_cover({book_id:I('lt.png', data=True)})
t = time.time() + 1 + count
# Ensure mtime changes, needed on OS X where HFS+ has a 1s
# mtime resolution
os.utime(cpath, (t, t))
r, data = get('cover', 1)
self.ae(r.status, httplib.OK)
self.ae(data, db.cover(1))
self.ae(r.getheader('Used-Cache'), 'no')
self.ae(r.getheader('Content-Type'), 'image/jpeg')
r, data = get('cover', 1)
self.ae(r.status, httplib.OK)
self.ae(data, db.cover(1))
self.ae(r.getheader('Used-Cache'), 'yes')
r, data = get('cover', 3)
self.ae(r.status, httplib.OK) # Auto generated cover
r, data = get('thumb', 1)
self.ae(r.status, httplib.OK)
self.ae(identify(data), ('jpeg', 60, 60))
self.ae(r.getheader('Used-Cache'), 'no')
r, data = get('thumb', 1)
self.ae(r.status, httplib.OK)
self.ae(r.getheader('Used-Cache'), 'yes')
r, data = get('thumb', 1, q='sz=100')
self.ae(r.status, httplib.OK)
self.ae(identify(data), ('jpeg', 100, 100))
self.ae(r.getheader('Used-Cache'), 'no')
r, data = get('thumb', 1, q='sz=100x100')
self.ae(r.status, httplib.OK)
self.ae(r.getheader('Used-Cache'), 'yes')
change_cover(1, 1)
r, data = get('thumb', 1, q='sz=100')
self.ae(r.status, httplib.OK)
self.ae(identify(data), ('jpeg', 100, 100))
self.ae(r.getheader('Used-Cache'), 'no')
# Test file sharing in cache
r, data = get('cover', 2)
self.ae(r.status, httplib.OK)
self.ae(data, db.cover(2))
self.ae(r.getheader('Used-Cache'), 'no')
path = binascii.unhexlify(r.getheader('Tempfile')).decode('utf-8')
f, fdata = share_open(path, 'rb'), data
# Now force an update
change_cover(1)
r, data = get('cover', 2)
self.ae(r.status, httplib.OK)
self.ae(data, db.cover(2))
self.ae(r.getheader('Used-Cache'), 'no')
path = binascii.unhexlify(r.getheader('Tempfile')).decode('utf-8')
f2, f2data = share_open(path, 'rb'), data
# Do it again
change_cover(2)
r, data = get('cover', 2)
self.ae(r.status, httplib.OK)
self.ae(data, db.cover(2))
self.ae(r.getheader('Used-Cache'), 'no')
self.ae(f.read(), fdata)
self.ae(f2.read(), f2data)
# Test serving of metadata as opf
r, data = get('opf', 1)
self.ae(r.status, httplib.OK)
self.ae(r.getheader('Content-Type'), 'application/oebps-package+xml; charset=UTF-8')
self.assertIsNotNone(r.getheader('Last-Modified'))
opf = OPF(BytesIO(data), populate_spine=False, try_to_guess_cover=False)
self.ae(db.field_for('title', 1), opf.title)
self.ae(db.field_for('authors', 1), tuple(opf.authors))
conn.request('GET', '/get/opf/1', headers={'Accept-Encoding':'gzip'})
r = conn.getresponse()
self.ae(r.status, httplib.OK), self.ae(r.getheader('Content-Encoding'), 'gzip')
raw = r.read()
self.ae(zlib.decompress(raw, 16+zlib.MAX_WBITS), data)
# Test serving metadata as json
r, data = get('json', 1)
self.ae(r.status, httplib.OK)
self.ae(db.field_for('title', 1), json.loads(data)['title'])
conn.request('GET', '/get/json/1', headers={'Accept-Encoding':'gzip'})
r = conn.getresponse()
self.ae(r.status, httplib.OK), self.ae(r.getheader('Content-Encoding'), 'gzip')
raw = r.read()
self.ae(zlib.decompress(raw, 16+zlib.MAX_WBITS), data)
# }}}
| jelly/calibre | src/calibre/srv/tests/content.py | Python | gpl-3.0 | 9,684 |
"""
WSGI config for osmcha-django project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
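# A minimal sketch of that middleware pattern (hypothetical names; the
# LoggingMiddleware below is not part of this project and only illustrates
# wrapping the WSGI callable):
# class LoggingMiddleware(object):
#     def __init__(self, app):
#         self.app = app
#     def __call__(self, environ, start_response):
#         print(environ.get("PATH_INFO"))
#         return self.app(environ, start_response)
# application = LoggingMiddleware(application)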
| batpad/osmcha-django | config/wsgi.py | Python | gpl-3.0 | 1,452 |
"""
Copyright 2014-2021 Vincent Texier <[email protected]>
DuniterPy is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DuniterPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import asyncio
from duniterpy.api import bma
from duniterpy.api.client import Client
# CONFIG #######################################
# You can either use a completely defined endpoint : [NAME_OF_THE_API] [DOMAIN] [IPv4] [IPv6] [PORT] [PATH]
# or the simple definition : [NAME_OF_THE_API] [DOMAIN] [PORT] [PATH]
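# For instance, a completely defined endpoint could look like
# "BMAS duniter.example.org 192.0.2.10 2001:db8::1 443 bma"
# (hypothetical values; the addresses are documentation-reserved examples).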
# Here we use the secure BASIC_MERKLED_API (BMAS)
BMAS_ENDPOINT = "BMAS g1-test.duniter.org 443"
################################################
async def print_response(request):
print(await request)
async def main():
"""
Main code (asynchronous requests)
    You can send one million requests with aiohttp:
    https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
    But don't do that against a single server, it's a DDoS!
"""
# Create Client from endpoint string in Duniter format
client = Client(BMAS_ENDPOINT)
tasks = []
# Get the node summary infos by dedicated method (with json schema validation)
print("\nCall bma.node.summary:")
task = asyncio.ensure_future(client(bma.node.summary))
tasks.append(task)
# Get the money parameters located in the first block
print("\nCall bma.blockchain.parameters:")
task = asyncio.ensure_future(client(bma.blockchain.parameters))
tasks.append(task)
responses = await asyncio.gather(*tasks)
# you now have all response bodies in this variable
print("\nResponses:")
print(responses)
# Close client aiohttp session
await client.close()
# Latest duniter-python-api is asynchronous and you have to use asyncio, an asyncio loop and an "await" on the data.
# ( https://docs.python.org/3/library/asyncio.html )
asyncio.get_event_loop().run_until_complete(main())
| ucoin-io/ucoin-python-api | examples/request_data_async.py | Python | gpl-3.0 | 2,394 |
# -*- coding: utf-8 -*-
class Charset:
common_name = u'Danish Accents'
native_name = u'Danske Accenter'
key = ord(u'å')
abbreviation = 'DANA'
danishAccents = u"æøåÆØÅ"
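    # code points covered by this charset (under Python 2, map() returns a list)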
glyphs = map(ord, danishAccents)
| davelab6/pyfontaine | fontaine/charsets/internals/danish-accents.py | Python | gpl-3.0 | 232 |
class Tank:
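    """Differential-drive helper that drives a left and a right motor in tandem."""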
def __init__(self, leftMotor, rightMotor):
self._leftMotor = leftMotor
self._rightMotor = rightMotor
def off(self, brake):
self._leftMotor.off(brake = brake)
self._rightMotor.off(brake = brake)
def on(self, leftPower, rightPower):
self._leftMotor.on(power = leftPower)
self._rightMotor.on(power = rightPower)
def onForSeconds(self, leftPower, rightPower, seconds, brake, wait):
self._leftMotor.onForSeconds(power = leftPower, seconds = seconds, brake = brake, wait = False)
self._rightMotor.onForSeconds(power = rightPower, seconds = seconds, brake = brake, wait = wait)
def onForDegrees(self, leftPower, rightPower, degrees, brake, wait):
self._leftMotor.onForDegrees(power = leftPower, degrees = degrees, brake = brake, wait = False)
self._rightMotor.onForDegrees(power = rightPower, degrees = degrees, brake = brake, wait = wait)
def onForRotations(self, leftPower, rightPower, rotations, brake, wait):
self.onForDegrees(leftPower = leftPower, rightPower = rightPower, degrees = rotations * 360, brake = brake, wait = wait)
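# Example usage (hypothetical motor objects exposing the on/off API used above):
#   tank = Tank(leftMotor, rightMotor)
#   tank.onForRotations(leftPower=50, rightPower=50, rotations=2, brake=True, wait=True)
#   tank.off(brake=True)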
| LE-GO-LE-STOP/Robocup-Junior-Rescue-2016 | src/python/ev3/controllers.py | Python | gpl-3.0 | 1,072 |
# -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico. If not, see <http://www.gnu.org/licenses/>.
import indico.ext.livesync.chrome as handlers
from indico.web.flask.wrappers import IndicoBlueprint
blueprint = IndicoBlueprint('livesync', __name__, url_prefix='/livesync')
blueprint.add_url_rule('/manage', 'manage', handlers.RHAdminLiveSyncManagement)
blueprint.add_url_rule('/status', 'status', handlers.RHAdminLiveSyncStatus)
blueprint.add_url_rule('/<path:filepath>', 'htdocs', handlers.RHLiveSyncHtdocs)
| Ictp/indico | indico/ext/livesync/blueprint.py | Python | gpl-3.0 | 1,186 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Interface to the TreeTagger POS-tagger
#
# Copyright (C) Mirko Otto
# Author: Mirko Otto <[email protected]>
"""
A Python module for interfacing with the TreeTagger by Helmut Schmid.
"""
import os
from subprocess import Popen, PIPE
from nltk.internals import find_binary, find_file
from nltk.tag.api import TaggerI
def tUoB(obj, encoding='utf-8'):
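    # Decode byte strings to unicode with the given encoding; unicode and
    # non-string objects pass through unchanged.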
if isinstance(obj, basestring):
if not isinstance(obj, unicode):
obj = unicode(obj, encoding)
return obj
_treetagger_url = 'http://www.cis.uni-muenchen.de/~schmid/tools/TreeTagger/'
_treetagger_languages = {
u'latin-1':['latin', 'latinIT', 'mongolian', 'swahili'],
u'utf-8' : ['bulgarian', 'dutch', 'english', 'estonian', 'finnish', 'french', 'galician', 'german', 'italian', 'polish', 'russian', 'slovak', 'slovak2', 'spanish']}
"""The default encoding used by TreeTagger: utf-8. u'' means latin-1; ISO-8859-1"""
_treetagger_charset = [u'utf-8', u'latin-1']
class TreeTagger(TaggerI):
ur"""
A class for pos tagging with TreeTagger. The input is the paths to:
- a language trained on training data
- (optionally) the path to the TreeTagger binary
- (optionally) the encoding of the training data (default: utf-8)
This class communicates with the TreeTagger binary via pipes.
Example:
.. doctest::
:options: +SKIP
>>> from treetagger import TreeTagger
>>> tt = TreeTagger(encoding='utf-8',language='english')
>>> tt.tag(u'What is the airspeed of an unladen swallow ?')
[[u'What', u'WP', u'What'],
[u'is', u'VBZ', u'be'],
[u'the', u'DT', u'the'],
[u'airspeed', u'NN', u'airspeed'],
[u'of', u'IN', u'of'],
[u'an', u'DT', u'an'],
[u'unladen', u'JJ', u'<unknown>'],
[u'swallow', u'NN', u'swallow'],
[u'?', u'SENT', u'?']]
.. doctest::
:options: +SKIP
>>> from treetagger import TreeTagger
>>> tt = TreeTagger()
>>> tt.tag(u'Das Haus ist sehr schön und groß. Es hat auch einen hübschen Garten.')
[[u'Das', u'ART', u'd'],
[u'Haus', u'NN', u'Haus'],
[u'ist', u'VAFIN', u'sein'],
[u'sehr', u'ADV', u'sehr'],
[u'sch\xf6n', u'ADJD', u'sch\xf6n'],
[u'und', u'KON', u'und'],
[u'gro\xdf', u'ADJD', u'gro\xdf'],
[u'.', u'$.', u'.'],
[u'Es', u'PPER', u'es'],
[u'hat', u'VAFIN', u'haben'],
[u'auch', u'ADV', u'auch'],
[u'einen', u'ART', u'ein'],
[u'h\xfcbschen', u'ADJA', u'h\xfcbsch'],
[u'Garten', u'NN', u'Garten'],
[u'.', u'$.', u'.']]
"""
def __init__(self, path_to_home=None, language='german',
encoding='utf-8', verbose=False, abbreviation_list=None):
"""
Initialize the TreeTagger.
:param path_to_home: The TreeTagger binary.
:param language: Default language is german.
:param encoding: The encoding used by the model. Unicode tokens
passed to the tag() and batch_tag() methods are converted to
this charset when they are sent to TreeTagger.
The default is utf-8.
This parameter is ignored for str tokens, which are sent as-is.
The caller must ensure that tokens are encoded in the right charset.
"""
treetagger_paths = ['.', '/usr/bin', '/usr/local/bin', '/opt/local/bin',
'/Applications/bin', '~/bin', '~/Applications/bin',
'~/work/TreeTagger/cmd', '~/tree-tagger/cmd']
treetagger_paths = map(os.path.expanduser, treetagger_paths)
self._abbr_list = abbreviation_list
try:
if encoding in _treetagger_languages.keys() and language in _treetagger_languages[encoding]:
                self._encoding = encoding
treetagger_bin_name = 'tree-tagger-' + language
else:
raise LookupError('NLTK was unable to find the TreeTagger bin!')
except KeyError as e:
raise LookupError('NLTK was unable to find the TreeTagger bin!')
self._treetagger_bin = find_binary(
treetagger_bin_name, path_to_home,
env_vars=('TREETAGGER', 'TREETAGGER_HOME'),
searchpath=treetagger_paths,
url=_treetagger_url,
verbose=verbose)
def tag(self, sentences):
"""Tags a single sentence: a list of words.
The tokens should not contain any newline characters.
"""
encoding = self._encoding
# Write the actual sentences to the temporary input file
if isinstance(sentences, list):
_input = '\n'.join((x for x in sentences))
else:
_input = sentences
if isinstance(_input, unicode) and encoding:
_input = _input.encode(encoding)
# Run the tagger and get the output
if(self._abbr_list is None):
p = Popen([self._treetagger_bin],
shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE)
elif(self._abbr_list is not None):
p = Popen([self._treetagger_bin,"-a",self._abbr_list],
shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate(_input)
# Check the return code.
if p.returncode != 0:
print stderr
raise OSError('TreeTagger command failed!')
if isinstance(stdout, unicode) and encoding:
treetagger_output = stdout.decode(encoding)
else:
treetagger_output = tUoB(stdout)
# Output the tagged sentences
tagged_sentences = []
for tagged_word in treetagger_output.strip().split('\n'):
tagged_word_split = tagged_word.split('\t')
tagged_sentences.append(tagged_word_split)
return tagged_sentences
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| metzzo/Paraphrase_Identification | treetagger.py | Python | gpl-3.0 | 6,201 |
import os
import time
import sys
import subprocess
import urllib.request
import pyfastaq
class Error (Exception): pass
def syscall(cmd, allow_fail=False, verbose=False, verbose_filehandle=sys.stdout, print_errors=True, shell=True):
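    '''Run a command. On success returns (True, None); on failure either returns
    (False, combined stdout+stderr) when allow_fail is True, or exits with code 1.'''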
if verbose:
print('syscall:', cmd, flush=True, file=verbose_filehandle)
if not shell:
print('syscall string:', " ".join('"{}"'.format(_) for _ in cmd), flush=True, file=verbose_filehandle)
try:
subprocess.check_output(cmd, shell=shell, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
errors = error.output.decode()
if print_errors:
print('The following command failed with exit code', error.returncode, file=sys.stderr)
print(cmd, file=sys.stderr)
print('\nThe output was:\n', file=sys.stderr)
print(errors, file=sys.stderr, flush=True)
if allow_fail:
return False, errors
else:
sys.exit(1)
except Exception as msg:
print("Unexpected exception: ", msg, file=sys.stderr)
raise
return True, None
def decode(x):
try:
s = x.decode()
except:
return x
return s
def cat_files(infiles, outfile):
'''Cats all files in list infiles into outfile'''
f_out = pyfastaq.utils.open_file_write(outfile)
for filename in infiles:
if os.path.exists(filename):
f_in = pyfastaq.utils.open_file_read(filename)
for line in f_in:
print(line, end='', file=f_out)
pyfastaq.utils.close(f_in)
pyfastaq.utils.close(f_out)
def download_file(url, outfile, max_attempts=3, sleep_time=2, verbose=False):
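    '''Download url to outfile, retrying up to max_attempts times and sleeping
    sleep_time seconds before each attempt; raises Error if all attempts fail.'''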
if verbose:
print('Downloading "', url, '" and saving as "', outfile, '" ...', end='', sep='', flush=True)
for i in range(max_attempts):
time.sleep(sleep_time)
try:
urllib.request.urlretrieve(url, filename=outfile)
except:
continue
break
else:
raise Error('Error downloading: ' + url)
if verbose:
print(' done', flush=True)
def rmtree(input_dir):
'''Does rm -r on input_dir. Meant to replace shutil.rmtree,
which seems to be causing issues with files not getting deleted
and the directory non-empty afterwards'''
syscall('rm -rf ' + input_dir)
| sanger-pathogens/ariba | ariba/common.py | Python | gpl-3.0 | 2,380 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-11-02 22:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_remove_vendeur_code_carte_etudiante'),
]
operations = [
migrations.AddField(
model_name='vendeur',
name='code_carte_etudiante',
field=models.IntegerField(blank=True, help_text=b'Scannez la carte \xc3\xa9tudiante', null=True, verbose_name=b'Code carte \xc3\xa9tudiante'),
),
]
| Scriptodude/EnceFAL | django/main/migrations/0009_vendeur_code_carte_etudiante.py | Python | gpl-3.0 | 581 |
#!/usr/bin/env python
# flake8: noqa
import os
import json
import yaml
from nose.tools import *
from linchpin.InventoryFilters import BeakerInventory
def setup_beaker_inventory_filter():
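    """Load the mock 'general' topology and build a BeakerInventory filter."""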
global filter
global topo
filter = BeakerInventory.BeakerInventory()
provider = 'general'
base_path = '{0}'.format(os.path.dirname(
os.path.realpath(__file__))).rstrip('/')
lib_path = os.path.realpath(os.path.join(base_path, os.pardir))
mock_path = '{0}/{1}/{2}'.format(lib_path, 'mockdata', provider)
topology = 'topo.json'
topo_file = open(mock_path+'/'+topology)
topo = json.load(topo_file)
topo_file.close()
def setup_beaker_layout():
global layout
provider = 'layouts'
base_path = '{0}'.format(os.path.dirname(
os.path.realpath(__file__))).rstrip('/')
lib_path = os.path.realpath(os.path.join(base_path, os.pardir))
mock_path = '{0}/{1}/{2}'.format(lib_path, 'mockdata', provider)
template = 'parsed-layout.json'
template_file = open(mock_path+'/'+template)
layout = json.load(template_file)
@with_setup(setup_beaker_inventory_filter)
def test_get_hostnames():
blank_topo = []
hostnames = filter.get_hostnames(blank_topo)
    # hostnames should be an empty list, which evaluates to false
assert_false(hostnames)
# now set topology equal to some data and make sure the correct data is
# present in hostname
hostnames = filter.get_hostnames(topo)
expected_hosts = ["25.23.79.188", "207.49.135.104"]
assert_equal(set(hostnames), set(expected_hosts))
@with_setup(setup_beaker_inventory_filter)
def test_get_host_ips():
"""
"""
ips = filter.get_host_ips(topo)
expected_hosts = ["25.23.79.188", "207.49.135.104"]
assert_equal(set(ips), set(expected_hosts))
@with_setup(setup_beaker_inventory_filter)
def test_add_hosts_to_groups():
"""
this method currently has no body in class BeakerInventory
"""
pass
@with_setup(setup_beaker_inventory_filter)
@with_setup(setup_beaker_layout)
def test_get_inventory():
"""
"""
empty_topo = dict()
empty_topo['beaker_res'] = []
inventory = filter.get_inventory(empty_topo, layout)
assert_false(inventory)
inventory = filter.get_inventory(topo, layout)
assert_true(inventory) | herlo/linchpin | linchpin/tests/InventoryFilters/test_BeakerInventory_pass.py | Python | gpl-3.0 | 2,306 |
import mock
import os
import shutil
import sys
from stetl.etl import ETL
from stetl.outputs.execoutput import Ogr2OgrExecOutput
from tests.stetl_test_case import StetlTestCase
class Ogr2OgrExecOutputTest(StetlTestCase):
"""Unit tests for Ogr2OgrExecOutput"""
def setUp(self):
super(Ogr2OgrExecOutputTest, self).setUp()
# Initialize Stetl
curr_dir = os.path.dirname(os.path.realpath(__file__))
cfg_dict = {'config_file': os.path.join(curr_dir, 'configs/ogr2ogrexecoutput.cfg')}
self.etl = ETL(cfg_dict)
def test_class(self):
chain = StetlTestCase.get_chain(self.etl)
section = StetlTestCase.get_section(chain, -1)
class_name = self.etl.configdict.get(section, 'class')
self.assertEqual('stetl.outputs.execoutput.Ogr2OgrExecOutput', class_name)
def test_instance(self):
chain = StetlTestCase.get_chain(self.etl)
self.assertTrue(isinstance(chain.cur_comp, Ogr2OgrExecOutput))
def parse_command(self, command):
"""
Parses the command line. With this regex the string is split at whitespace, except when
whitespace is occurring between quotes. Because split keeps the capturing groups this way,
every second item is removed from the list.
"""
import re
list = re.split(r"\s+(?=([^\"]*\"[^\"]*\")*[^\"]*$)", command)
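        # re.split interleaves the capturing-group matches with the fields,
        # so keep only the even-indexed items (the actual fields)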
return list[0::2]
@mock.patch('subprocess.call', autospec=True)
def test_execute(self, mock_call):
chain = StetlTestCase.get_chain(self.etl)
chain.run()
self.assertTrue(mock_call.called)
self.assertEqual(1, mock_call.call_count)
# Check command line
args, kwargs = mock_call.call_args
list = self.parse_command(args[0])
self.assertEqual(len(list), 5)
self.assertEqual(list[0], 'ogr2ogr')
# Compare command line arguments with config
section = StetlTestCase.get_section(chain, -1)
file_path = self.etl.configdict.get(StetlTestCase.get_section(chain), 'file_path')
# Certain options should not occur
self.assertFalse('-spat' in list)
self.assertFalse('-lco' in list)
# Destination format
self.assertTrue('-f' in list)
f_idx = list.index('-f')
dest_format = self.etl.configdict.get(section, 'dest_format')
self.assertEqual(list[f_idx + 1], dest_format)
# Destination datasource
dest_data_source = self.etl.configdict.get(section, 'dest_data_source')
self.assertEqual(list[f_idx + 2], dest_data_source)
# Source datasource
self.assertEqual(list[-1], file_path)
@mock.patch('subprocess.call', autospec=True)
def test_execute_lco(self, mock_call):
chain = StetlTestCase.get_chain(self.etl, 1)
chain.run()
# Check command line
args, kwargs = mock_call.call_args
list = self.parse_command(args[0])
self.assertEqual(len(list), 9)
# Check layer creation options
self.assertTrue('-lco' in list)
lco_indices = [i for i, x in enumerate(list) if x == '-lco']
self.assertEqual(len(lco_indices), 2)
self.assertEqual(list[lco_indices[0] + 1], 'LAUNDER=YES')
self.assertEqual(list[lco_indices[1] + 1], 'PRECISION=NO')
@mock.patch('subprocess.call', autospec=True)
def test_execute_extent(self, mock_call):
chain = StetlTestCase.get_chain(self.etl, 2)
chain.run()
# Check command line
args, kwargs = mock_call.call_args
list = self.parse_command(args[0])
self.assertEqual(len(list), 10)
# Check spatial extent
section = StetlTestCase.get_section(chain, -1)
self.assertTrue('-spat' in list)
spat_idx = list.index('-spat')
spatial_extent = self.etl.configdict.get(section, 'spatial_extent')
self.assertEqual(spatial_extent.split(), list[spat_idx + 1:spat_idx + 5])
@mock.patch('subprocess.call', autospec=True)
def test_execute_options(self, mock_call):
chain = StetlTestCase.get_chain(self.etl, 3)
chain.run()
# Check command line
args, kwargs = mock_call.call_args
list = self.parse_command(args[0])
self.assertEqual(len(list), 13)
        # Check the pass-through ogr2ogr options
self.assertTrue('-append' in list)
self.assertTrue('-gt' in list)
self.assertTrue('--config' in list)
@mock.patch('subprocess.call', autospec=True)
def test_execute_gfs(self, mock_call):
chain = StetlTestCase.get_chain(self.etl, 4)
chain.run()
# Check command line
args, kwargs = mock_call.call_args
list = self.parse_command(args[0])
self.assertEqual(len(list), 5)
# Check if GFS file exists, and clean it up
file_path = self.etl.configdict.get(StetlTestCase.get_section(chain), 'file_path')
file_ext = os.path.splitext(file_path)
gfs_path = file_ext[0] + '.gfs'
self.assertTrue(os.path.exists(gfs_path))
os.remove(gfs_path)
self.assertFalse(os.path.exists(gfs_path))
@mock.patch('subprocess.call', autospec=True)
def test_execute_cleanup(self, mock_call):
# Copy test file to temporary location, because it will be cleaned up
orig_file_path = self.etl.configdict.get(StetlTestCase.get_section(StetlTestCase.get_chain(self.etl)), 'file_path')
orig_file_ext = os.path.splitext(orig_file_path)
temp_file_path = orig_file_ext[0] + "_temp" + orig_file_ext[1]
shutil.copy(orig_file_path, temp_file_path)
chain = StetlTestCase.get_chain(self.etl, 5)
chain.run()
# Check command line
args, kwargs = mock_call.call_args
list = self.parse_command(args[0])
self.assertEqual(len(list), 5)
# Check if temp file has been removed
self.assertFalse(os.path.exists(temp_file_path))
@mock.patch('subprocess.call', autospec=True)
def test_execute_cleanup_gfs(self, mock_call):
# Copy test file to temporary location, because it will be cleaned up
orig_file_path = self.etl.configdict.get(StetlTestCase.get_section(StetlTestCase.get_chain(self.etl)), 'file_path')
orig_file_ext = os.path.splitext(orig_file_path)
temp_file_path = orig_file_ext[0] + "_temp" + orig_file_ext[1]
shutil.copy(orig_file_path, temp_file_path)
chain = StetlTestCase.get_chain(self.etl, 6)
chain.run()
# Check command line
args, kwargs = mock_call.call_args
list = self.parse_command(args[0])
self.assertEqual(len(list), 5)
# Check if temp file has been removed
self.assertFalse(os.path.exists(temp_file_path))
# Check if GFS file has already been removed
gfs_path = orig_file_ext[0] + "_temp.gfs"
self.assertFalse(os.path.exists(gfs_path))
@mock.patch('subprocess.call', autospec=True)
def test_execute_no_cleanup(self, mock_call):
chain = StetlTestCase.get_chain(self.etl, 7)
file_path = self.etl.configdict.get(StetlTestCase.get_section(chain), 'file_path')
chain.run()
# Check command line
args, kwargs = mock_call.call_args
list = self.parse_command(args[0])
self.assertEqual(len(list), 5)
# Check if input file still exists
self.assertTrue(os.path.exists(file_path))
| fsteggink/stetl | tests/outputs/test_ogr2ogr_exec_output.py | Python | gpl-3.0 | 7,744 |
"""Tests for qutebrowser.keyinput."""
| mnick/qutebrowser | qutebrowser/test/keyinput/__init__.py | Python | gpl-3.0 | 38 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BlockedEmail'
db.create_table(u'news_blockedemail', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('email_domain', self.gf('django.db.models.fields.CharField')(max_length=50)),
))
db.send_create_signal(u'news', ['BlockedEmail'])
def backwards(self, orm):
# Deleting model 'BlockedEmail'
db.delete_table(u'news_blockedemail')
models = {
u'news.apiuser': {
'Meta': {'object_name': 'APIUser'},
'api_key': ('django.db.models.fields.CharField', [], {'default': "'d637403e-2efb-4300-9f26-e352d82dc106'", 'max_length': '40', 'db_index': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'news.blockedemail': {
'Meta': {'object_name': 'BlockedEmail'},
'email_domain': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'news.failedtask': {
'Meta': {'object_name': 'FailedTask'},
'args': ('jsonfield.fields.JSONField', [], {'default': '[]'}),
'einfo': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'exc': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kwargs': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'when': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
u'news.interest': {
'Meta': {'object_name': 'Interest'},
'_welcome_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'default_steward_emails': ('news.fields.CommaSeparatedEmailField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interest_id': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'news.localestewards': {
'Meta': {'unique_together': "(('interest', 'locale'),)", 'object_name': 'LocaleStewards'},
'emails': ('news.fields.CommaSeparatedEmailField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interest': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['news.Interest']"}),
'locale': ('news.fields.LocaleField', [], {'max_length': '32'})
},
u'news.newsletter': {
'Meta': {'ordering': "['order']", 'object_name': 'Newsletter'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'confirm_message': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'requires_double_optin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'vendor_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'welcome': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
u'news.newslettergroup': {
'Meta': {'object_name': 'NewsletterGroup'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'newsletters': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'newsletter_groups'", 'symmetrical': 'False', 'to': u"orm['news.Newsletter']"}),
'show': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'news.subscriber': {
'Meta': {'object_name': 'Subscriber'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'primary_key': 'True'}),
'fxa_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'f7e6b06b-17cd-46f1-bb17-18c7fe489792'", 'max_length': '40', 'db_index': 'True'})
}
}
complete_apps = ['news'] | meandavejustice/basket | news/migrations/0013_auto__add_blockedemail.py | Python | mpl-2.0 | 6,001 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-09-27 02:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20170921_2137'),
]
operations = [
migrations.AlterUniqueTogether(
name='datalabel',
unique_together=set([('label', 'data_object')]),
),
migrations.AlterUniqueTogether(
name='runlabel',
unique_together=set([('label', 'run')]),
),
migrations.AlterUniqueTogether(
name='templatelabel',
unique_together=set([('label', 'template')]),
),
]
| StanfordBioinformatics/loom | server/loomengine_server/api/migrations/0003_auto_20170927_0216.py | Python | agpl-3.0 | 702 |
# Generated by Django 1.11.14 on 2018-07-23 15:12
import django
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
if django.VERSION >= (2, 0):
dot_migration = '0001_initial'
else:
dot_migration = '0006_auto_20171214_2232'
class Migration(migrations.Migration):
dependencies = [
('oauth_dispatch', '0006_drop_application_id_constraints'),
('oauth2_provider', dot_migration),
]
operations = [
migrations.AlterField(
model_name='applicationaccess',
name='application',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='access', to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL),
),
migrations.AlterField(
model_name='applicationorganization',
name='application',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='organizations', to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL),
),
migrations.AlterField(
model_name='restrictedapplication',
name='application',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL),
),
]
| eduNEXT/edunext-platform | openedx/core/djangoapps/oauth_dispatch/migrations/0007_restore_application_id_constraints.py | Python | agpl-3.0 | 1,304 |
# -*- coding: utf-8 -*-
"""
# primary URL: http://feeds.dshield.org/block.txt
# PGP Sign.: http://feeds.dshield.org/block.txt.asc
#
# updated: Tue Dec 15 15:33:38 2015 UTC
#
# This list summarizes the top 20 attacking class C (/24) subnets
# over the last three days. The number of 'attacks' indicates the
# number of targets reporting scans from this subnet.
#
# Columns (tab delimited):
# (1) start of netblock
# (2) end of netblock
# (3) subnet (/24 for class C)
# (4) number of targets scanned
# (5) name of Network
# (6) Country
# (7) contact email address
"""
import sys
import dateutil.parser
from intelmq.lib import utils
from intelmq.lib.bot import Bot
from intelmq.lib.message import Event
class DshieldBlockParserBot(Bot):
def process(self):
report = self.receive_message()
raw_report = utils.base64_decode(report.get("raw"))
for row in raw_report.splitlines():
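            # comment lines start with '#'; the 'updated' header carries the
            # feed generation time, used below as time.source for every event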
row = row.strip()
if row.startswith("#") or len(row) == 0 or row.startswith('Start'):
if 'updated' in row:
time_str = row[row.find(': ') + 2:]
time = dateutil.parser.parse(time_str).isoformat()
continue
values = row.split("\t")
if len(values) < 3:
                continue  # not enough columns to build an event
network_ip = values[0]
network_mask = values[2]
network = '%s/%s' % (network_ip, network_mask)
extra = {}
event = Event(report)
if len(values) > 3:
extra['attacks'] = int(values[3])
if len(values) > 4:
extra['network_name'] = values[4]
if len(values) > 5:
event['source.geolocation.cc'] = values[5]
if len(values) > 6:
event['source.abuse_contact'] = values[6]
if extra:
event.add('extra', extra)
event.add('time.source', time)
event.add('source.network', network)
event.add('classification.type', 'blacklist')
event.add("raw", row)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = DshieldBlockParserBot(sys.argv[1])
bot.start()
| robcza/intelmq | intelmq/bots/parsers/dshield/parser_block.py | Python | agpl-3.0 | 2,308 |
"""EdX Branding API
Provides a way to retrieve "branded" parts of the site,
such as the site footer.
This information is exposed to:
1) Templates in the LMS.
2) Consumers of the branding API.
This ensures that branded UI elements such as the footer
are consistent across the LMS and other sites (such as
the marketing site and blog).
"""
import logging
import urlparse
from django.conf import settings
from django.utils.translation import ugettext as _
from staticfiles.storage import staticfiles_storage
from microsite_configuration import microsite
from edxmako.shortcuts import marketing_link
from branding.models import BrandingApiConfig
log = logging.getLogger("edx.footer")
def is_enabled():
"""Check whether the branding API is enabled. """
return BrandingApiConfig.current().enabled
def get_footer(is_secure=True):
"""Retrieve information used to render the footer.
This will handle both the OpenEdX and EdX.org versions
of the footer. All user-facing text is internationalized.
Currently, this does NOT support theming.
Keyword Arguments:
is_secure (bool): If True, use https:// in URLs.
Returns: dict
Example:
>>> get_footer()
{
"copyright": "(c) 2015 EdX Inc",
"logo_image": "http://www.example.com/logo.png",
"social_links": [
{
"name": "facebook",
"title": "Facebook",
"url": "http://www.facebook.com/example",
"icon-class": "fa-facebook-square"
},
...
],
"navigation_links": [
{
"name": "about",
"title": "About",
"url": "http://www.example.com/about.html"
},
...
],
"mobile_links": [
{
"name": "apple",
"title": "Apple",
"url": "http://store.apple.com/example_app"
"image": "http://example.com/static/apple_logo.png"
},
...
],
"legal_links": [
{
"url": "http://example.com/terms-of-service.html",
"name": "terms_of_service",
"title': "Terms of Service"
},
# ...
],
"openedx_link": {
"url": "http://open.edx.org",
"title": "Powered by Open edX",
"image": "http://example.com/openedx.png"
}
}
"""
return {
"copyright": _footer_copyright(),
"logo_image": _footer_logo_img(is_secure),
"social_links": _footer_social_links(),
"navigation_links": _footer_navigation_links(),
"mobile_links": _footer_mobile_links(is_secure),
"legal_links": _footer_legal_links(),
"openedx_link": _footer_openedx_link(),
}
def _footer_copyright():
"""Return the copyright to display in the footer.
Returns: unicode
"""
org_name = (
"edX Inc" if settings.FEATURES.get('IS_EDX_DOMAIN', False)
else microsite.get_value('PLATFORM_NAME', settings.PLATFORM_NAME)
)
# Translators: 'EdX', 'edX', and 'Open edX' are trademarks of 'edX Inc.'.
# Please do not translate any of these trademarks and company names.
return _(
u"\u00A9 {org_name}. All rights reserved except where noted. "
u"EdX, Open edX and the edX and Open EdX logos are registered trademarks "
u"or trademarks of edX Inc."
).format(org_name=org_name)
def _footer_openedx_link():
"""Return the image link for "powered by OpenEdX".
Args:
is_secure (bool): Whether the request is using TLS.
Returns: dict
"""
# Translators: 'Open edX' is a brand, please keep this untranslated.
# See http://openedx.org for more information.
title = _("Powered by Open edX")
return {
"url": settings.FOOTER_OPENEDX_URL,
"title": title,
"image": settings.FOOTER_OPENEDX_LOGO_IMAGE,
}
def _footer_social_links():
"""Return the social media links to display in the footer.
Returns: list
"""
links = []
for social_name in settings.SOCIAL_MEDIA_FOOTER_NAMES:
links.append(
{
"name": social_name,
"title": unicode(settings.SOCIAL_MEDIA_FOOTER_DISPLAY.get(social_name, {}).get("title", "")),
"url": settings.SOCIAL_MEDIA_FOOTER_URLS.get(social_name, "#"),
"icon-class": settings.SOCIAL_MEDIA_FOOTER_DISPLAY.get(social_name, {}).get("icon", ""),
}
)
return links
def _footer_navigation_links():
"""Return the navigation links to display in the footer. """
return [
{
"name": link_name,
"title": link_title,
"url": link_url,
}
for link_name, link_url, link_title in [
("about", marketing_link("ABOUT"), _("About")),
("blog", marketing_link("BLOG"), _("Blog")),
("news", marketing_link("NEWS"), _("News")),
("faq", marketing_link("FAQ"), _("FAQs")),
("contact", marketing_link("CONTACT"), _("Contact")),
("jobs", marketing_link("JOBS"), _("Jobs")),
("donate", marketing_link("DONATE"), _("Donate")),
("sitemap", marketing_link("SITE_MAP"), _("Sitemap")),
]
if link_url and link_url != "#"
]
def _footer_legal_links():
"""Return the legal footer links (e.g. terms of service). """
links = [
("terms_of_service_and_honor_code", marketing_link("TOS_AND_HONOR"), _("Terms of Service & Honor Code")),
("privacy_policy", marketing_link("PRIVACY"), _("Privacy Policy")),
("accessibility_policy", marketing_link("ACCESSIBILITY"), _("Accessibility Policy")),
]
# Backwards compatibility: If a combined "terms of service and honor code"
# link isn't provided, add separate TOS and honor code links.
tos_and_honor_link = marketing_link("TOS_AND_HONOR")
if not (tos_and_honor_link and tos_and_honor_link != "#"):
links.extend([
("terms_of_service", marketing_link("TOS"), _("Terms of Service")),
("honor_code", marketing_link("HONOR"), _("Honor Code")),
])
return [
{
"name": link_name,
"title": link_title,
"url": link_url,
}
for link_name, link_url, link_title in links
if link_url and link_url != "#"
]
def _footer_mobile_links(is_secure):
"""Return the mobile app store links.
Args:
is_secure (bool): Whether the request is using TLS.
Returns: list
"""
mobile_links = []
if settings.FEATURES.get('ENABLE_FOOTER_MOBILE_APP_LINKS'):
mobile_links = [
{
"name": "apple",
"title": "Apple",
"url": settings.MOBILE_STORE_URLS.get('apple', '#'),
"image": _absolute_url_staticfile(is_secure, 'images/app/app_store_badge_135x40.svg')
},
{
"name": "google",
"title": "Google",
"url": settings.MOBILE_STORE_URLS.get('google', '#'),
"image": _absolute_url_staticfile(is_secure, 'images/app/google_play_badge_45.png')
}
]
return mobile_links
def _footer_logo_img(is_secure):
"""Return the logo used for footer about link
Args:
is_secure (bool): Whether the request is using TLS.
Returns:
Absolute url to logo
"""
logo_name = microsite.get_value('FOOTER_ORGANIZATION_IMAGE', settings.FOOTER_ORGANIZATION_IMAGE)
return _absolute_url_staticfile(is_secure, logo_name)
def _absolute_url(is_secure, url_path):
"""Construct an absolute URL back to the site.
Arguments:
is_secure (bool): If true, use HTTPS as the protocol.
url_path (unicode): The path of the URL.
Returns:
unicode
"""
site_name = microsite.get_value('SITE_NAME', settings.SITE_NAME)
parts = ("https" if is_secure else "http", site_name, url_path, '', '', '')
return urlparse.urlunparse(parts)
def _absolute_url_staticfile(is_secure, name):
"""Construct an absolute URL to a static resource on the site.
Arguments:
is_secure (bool): If true, use HTTPS as the protocol.
name (unicode): The name of the static resource to retrieve.
Returns:
unicode
"""
url_path = staticfiles_storage.url(name)
# In production, the static files URL will be an absolute
# URL pointing to a CDN. If this happens, we can just
# return the URL.
if urlparse.urlparse(url_path).netloc:
return url_path
# For local development, the returned URL will be relative,
# so we need to make it absolute.
return _absolute_url(is_secure, url_path)
| shubhdev/openedx | lms/djangoapps/branding/api.py | Python | agpl-3.0 | 8,875 |
#!/usr/bin/env python3
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2020 ScyllaDB
import os
import sys
import logging
import datetime
import json
import argparse
import socket
import tempfile
from collections import defaultdict
import contextlib
# disable InsecureRequestWarning
import urllib3
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from sdcm.results_analyze import BaseResultsAnalyzer # pylint: disable=wrong-import-position
from sdcm.utils.log import setup_stdout_logger # pylint: disable=wrong-import-position
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
setup_stdout_logger()
LOGGER = logging.getLogger("microbenchmarking")
LOGGER.setLevel(logging.DEBUG)
@contextlib.contextmanager
def chdir(dirname=None):
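    """Context manager: temporarily change the working directory, restoring it on exit."""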
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
yield
finally:
os.chdir(curdir)
class LargeNumberOfDatasetsException(Exception):
def __init__(self, msg, *args, **kwargs):
super().__init__(*args, **kwargs)
self.message = msg
def __str__(self):
return "MBM: {0.message}".format(self)
class EmptyResultFolder(Exception):
def __init__(self, msg, *args, **kwargs):
super().__init__(*args, **kwargs)
self.message = msg
def __str__(self):
return "MBM: {0.message}".format(self)
class MicroBenchmarkingResultsAnalyzer(BaseResultsAnalyzer): # pylint: disable=too-many-instance-attributes
allowed_stats = ('Current', 'Stats', 'Last, commit, date', 'Diff last [%]', 'Best, commit, date', 'Diff best [%]')
higher_better = ('frag/s',)
lower_better = ('avg aio',)
submetrics = {'frag/s': ['mad f/s', 'max f/s', 'min f/s']}
def __init__(self, email_recipients, db_version=None):
super().__init__(es_index="microbenchmarking", es_doc_type="microbenchmark", email_recipients=email_recipients,
email_template_fp="results_microbenchmark.html", query_limit=10000, logger=LOGGER)
self.hostname = socket.gethostname()
self._run_date_pattern = "%Y-%m-%d_%H:%M:%S"
self.test_run_date = datetime.datetime.now().strftime(self._run_date_pattern)
self.db_version = db_version
self.build_url = os.getenv('BUILD_URL', "")
self.cur_version_info = None
self.metrics = self.higher_better + self.lower_better
def _get_prior_tests(self, filter_path, additional_filter=''):
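        """Fetch earlier results for this host and Scylla version prefix from
        Elasticsearch, optionally narrowed by additional_filter."""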
query = f"hostname:'{self.hostname}' AND versions.scylla-server.version:{self.db_version[:3]}*"
if additional_filter:
query += " AND " + additional_filter
output = self._es.search( # pylint: disable=unexpected-keyword-arg; pylint doesn't understand Elasticsearch code
index=self._es_index,
q=query,
size=self._limit,
filter_path=filter_path,
)
return output
def check_regression(self, current_results): # pylint: disable=arguments-differ
# pylint: disable=too-many-locals, too-many-statements
if not current_results:
return {}
start_date = datetime.datetime.strptime("2019-01-01", "%Y-%m-%d")
filter_path = (
"hits.hits._id", # '2018-04-02_18:36:47_large-partition-skips_[64-32.1)'
"hits.hits._source.test_args", # [64-32.1)
"hits.hits.test_group_properties.name", # large-partition-skips
"hits.hits._source.hostname", # 'godzilla.cloudius-systems.com'
"hits.hits._source.test_run_date",
"hits.hits._source.test_group_properties.name", # large-partition-skips
"hits.hits._source.results.stats.aio",
"hits.hits._source.results.stats.avg aio",
"hits.hits._source.results.stats.cpu",
"hits.hits._source.results.stats.time (s)",
"hits.hits._source.results.stats.frag/s",
"hits.hits._source.versions",
"hits.hits._source.excluded"
)
self.db_version = self.cur_version_info["version"]
tests_filtered = self._get_prior_tests(
filter_path,
additional_filter='((-_exists_:excluded) OR (excluded:false))'
)
assert tests_filtered, "No results from DB"
results = []
for doc in tests_filtered['hits']['hits']:
doc_date = datetime.datetime.strptime(
doc['_source']['versions']['scylla-server']['run_date_time'], "%Y-%m-%d %H:%M:%S")
if doc_date > start_date:
results.append(doc)
sorted_by_type = defaultdict(list)
for res in results:
test_type = "%s_%s" % (res["_source"]["test_group_properties"]["name"],
res["_source"]["test_args"])
sorted_by_type[test_type].append(res)
report_results = defaultdict(dict)
# report_results = {
# "large-partition-skips_1-0.1": {
# "aio":{
# "Current":
# "Last":
# "Diff last [%]":
# "Best":
# "Diff best [%]":
# },
# "frag/s":{
# "Current":
# "Stats": { submetrica: }
# "Last":
# "Diff last [%]":
# "Best":
# "Diff best [%]":
# },
# }
def set_results_for(current_result, metrica):
list_of_results_from_db.sort(key=lambda x: datetime.datetime.strptime(x["_source"]["test_run_date"],
self._run_date_pattern))
def get_metrica_val(val):
metrica_val = val["_source"]["results"]["stats"].get(metrica, None)
return float(metrica_val) if metrica_val else None
def get_commit_id(val):
return val["_source"]['versions']['scylla-server']['commit_id']
def get_commit_date(val):
return datetime.datetime.strptime(val["_source"]['versions']['scylla-server']['date'],
"%Y%m%d").date()
def get_best_result_for_metrica():
                # build a new list with results where the analyzed metric is not None
                # (metrics with a result of 0 are still included)
list_for_searching = [el for el in list_of_results_from_db if get_metrica_val(el) is not None]
# if list is empty ( which could be happened for new metric),
# then return first element in list, because result will be None
if not list_for_searching:
return list_of_results_from_db[0]
if metrica in self.higher_better:
return max(list_for_searching, key=get_metrica_val)
elif metrica in self.lower_better:
return min(list_for_searching, key=get_metrica_val)
else:
return list_of_results_from_db[0]
def count_diff(cur_val, dif_val):
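                # percentage change of cur_val relative to dif_val; returns None
                # when either value is missing or cur_val is not a number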
try:
cur_val = float(cur_val) if cur_val else None
except ValueError:
cur_val = None
if not cur_val:
return None
if dif_val is None:
return None
ret_dif = ((cur_val - dif_val) / dif_val) * 100 if dif_val > 0 else cur_val * 100
if metrica in self.higher_better:
ret_dif = -ret_dif
ret_dif = -ret_dif if ret_dif != 0 else 0
return ret_dif
def get_diffs(cur_val, best_result_val, last_val):
                # percentage changes vs. the last and the best results; count_diff
                # returns None when either value is missing
diff_best = count_diff(cur_val, best_result_val)
diff_last = count_diff(cur_val, last_val)
return (diff_last, diff_best)
if len(list_of_results_from_db) > 1 and get_commit_id(list_of_results_from_db[-1]) == self.cur_version_info["commit_id"]:
last_idx = -2
else: # when current results are on disk but db is not updated
last_idx = -1
cur_val = current_result["results"]["stats"].get(metrica, None)
if cur_val:
cur_val = float(cur_val)
last_val = get_metrica_val(list_of_results_from_db[last_idx])
last_commit = get_commit_id(list_of_results_from_db[last_idx])
last_commit_date = get_commit_date(list_of_results_from_db[last_idx])
best_result = get_best_result_for_metrica()
best_result_val = get_metrica_val(best_result)
best_result_commit = get_commit_id(best_result)
best_commit_date = get_commit_date(best_result)
diff_last, diff_best = get_diffs(cur_val, best_result_val, last_val)
stats = {
"Current": cur_val,
"Last, commit, date": (last_val, last_commit, last_commit_date),
"Best, commit, date": (best_result_val, best_result_commit, best_commit_date),
"Diff last [%]": diff_last, # diff in percents
"Diff best [%]": diff_best,
"has_regression": False,
"has_improvement": False,
}
if ((diff_last and diff_last < -5) or (diff_best and diff_best < -5)):
report_results[test_type]["has_diff"] = True
stats["has_regression"] = True
if ((diff_last and diff_last > 50) or (diff_best and diff_best > 50)):
report_results[test_type]['has_improve'] = True
stats['has_improvement'] = True
report_results[test_type]["dataset_name"] = current_result['dataset_name']
report_results[test_type][metrica] = stats
def set_results_for_sub(current_result, metrica):
report_results[test_type][metrica].update({'Stats': {}})
for submetrica in self.submetrics.get(metrica):
submetrica_cur_val = float(current_result["results"]["stats"][submetrica])
report_results[test_type][metrica]['Stats'].update({submetrica: submetrica_cur_val})
for test_type, current_result in current_results.items():
list_of_results_from_db = sorted_by_type[test_type]
if not list_of_results_from_db:
self.log.warning("No results for '%s' in DB. Skipping", test_type)
continue
for metrica in self.metrics:
self.log.info("Analyzing %s:%s", test_type, metrica)
set_results_for(current_result, metrica)
if metrica in list(self.submetrics.keys()):
set_results_for_sub(current_result, metrica)
return report_results
def send_html_report(self, report_results, html_report_path=None):
subject = "Microbenchmarks - Performance Regression - %s" % self.test_run_date
dashboard_path = "app/kibana#/dashboard/aee9b370-09db-11e9-a976-2fe0f5890cd0?_g=(filters%3A!())"
for_render = {
"subject": subject,
"testrun_id": self.test_run_date,
"results": report_results,
"stats_names": self.allowed_stats,
"metrics": self.metrics,
"kibana_url": self.gen_kibana_dashboard_url(dashboard_path),
"job_url": self.build_url,
"full_report": True,
"hostname": self.hostname,
"test_version": self.cur_version_info
}
if html_report_path:
html_file_path = html_report_path
else:
html_file_path = tempfile.mkstemp(suffix=".html", prefix="microbenchmarking-")[1]
self.render_to_html(for_render, html_file_path=html_file_path)
for_render["full_report"] = False
summary_html = self.render_to_html(for_render)
return self._send_report(subject, summary_html, files=(html_file_path,))
def _send_report(self, subject, summary_html, files):
return self.send_email(subject, summary_html, files=files)
def get_results(self, results_path, update_db):
# pylint: disable=too-many-locals
bad_chars = " "
with chdir(os.path.join(results_path, "perf_fast_forward_output")):
results = {}
for (fullpath, subdirs, files) in os.walk(os.getcwd()):
self.log.info(fullpath)
if (os.path.dirname(fullpath).endswith('perf_fast_forward_output') and
len(subdirs) > 1):
raise LargeNumberOfDatasetsException('Test set {} has more than one datasets: {}'.format(
os.path.basename(fullpath),
subdirs))
if not subdirs:
dataset_name = os.path.basename(fullpath)
self.log.info('Dataset name: {}'.format(dataset_name))
dirname = os.path.basename(os.path.dirname(fullpath))
self.log.info("Test set: {}".format(dirname))
for filename in files:
if filename.startswith('.'):
continue
new_filename = "".join(c for c in filename if c not in bad_chars)
test_args = os.path.splitext(new_filename)[0]
test_type = dirname + "_" + test_args
json_path = os.path.join(dirname, dataset_name, filename)
with open(json_path, encoding="utf-8") as json_file:
self.log.info("Reading: %s", json_path)
datastore = json.load(json_file)
datastore.update({'hostname': self.hostname,
'test_args': test_args,
'test_run_date': self.test_run_date,
'dataset_name': dataset_name,
'excluded': False
})
if update_db:
self._es.create_doc(index=self._es_index, doc_type=self._es_doc_type,
doc_id="%s_%s" % (self.test_run_date, test_type), body=datastore)
results[test_type] = datastore
if not results:
raise EmptyResultFolder("perf_fast_forward_output folder is empty")
self.cur_version_info = results[list(results.keys())[0]]['versions']['scylla-server']
return results
def exclude_test_run(self, testrun_id=''):
"""Exclude test results by testrun id
        Filter test results by hostname, scylla version and the test_run_date field
        and mark all found results with the flag excluded: True
Keyword Arguments:
testrun_id {str} -- testrun id as value of field test_run_date (default: {''})
"""
if not testrun_id or not self.db_version:
self.log.info("Nothing to exclude")
return
self.log.info('Exclude testrun {} from results'.format(testrun_id))
filter_path = (
"hits.hits._id", # '2018-04-02_18:36:47_large-partition-skips_[64-32.1)'
"hits.hits._source.hostname", # 'godzilla.cloudius-systems.com'
"hits.hits._source.test_run_date",
)
testrun_results = self._get_prior_tests(filter_path, f'test_run_date:\"{testrun_id}\"')
if not testrun_results:
self.log.info("Nothing to exclude")
return
for res in testrun_results['hits']['hits']:
self.log.info(res['_id'])
self.log.info(res['_source']['test_run_date'])
self._es.update_doc(index=self._es_index,
doc_type=self._es_doc_type,
doc_id=res['_id'],
body={'excluded': True})
def exclude_by_test_id(self, test_id=''):
"""Exclude test result by id
Filter test result by id (ex. 2018-10-29_18:58:51_large-partition-single-key-slice_begin_incl_0-500000_end_incl.1)
and mark the test result with flag excluded: True
Keyword Arguments:
test_id {str} -- test id from field _id (default: {''})
"""
if not test_id or not self.db_version:
self.log.info("Nothing to exclude")
return
self.log.info('Exclude test id {} from results'.format(test_id))
doc = self._es.get_doc(index=self._es_index, doc_id=test_id)
if doc:
self._es.update_doc(index=self._es_index,
doc_type=self._es_doc_type,
doc_id=doc['_id'],
body={'excluded': True})
else:
self.log.info("Nothing to exclude")
return
def exclude_before_date(self, date=''):
"""Exclude all test results before date
        Query test results by hostname and scylla version,
        convert the string to a date object,
        filter all test results whose versions.scylla-server.run_date_time is before
        the date, and mark them with the flag excluded: True
Keyword Arguments:
date {str} -- date in format YYYY-MM-DD or YYYY-MM-DD hh:mm:ss (default: {''})
"""
if not date and not self.db_version:
self.log.info("Nothing to exclude")
return
format_pattern = "%Y-%m-%d %H:%M:%S"
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
except ValueError:
try:
date = datetime.datetime.strptime(date, format_pattern)
except ValueError:
self.log.error("Wrong format of parameter --before-date. Should be \"YYYY-MM-DD\" or \"YYYY-MM-DD hh:mm:ss\"")
return
filter_path = (
"hits.hits._id",
"hits.hits._source.hostname",
"hits.hits._source.versions.scylla-server.run_date_time"
)
self.log.info('Exclude tests before date {}'.format(date))
results = self._es.search(index=self._es_index, filter_path=filter_path, size=self._limit, # pylint: disable=unexpected-keyword-arg
q="hostname:'%s' AND versions.scylla-server.version:%s*" %
(self.hostname, self.db_version[:3]))
if not results:
self.log.info('Nothing to exclude')
return
before_date_results = []
for doc in results['hits']['hits']:
doc_date = datetime.datetime.strptime(
doc['_source']['versions']['scylla-server']['run_date_time'], format_pattern)
if doc_date < date:
before_date_results.append(doc)
for res in before_date_results:
self._es.update_doc(index=self._es_index,
doc_type=self._es_doc_type,
doc_id=res['_id'],
body={'excluded': True})
def exclude_testrun_by_commit_id(self, commit_id=None):
if not commit_id and not self.db_version:
self.log.info('Nothing to exclude')
return
filter_path = (
"hits.hits._id",
"hits.hits._source.hostname",
"hits.hits._source.versions.scylla-server.commit_id",
"hits.hits._source.test_run_date"
)
self.log.info('Exclude tests by commit id #{}'.format(commit_id))
results = self._es.search(index=self._es_index, filter_path=filter_path, size=self._limit, # pylint: disable=unexpected-keyword-arg
q="hostname:'{}' \
AND versions.scylla-server.version:{}*\
AND versions.scylla-server.commit_id:'{}'".format(self.hostname, self.db_version[:3], commit_id))
if not results:
self.log.info('There is no testrun results for commit id #{}'.format(commit_id))
return
for doc in results['hits']['hits']:
self.log.info("Exclude test: %s\nCommit: #%s\nRun Date time: %s\n",
doc['_id'],
doc['_source']['versions']['scylla-server']['commit_id'],
doc['_source']['test_run_date'])
self._es.update_doc(index=self._es_index,
doc_type=self._es_doc_type,
doc_id=doc['_id'],
body={'excluded': True})
def main(args):
if args.mode == 'exclude':
mbra = MicroBenchmarkingResultsAnalyzer(email_recipients=None, db_version=args.db_version)
if args.testrun_id:
mbra.exclude_test_run(args.testrun_id)
if args.test_id:
mbra.exclude_by_test_id(args.test_id)
if args.before_date:
mbra.exclude_before_date(args.before_date)
if args.commit_id:
mbra.exclude_testrun_by_commit_id(args.commit_id)
if args.mode == 'check':
mbra = MicroBenchmarkingResultsAnalyzer(email_recipients=args.email_recipients.split(","))
results = mbra.get_results(results_path=args.results_path, update_db=args.update_db)
if results:
if args.hostname:
mbra.hostname = args.hostname
report_results = mbra.check_regression(results)
mbra.send_html_report(report_results, html_report_path=args.report_path)
else:
LOGGER.warning('Perf_fast_forward testrun is failed or not build results in json format')
sys.exit(1)
def parse_args():
    parser = argparse.ArgumentParser(description="Microbenchmarking stats utility \
        for uploading and analyzing results, or excluding test results from future analysis")
    subparser = parser.add_subparsers(dest='mode',
                                      title='Microbenchmarking utility modes',
                                      description='Microbenchmarking can be run in two modes defined by subcommand',
metavar='Modes',
help='To see subcommand help use microbenchmarking.py subcommand -h')
exclude = subparser.add_parser('exclude', help='Exclude results by testrun, testid or date')
exclude_group = exclude.add_mutually_exclusive_group(required=True)
exclude_group.add_argument('--testrun-id', action='store', default='',
help='Exclude test results for testrun id')
exclude_group.add_argument('--test-id', action='store', default='',
help='Exclude test result by id')
exclude_group.add_argument('--before-date', action='store', default='',
help='Exclude all test results before date of run stored in field versions.scylla-server.run_date_time.\
Value in format YYYY-MM-DD or YYYY-MM-DD hh:mm:ss')
exclude_group.add_argument('--commit-id', action='store', default='',
help='Exclude test run for specific commit id')
exclude.add_argument('--db-version', action='store', default='',
help='Exclude test results for scylla version',
required=True)
check = subparser.add_parser('check', help='Upload and analyze test result')
check.add_argument("--update-db", action="store_true", default=False,
help="Upload current microbenchmarking stats to ElasticSearch")
check.add_argument("--results-path", action="store", default=".",
help="Path where to search for test results")
check.add_argument("--email-recipients", action="store", default="[email protected]",
help="Comma separated email addresses list that will get the report")
check.add_argument("--report-path", action="store", default="",
help="Save HTML generated results report to the file path before sending by email")
check.add_argument("--hostname", action="store", default="",
help="Run check regression for host with hostname")
return parser.parse_args()
if __name__ == '__main__':
ARGS = parse_args()
main(ARGS)
| scylladb/scylla-cluster-tests | sdcm/microbenchmarking.py | Python | agpl-3.0 | 25,144 |
import os
import struct
import tempfile
from django.contrib.gis.gdal import GDAL_VERSION, GDALRaster
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.raster.band import GDALBand
from django.contrib.gis.shortcuts import numpy
from django.test import SimpleTestCase
from ..data.rasters.textrasters import JSON_RASTER
class GDALRasterTests(SimpleTestCase):
"""
Test a GDALRaster instance created from a file (GeoTiff).
"""
def setUp(self):
self.rs_path = os.path.join(os.path.dirname(__file__), '../data/rasters/raster.tif')
self.rs = GDALRaster(self.rs_path)
def test_rs_name_repr(self):
self.assertEqual(self.rs_path, self.rs.name)
self.assertRegex(repr(self.rs), r"<Raster object at 0x\w+>")
def test_rs_driver(self):
self.assertEqual(self.rs.driver.name, 'GTiff')
def test_rs_size(self):
self.assertEqual(self.rs.width, 163)
self.assertEqual(self.rs.height, 174)
def test_rs_srs(self):
self.assertEqual(self.rs.srs.srid, 3086)
self.assertEqual(self.rs.srs.units, (1.0, 'metre'))
def test_rs_srid(self):
rast = GDALRaster({
'width': 16,
'height': 16,
'srid': 4326,
})
self.assertEqual(rast.srid, 4326)
rast.srid = 3086
self.assertEqual(rast.srid, 3086)
def test_geotransform_and_friends(self):
# Assert correct values for file based raster
self.assertEqual(
self.rs.geotransform,
[511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0]
)
self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986])
self.assertEqual(self.rs.origin.x, 511700.4680706557)
self.assertEqual(self.rs.origin.y, 435103.3771231986)
self.assertEqual(self.rs.scale, [100.0, -100.0])
self.assertEqual(self.rs.scale.x, 100.0)
self.assertEqual(self.rs.scale.y, -100.0)
self.assertEqual(self.rs.skew, [0, 0])
self.assertEqual(self.rs.skew.x, 0)
self.assertEqual(self.rs.skew.y, 0)
# Create in-memory rasters and change gtvalues
rsmem = GDALRaster(JSON_RASTER)
# geotransform accepts both floats and ints
rsmem.geotransform = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
self.assertEqual(rsmem.geotransform, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
rsmem.geotransform = range(6)
self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)])
self.assertEqual(rsmem.origin, [0, 3])
self.assertEqual(rsmem.origin.x, 0)
self.assertEqual(rsmem.origin.y, 3)
self.assertEqual(rsmem.scale, [1, 5])
self.assertEqual(rsmem.scale.x, 1)
self.assertEqual(rsmem.scale.y, 5)
self.assertEqual(rsmem.skew, [2, 4])
self.assertEqual(rsmem.skew.x, 2)
self.assertEqual(rsmem.skew.y, 4)
self.assertEqual(rsmem.width, 5)
self.assertEqual(rsmem.height, 5)
def test_geotransform_bad_inputs(self):
rsmem = GDALRaster(JSON_RASTER)
error_geotransforms = [
[1, 2],
[1, 2, 3, 4, 5, 'foo'],
[1, 2, 3, 4, 5, 6, 'foo'],
]
msg = 'Geotransform must consist of 6 numeric values.'
for geotransform in error_geotransforms:
with self.subTest(i=geotransform), self.assertRaisesMessage(ValueError, msg):
rsmem.geotransform = geotransform
def test_rs_extent(self):
self.assertEqual(
self.rs.extent,
(511700.4680706557, 417703.3771231986, 528000.4680706557, 435103.3771231986)
)
def test_rs_bands(self):
self.assertEqual(len(self.rs.bands), 1)
self.assertIsInstance(self.rs.bands[0], GDALBand)
def test_memory_based_raster_creation(self):
# Create uint8 raster with full pixel data range (0-255)
rast = GDALRaster({
'datatype': 1,
'width': 16,
'height': 16,
'srid': 4326,
'bands': [{
'data': range(256),
'nodata_value': 255,
}],
})
# Get array from raster
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# Assert data is same as original input
self.assertEqual(result, list(range(256)))
def test_file_based_raster_creation(self):
# Prepare tempfile
rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
# Create file-based raster from scratch
GDALRaster({
'datatype': self.rs.bands[0].datatype(),
'driver': 'tif',
'name': rstfile.name,
'width': 163,
'height': 174,
'nr_of_bands': 1,
'srid': self.rs.srs.wkt,
'origin': (self.rs.origin.x, self.rs.origin.y),
'scale': (self.rs.scale.x, self.rs.scale.y),
'skew': (self.rs.skew.x, self.rs.skew.y),
'bands': [{
'data': self.rs.bands[0].data(),
'nodata_value': self.rs.bands[0].nodata_value,
}],
})
# Reload newly created raster from file
restored_raster = GDALRaster(rstfile.name)
self.assertEqual(restored_raster.srs.wkt, self.rs.srs.wkt)
self.assertEqual(restored_raster.geotransform, self.rs.geotransform)
if numpy:
numpy.testing.assert_equal(
restored_raster.bands[0].data(),
self.rs.bands[0].data()
)
else:
self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data())
def test_vsi_raster_creation(self):
# Open a raster as a file object.
with open(self.rs_path, 'rb') as dat:
# Instantiate a raster from the file binary buffer.
vsimem = GDALRaster(dat.read())
# The data of the in-memory file is equal to the source file.
result = vsimem.bands[0].data()
target = self.rs.bands[0].data()
if numpy:
result = result.flatten().tolist()
target = target.flatten().tolist()
self.assertEqual(result, target)
def test_vsi_raster_deletion(self):
path = '/vsimem/raster.tif'
# Create a vsi-based raster from scratch.
vsimem = GDALRaster({
'name': path,
'driver': 'tif',
'width': 4,
'height': 4,
'srid': 4326,
'bands': [{
'data': range(16),
}],
})
# The virtual file exists.
rst = GDALRaster(path)
self.assertEqual(rst.width, 4)
# Delete GDALRaster.
del vsimem
del rst
# The virtual file has been removed.
msg = 'Could not open the datasource at "/vsimem/raster.tif"'
with self.assertRaisesMessage(GDALException, msg):
GDALRaster(path)
def test_vsi_invalid_buffer_error(self):
msg = 'Failed creating VSI raster from the input buffer.'
with self.assertRaisesMessage(GDALException, msg):
GDALRaster(b'not-a-raster-buffer')
def test_vsi_buffer_property(self):
# Create a vsi-based raster from scratch.
rast = GDALRaster({
'name': '/vsimem/raster.tif',
'driver': 'tif',
'width': 4,
'height': 4,
'srid': 4326,
'bands': [{
'data': range(16),
}],
})
# Do a round trip from raster to buffer to raster.
result = GDALRaster(rast.vsi_buffer).bands[0].data()
if numpy:
result = result.flatten().tolist()
        # Round-tripped band data is equal to the original input.
self.assertEqual(result, list(range(16)))
# The vsi buffer is None for rasters that are not vsi based.
self.assertIsNone(self.rs.vsi_buffer)
def test_offset_size_and_shape_on_raster_creation(self):
rast = GDALRaster({
'datatype': 1,
'width': 4,
'height': 4,
'srid': 4326,
'bands': [{
'data': (1,),
'offset': (1, 1),
'size': (2, 2),
'shape': (1, 1),
'nodata_value': 2,
}],
})
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# Band data is equal to nodata value except on input block of ones.
self.assertEqual(
result,
[2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 2]
)
def test_set_nodata_value_on_raster_creation(self):
# Create raster filled with nodata values.
rast = GDALRaster({
'datatype': 1,
'width': 2,
'height': 2,
'srid': 4326,
'bands': [{'nodata_value': 23}],
})
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
# All band data is equal to nodata value.
self.assertEqual(result, [23] * 4)
def test_set_nodata_none_on_raster_creation(self):
if GDAL_VERSION < (2, 1):
self.skipTest("GDAL >= 2.1 is required for this test.")
# Create raster without data and without nodata value.
rast = GDALRaster({
'datatype': 1,
'width': 2,
'height': 2,
'srid': 4326,
'bands': [{'nodata_value': None}],
})
# Get array from raster.
result = rast.bands[0].data()
if numpy:
result = result.flatten().tolist()
        # Band data is equal to zero because no nodata value has been specified.
self.assertEqual(result, [0] * 4)
def test_raster_metadata_property(self):
# Check for required gdal version.
if GDAL_VERSION < (1, 11):
msg = 'GDAL ≥ 1.11 is required for using the metadata property.'
with self.assertRaisesMessage(ValueError, msg):
self.rs.metadata
return
data = self.rs.metadata
self.assertEqual(data['DEFAULT'], {'AREA_OR_POINT': 'Area'})
self.assertEqual(data['IMAGE_STRUCTURE'], {'INTERLEAVE': 'BAND'})
# Create file-based raster from scratch
source = GDALRaster({
'datatype': 1,
'width': 2,
'height': 2,
'srid': 4326,
'bands': [{'data': range(4), 'nodata_value': 99}],
})
# Set metadata on raster and on a band.
metadata = {
'DEFAULT': {'OWNER': 'Django', 'VERSION': '1.0', 'AREA_OR_POINT': 'Point'},
}
source.metadata = metadata
source.bands[0].metadata = metadata
self.assertEqual(source.metadata['DEFAULT'], metadata['DEFAULT'])
self.assertEqual(source.bands[0].metadata['DEFAULT'], metadata['DEFAULT'])
# Update metadata on raster.
metadata = {
'DEFAULT': {'VERSION': '2.0'},
}
source.metadata = metadata
self.assertEqual(source.metadata['DEFAULT']['VERSION'], '2.0')
# Remove metadata on raster.
metadata = {
'DEFAULT': {'OWNER': None},
}
source.metadata = metadata
self.assertNotIn('OWNER', source.metadata['DEFAULT'])
def test_raster_info_accessor(self):
if GDAL_VERSION < (2, 1):
msg = 'GDAL ≥ 2.1 is required for using the info property.'
with self.assertRaisesMessage(ValueError, msg):
self.rs.info
return
gdalinfo = """
Driver: GTiff/GeoTIFF
Files: {0}
Size is 163, 174
Coordinate System is:
PROJCS["NAD83 / Florida GDL Albers",
GEOGCS["NAD83",
DATUM["North_American_Datum_1983",
SPHEROID["GRS 1980",6378137,298.257222101,
AUTHORITY["EPSG","7019"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6269"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4269"]],
PROJECTION["Albers_Conic_Equal_Area"],
PARAMETER["standard_parallel_1",24],
PARAMETER["standard_parallel_2",31.5],
PARAMETER["latitude_of_center",24],
PARAMETER["longitude_of_center",-84],
PARAMETER["false_easting",400000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["X",EAST],
AXIS["Y",NORTH],
AUTHORITY["EPSG","3086"]]
Origin = (511700.468070655711927,435103.377123198588379)
Pixel Size = (100.000000000000000,-100.000000000000000)
Metadata:
AREA_OR_POINT=Area
Image Structure Metadata:
INTERLEAVE=BAND
Corner Coordinates:
Upper Left ( 511700.468, 435103.377) ( 82d51'46.16"W, 27d55' 1.53"N)
Lower Left ( 511700.468, 417703.377) ( 82d51'52.04"W, 27d45'37.50"N)
Upper Right ( 528000.468, 435103.377) ( 82d41'48.81"W, 27d54'56.30"N)
Lower Right ( 528000.468, 417703.377) ( 82d41'55.54"W, 27d45'32.28"N)
Center ( 519850.468, 426403.377) ( 82d46'50.64"W, 27d50'16.99"N)
Band 1 Block=163x50 Type=Byte, ColorInterp=Gray
NoData Value=15
""".format(self.rs_path)
# Data
info_dyn = [line.strip() for line in self.rs.info.split('\n') if line.strip() != '']
info_ref = [line.strip() for line in gdalinfo.split('\n') if line.strip() != '']
self.assertEqual(info_dyn, info_ref)
def test_compressed_file_based_raster_creation(self):
rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
# Make a compressed copy of an existing raster.
compressed = self.rs.warp({'papsz_options': {'compress': 'packbits'}, 'name': rstfile.name})
# Check physically if compression worked.
self.assertLess(os.path.getsize(compressed.name), os.path.getsize(self.rs.name))
if GDAL_VERSION > (1, 11):
# Create file-based raster with options from scratch.
compressed = GDALRaster({
'datatype': 1,
'driver': 'tif',
'name': rstfile.name,
'width': 40,
'height': 40,
'srid': 3086,
'origin': (500000, 400000),
'scale': (100, -100),
'skew': (0, 0),
'bands': [{
                    'data': range(40 ** 2),  # one value per pixel; ^ (XOR) was a typo
'nodata_value': 255,
}],
'papsz_options': {
'compress': 'packbits',
'pixeltype': 'signedbyte',
'blockxsize': 23,
'blockysize': 23,
}
})
# Check if options used on creation are stored in metadata.
# Reopening the raster ensures that all metadata has been written
# to the file.
compressed = GDALRaster(compressed.name)
            self.assertEqual(compressed.metadata['IMAGE_STRUCTURE']['COMPRESSION'], 'PACKBITS')
self.assertEqual(compressed.bands[0].metadata['IMAGE_STRUCTURE']['PIXELTYPE'], 'SIGNEDBYTE')
if GDAL_VERSION >= (2, 1):
self.assertIn('Block=40x23', compressed.info)
def test_raster_warp(self):
# Create in memory raster
source = GDALRaster({
'datatype': 1,
'driver': 'MEM',
'name': 'sourceraster',
'width': 4,
'height': 4,
'nr_of_bands': 1,
'srid': 3086,
'origin': (500000, 400000),
'scale': (100, -100),
'skew': (0, 0),
'bands': [{
'data': range(16),
'nodata_value': 255,
}],
})
# Test altering the scale, width, and height of a raster
data = {
'scale': [200, -200],
'width': 2,
'height': 2,
}
target = source.warp(data)
self.assertEqual(target.width, data['width'])
self.assertEqual(target.height, data['height'])
self.assertEqual(target.scale, data['scale'])
self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
self.assertEqual(target.name, 'sourceraster_copy.MEM')
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
self.assertEqual(result, [5, 7, 13, 15])
# Test altering the name and datatype (to float)
data = {
'name': '/path/to/targetraster.tif',
'datatype': 6,
}
target = source.warp(data)
self.assertEqual(target.bands[0].datatype(), 6)
self.assertEqual(target.name, '/path/to/targetraster.tif')
self.assertEqual(target.driver.name, 'MEM')
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
self.assertEqual(
result,
[0.0, 1.0, 2.0, 3.0,
4.0, 5.0, 6.0, 7.0,
8.0, 9.0, 10.0, 11.0,
12.0, 13.0, 14.0, 15.0]
)
def test_raster_warp_nodata_zone(self):
# Create in memory raster.
source = GDALRaster({
'datatype': 1,
'driver': 'MEM',
'width': 4,
'height': 4,
'srid': 3086,
'origin': (500000, 400000),
'scale': (100, -100),
'skew': (0, 0),
'bands': [{
'data': range(16),
'nodata_value': 23,
}],
})
# Warp raster onto a location that does not cover any pixels of the original.
result = source.warp({'origin': (200000, 200000)}).bands[0].data()
if numpy:
result = result.flatten().tolist()
# The result is an empty raster filled with the correct nodata value.
self.assertEqual(result, [23] * 16)
def test_raster_transform(self):
# Prepare tempfile and nodata value
rstfile = tempfile.NamedTemporaryFile(suffix='.tif')
ndv = 99
# Create in file based raster
source = GDALRaster({
'datatype': 1,
'driver': 'tif',
'name': rstfile.name,
'width': 5,
'height': 5,
'nr_of_bands': 1,
'srid': 4326,
'origin': (-5, 5),
'scale': (2, -2),
'skew': (0, 0),
'bands': [{
'data': range(25),
'nodata_value': ndv,
}],
})
        # Transform raster into srid 3086.
target = source.transform(3086)
# Reload data from disk
target = GDALRaster(target.name)
self.assertEqual(target.srs.srid, 3086)
self.assertEqual(target.width, 7)
self.assertEqual(target.height, 7)
self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype())
self.assertAlmostEqual(target.origin[0], 9124842.791079799)
self.assertAlmostEqual(target.origin[1], 1589911.6476407414)
self.assertAlmostEqual(target.scale[0], 223824.82664250192)
self.assertAlmostEqual(target.scale[1], -223824.82664250192)
self.assertEqual(target.skew, [0, 0])
result = target.bands[0].data()
if numpy:
result = result.flatten().tolist()
# The reprojection of a raster that spans over a large area
# skews the data matrix and might introduce nodata values.
self.assertEqual(
result,
[
ndv, ndv, ndv, ndv, 4, ndv, ndv,
ndv, ndv, 2, 3, 9, ndv, ndv,
ndv, 1, 2, 8, 13, 19, ndv,
0, 6, 6, 12, 18, 18, 24,
ndv, 10, 11, 16, 22, 23, ndv,
ndv, ndv, 15, 21, 22, ndv, ndv,
ndv, ndv, 20, ndv, ndv, ndv, ndv,
]
)
class GDALBandTests(SimpleTestCase):
def setUp(self):
self.rs_path = os.path.join(os.path.dirname(__file__), '../data/rasters/raster.tif')
rs = GDALRaster(self.rs_path)
self.band = rs.bands[0]
def test_band_data(self):
pam_file = self.rs_path + '.aux.xml'
self.assertEqual(self.band.width, 163)
self.assertEqual(self.band.height, 174)
self.assertEqual(self.band.description, '')
self.assertEqual(self.band.datatype(), 1)
self.assertEqual(self.band.datatype(as_string=True), 'GDT_Byte')
self.assertEqual(self.band.color_interp(), 1)
self.assertEqual(self.band.color_interp(as_string=True), 'GCI_GrayIndex')
self.assertEqual(self.band.nodata_value, 15)
if numpy:
data = self.band.data()
assert_array = numpy.loadtxt(
os.path.join(os.path.dirname(__file__), '../data/rasters/raster.numpy.txt')
)
numpy.testing.assert_equal(data, assert_array)
self.assertEqual(data.shape, (self.band.height, self.band.width))
try:
smin, smax, smean, sstd = self.band.statistics(approximate=True)
self.assertEqual(smin, 0)
self.assertEqual(smax, 9)
self.assertAlmostEqual(smean, 2.842331288343558)
self.assertAlmostEqual(sstd, 2.3965567248965356)
smin, smax, smean, sstd = self.band.statistics(approximate=False, refresh=True)
self.assertEqual(smin, 0)
self.assertEqual(smax, 9)
self.assertAlmostEqual(smean, 2.828326634228898)
self.assertAlmostEqual(sstd, 2.4260526986669095)
self.assertEqual(self.band.min, 0)
self.assertEqual(self.band.max, 9)
self.assertAlmostEqual(self.band.mean, 2.828326634228898)
self.assertAlmostEqual(self.band.std, 2.4260526986669095)
# Statistics are persisted into PAM file on band close
self.band = None
self.assertTrue(os.path.isfile(pam_file))
finally:
# Close band and remove file if created
self.band = None
if os.path.isfile(pam_file):
os.remove(pam_file)
def test_read_mode_error(self):
# Open raster in read mode
rs = GDALRaster(self.rs_path, write=False)
band = rs.bands[0]
        # Setting attributes on a read-mode raster raises an exception in the _flush method
with self.assertRaises(GDALException):
setattr(band, 'nodata_value', 10)
def test_band_data_setters(self):
# Create in-memory raster and get band
rsmem = GDALRaster({
'datatype': 1,
'driver': 'MEM',
'name': 'mem_rst',
'width': 10,
'height': 10,
'nr_of_bands': 1,
'srid': 4326,
})
bandmem = rsmem.bands[0]
# Set nodata value
bandmem.nodata_value = 99
self.assertEqual(bandmem.nodata_value, 99)
# Set data for entire dataset
bandmem.data(range(100))
if numpy:
numpy.testing.assert_equal(bandmem.data(), numpy.arange(100).reshape(10, 10))
else:
self.assertEqual(bandmem.data(), list(range(100)))
# Prepare data for setting values in subsequent tests
block = list(range(100, 104))
packed_block = struct.pack('<' + 'B B B B', *block)
# Set data from list
bandmem.data(block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from packed block
bandmem.data(packed_block, (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytes
bandmem.data(bytes(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from bytearray
bandmem.data(bytearray(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from memoryview
bandmem.data(memoryview(packed_block), (1, 1), (2, 2))
result = bandmem.data(offset=(1, 1), size=(2, 2))
if numpy:
numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2))
else:
self.assertEqual(result, block)
# Set data from numpy array
if numpy:
bandmem.data(numpy.array(block, dtype='int8').reshape(2, 2), (1, 1), (2, 2))
numpy.testing.assert_equal(
bandmem.data(offset=(1, 1), size=(2, 2)),
numpy.array(block).reshape(2, 2)
)
# Test json input data
rsmemjson = GDALRaster(JSON_RASTER)
bandmemjson = rsmemjson.bands[0]
if numpy:
numpy.testing.assert_equal(
bandmemjson.data(),
numpy.array(range(25)).reshape(5, 5)
)
else:
self.assertEqual(bandmemjson.data(), list(range(25)))
def test_band_statistics_automatic_refresh(self):
rsmem = GDALRaster({
'srid': 4326,
'width': 2,
'height': 2,
'bands': [{'data': [0] * 4, 'nodata_value': 99}],
})
band = rsmem.bands[0]
# Populate statistics cache
self.assertEqual(band.statistics(), (0, 0, 0, 0))
# Change data
band.data([1, 1, 0, 0])
# Statistics are properly updated
self.assertEqual(band.statistics(), (0.0, 1.0, 0.5, 0.5))
# Change nodata_value
band.nodata_value = 0
# Statistics are properly updated
self.assertEqual(band.statistics(), (1.0, 1.0, 1.0, 0.0))
def test_band_statistics_empty_band(self):
rsmem = GDALRaster({
'srid': 4326,
'width': 1,
'height': 1,
'bands': [{'data': [0], 'nodata_value': 0}],
})
self.assertEqual(rsmem.bands[0].statistics(), (None, None, None, None))
def test_band_delete_nodata(self):
rsmem = GDALRaster({
'srid': 4326,
'width': 1,
'height': 1,
'bands': [{'data': [0], 'nodata_value': 1}],
})
if GDAL_VERSION < (2, 1):
msg = 'GDAL >= 2.1 required to delete nodata values.'
with self.assertRaisesMessage(ValueError, msg):
rsmem.bands[0].nodata_value = None
else:
rsmem.bands[0].nodata_value = None
self.assertIsNone(rsmem.bands[0].nodata_value)
def test_band_data_replication(self):
band = GDALRaster({
'srid': 4326,
'width': 3,
'height': 3,
'bands': [{'data': range(10, 19), 'nodata_value': 0}],
}).bands[0]
# Variations for input (data, shape, expected result).
combos = (
([1], (1, 1), [1] * 9),
(range(3), (1, 3), [0, 0, 0, 1, 1, 1, 2, 2, 2]),
(range(3), (3, 1), [0, 1, 2, 0, 1, 2, 0, 1, 2]),
)
for combo in combos:
band.data(combo[0], shape=combo[1])
if numpy:
numpy.testing.assert_equal(band.data(), numpy.array(combo[2]).reshape(3, 3))
else:
self.assertEqual(band.data(), list(combo[2]))
| nesdis/djongo | tests/django_tests/tests/v21/tests/gis_tests/gdal_tests/test_raster.py | Python | agpl-3.0 | 28,256 |
#!/usr/bin/python3
# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing pseudo-XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates pseudo-XML from standalone reStructuredText '
'sources (for testing purposes). ' + default_description)
publish_cmdline(description=description)
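# Typical invocation (illustrative; the file names are placeholders):
#   rst2pseudoxml.py input.rst output.txt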
| superdesk/Live-Blog | documentor/libraries/docutils-0.9.1-py3.2/EGG-INFO/scripts/rst2pseudoxml.py | Python | agpl-3.0 | 601 |
import urllib2
import urllib
import sys
import os
import re
def writeLog(msg):
global log
log += msg + '\r\n'
print msg
# log http redirects
class MyHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
newurl = None
if 'location' in headers:
newurl = headers.getheaders('location')[0]
elif 'uri' in headers:
newurl = headers.getheaders('uri')[0]
writeLog('Redirecting to %s' % newurl)
return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
def getUrlData(url, requestHeaders={}):
url = url.replace('adstitchdemo.kaltura.com', 'localhost:1337')
writeLog('Getting %s...' % url)
request = urllib2.Request(url, headers=requestHeaders)
f = urllib2.urlopen(request)
cookies = []
for curHeader in f.info().headers:
splittedHeader = curHeader.split(':', 1)
if splittedHeader[0].strip().lower() == 'set-cookie':
cookies.append(splittedHeader[1].strip())
return (cookies, f.read())
def saveFlavorManifestDebugInfo(url, outputFile, segmentCount):
global log
cookies, origManifest = getUrlData(url)
requestHeaders = {'Cookie': '; '.join(cookies)}
# parse the manifest
tsSegments = []
manifestHeader = []
flavorInfo = ''
for curLine in origManifest.split('\n'):
curLine = curLine.strip()
if len(curLine) == 0:
continue
if curLine.startswith('#EXT-X-ENDLIST'):
continue
elif curLine.startswith('#EXTINF:') or curLine.startswith('#EXT-X-DISCONTINUITY'):
flavorInfo += '\n' + curLine
elif curLine[0] != '#':
segIds = re.findall('segment(\d+)_', curLine)
if len(segIds) > 0:
segId = int(segIds[0])
else:
segId = None
tsSegments.append([flavorInfo, curLine, segId])
flavorInfo = ''
else:
manifestHeader.append(curLine)
tsSegments = tsSegments[-segmentCount:]
# get original URLs
segId0 = None
for curIndex in xrange(len(tsSegments)):
if tsSegments[curIndex][-1] != None:
segId0 = tsSegments[curIndex][-1] - curIndex
origUrl = tsSegments[curIndex][1]
break
for curIndex in xrange(len(tsSegments)):
if segId0 != None and tsSegments[curIndex][-1] == None:
tsSegments[curIndex][-1] = re.sub('segment(\d+)_', 'segment%s_' % (segId0 + curIndex), origUrl)
else:
tsSegments[curIndex][-1] = None
# get the TS files and generate the manifest
origManifest = resultManifest = '\n'.join(manifestHeader)
curIndex = 1
for flavorInfo, tsUrl, origTsUrl in tsSegments:
tsOutputFile = '%s-%s.ts' % (os.path.splitext(outputFile)[0], curIndex)
_, tsData = getUrlData(tsUrl, requestHeaders)
writeLog('Saving to %s' % tsOutputFile)
file(tsOutputFile, 'wb').write(tsData)
if origTsUrl != None:
origTsOutputFile = '%s-%s-orig.ts' % (os.path.splitext(outputFile)[0], curIndex)
_, tsData = getUrlData(origTsUrl, requestHeaders)
writeLog('Saving to %s' % origTsOutputFile)
file(origTsOutputFile, 'wb').write(tsData)
else:
origTsOutputFile = tsOutputFile
origManifest += '%s\n%s' % (flavorInfo, os.path.basename(origTsOutputFile))
resultManifest += '%s\n%s' % (flavorInfo, os.path.basename(tsOutputFile))
curIndex += 1
resultManifest += '\n#EXT-X-ENDLIST\n'
origManifest += '\n#EXT-X-ENDLIST\n'
# write the manifests and the log
file(outputFile, 'wb').write(resultManifest)
splittedOutputFile = os.path.splitext(outputFile)
file(splittedOutputFile[0] + '-orig' + splittedOutputFile[1], 'wb').write(origManifest)
file(outputFile + '.log', 'wb').write(log)
log = ''
if len(sys.argv) < 3:
print 'Usage\n\tpython downloadM3U8.py <url> <output m3u8> [<segment count>]'
sys.exit(1)
url, outputFile = sys.argv[1:3]
segmentCount = 10
if len(sys.argv) >= 4:
segmentCount = int(sys.argv[3])
log = ''
cookieprocessor = urllib2.HTTPCookieProcessor()
opener = urllib2.build_opener(MyHTTPRedirectHandler, cookieprocessor)
urllib2.install_opener(opener)
_, origManifest = getUrlData(url)
if not '#EXT-X-STREAM-INF:' in origManifest:
# flavor manifest
saveFlavorManifestDebugInfo(url, outputFile, segmentCount)
sys.exit(0)
# master manifest
curIndex = 0
outputFilePath, outputFileBase = os.path.split(outputFile)
for curLine in origManifest.split('\n'):
curLine = curLine.strip()
if len(curLine) == 0 or curLine[0] == '#':
continue
curPath = os.path.join(outputFilePath, str(curIndex))
try:
os.mkdir(curPath)
except OSError:
pass
saveFlavorManifestDebugInfo(curLine, os.path.join(curPath, outputFileBase), segmentCount)
curIndex += 1
file(outputFile, 'wb').write(origManifest)
| jessp01/play-server | poc/utils/debug/debugStream.py | Python | agpl-3.0 | 4,577 |
# -*- coding: utf-8 -*-
# Copyright 2021 FactorLibre - Rodrigo Bonilla <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import test_l10n_es_aeat_sii
| JuanjoA/l10n-spain | l10n_es_aeat_sii_oss/tests/__init__.py | Python | agpl-3.0 | 206 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import crm_partner_binding
import crm_lead_to_opportunity
import crm_merge_opportunities
| tvtsoft/odoo8 | addons/crm/wizard/__init__.py | Python | agpl-3.0 | 189 |
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import logging
from django.core.urlresolvers import reverse
from wger.core.tests.base_testcase import WorkoutManagerTestCase
from wger.weight.models import WeightEntry
logger = logging.getLogger(__name__)
class WeightCsvImportTestCase(WorkoutManagerTestCase):
'''
Test case for the CSV import for weight entries
'''
def import_csv(self):
'''
Helper function to test the CSV import
'''
response = self.client.get(reverse('weight:import-csv'))
self.assertEqual(response.status_code, 200)
# Do a direct post request
# 1st step
count_before = WeightEntry.objects.count()
csv_input = '''Datum Gewicht KJ
05.01.10 error here 111
22.01.aa 69,2 222
27.01.10 69,6 222
02.02.10 69 222
11.02.10 70,4 222
19.02.10 71 222
26.02.10 71,9 222
26.02.10 71,9 222
19.03.10 72 222'''
response = self.client.post(reverse('weight:import-csv'),
{'stage': 1,
'csv_input': csv_input,
'date_format': '%d.%m.%y'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['weight_list']), 6)
self.assertEqual(len(response.context['error_list']), 4)
hash_value = response.context['hash_value']
# 2nd. step
response = self.client.post(reverse('weight:import-csv'),
{'stage': 2,
'hash': hash_value,
'csv_input': csv_input,
'date_format': '%d.%m.%y'})
count_after = WeightEntry.objects.count()
self.assertEqual(response.status_code, 302)
self.assertGreater(count_after, count_before)
    def test_import_csv_logged_in(self):
        '''
        Test importing weight entries from a CSV file by a logged in user
        '''
self.user_login('test')
self.import_csv()
| DeveloperMal/wger | wger/weight/tests/test_csv_import.py | Python | agpl-3.0 | 2,631 |
__author__ = 'Cosmo Harrigan'
from flask import Flask, request
from flask_restful import Api
from flask_cors import CORS, cross_origin
from apiatomcollection import *
from apitypes import *
from apishell import *
from apischeme import *
from apighost import *
from flask_restful_swagger import swagger
class RESTAPI(object):
"""
REST API for OpenCog
Implemented using the Flask micro-framework and Flask-RESTful extension
Documentation:
http://wiki.opencog.org/w/REST_API
Prerequisites:
Flask, mock, flask-restful, six, flask-restful-swagger
Default endpoint: http://127.0.0.1:5000/api/v1.1/
(Replace 127.0.0.1 with the IP address of the server if necessary)
Example request: http://127.0.0.1:5000/api/v1.1/atoms?type=ConceptNode
See: opencog/python/web/api/exampleclient.py for detailed examples of
usage, and review the method definitions in each resource for request/
response specifications.
"""
def __init__(self, atomspace):
self.atomspace = atomspace
# Initialize the web server and set the routing
self.app = Flask(__name__, static_url_path="")
self.api = swagger.docs(Api(self.app), apiVersion='1.1', api_spec_url='/api/v1.1/spec')
# Allow Cross Origin Resource Sharing (CORS) so that javascript apps
# can use this API from other domains, ports and protocols.
self.cors = CORS(self.app, resources={r"/api/*": {"origins": "*"}})
# Create and add each resource
atom_collection_api = AtomCollectionAPI.new(self.atomspace)
atom_types_api = TypesAPI
shell_api = ShellAPI
scheme_api = SchemeAPI.new(self.atomspace)
ghost_api = GhostApi.new(self.atomspace)
        # Use flask-cors's cross_origin decorator; the old `crossdomain`
        # snippet referenced previously was never imported.
        self.api.decorators = [cross_origin(origins='*', automatic_options=False)]
self.api.add_resource(atom_collection_api,
'/api/v1.1/atoms',
'/api/v1.1/atoms/<int:id>', endpoint='atoms')
self.api.add_resource(atom_types_api,
'/api/v1.1/types',
endpoint='types')
self.api.add_resource(shell_api,
'/api/v1.1/shell',
endpoint='shell')
self.api.add_resource(scheme_api,
'/api/v1.1/scheme',
endpoint='scheme')
self.api.add_resource(ghost_api,
'/api/v1.1/ghost',
endpoint='ghost')
def run(self, host='127.0.0.1', port=5000):
"""
Runs the REST API
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000``
"""
self.app.run(debug=False, host=host, port=port)
def test(self):
"""
Returns a test client for the REST API
"""
return self.app.test_client()
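# Hedged usage sketch (not part of the original module): serving the REST API
# over an AtomSpace. The AtomSpace import path is an assumption based on the
# OpenCog Python bindings; adjust it to your installation.
#
#   from opencog.atomspace import AtomSpace
#   api = RESTAPI(AtomSpace())
#   api.run(host='0.0.0.0', port=5000)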
| misgeatgit/opencog | opencog/python/web/api/apimain.py | Python | agpl-3.0 | 3,098 |
# Generated by Django 2.2.5 on 2020-01-09 16:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('salesforce', '0030_auto_20191220_1353'),
]
operations = [
migrations.AddField(
model_name='partner',
name='image_1',
field=models.ImageField(blank=True, null=True, upload_to='partner_images/'),
),
migrations.AddField(
model_name='partner',
name='image_2',
field=models.ImageField(blank=True, null=True, upload_to='partner_images/'),
),
migrations.AddField(
model_name='partner',
name='image_3',
field=models.ImageField(blank=True, null=True, upload_to='partner_images/'),
),
migrations.AddField(
model_name='partner',
name='rich_description',
field=models.TextField(blank=True, null=True),
),
]
| Connexions/openstax-cms | salesforce/migrations/0031_auto_20200109_1048.py | Python | agpl-3.0 | 979 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Matio(AutotoolsPackage):
"""matio is an C library for reading and writing Matlab MAT files"""
homepage = "https://sourceforge.net/projects/matio/"
git = "https://github.com/tbeu/matio"
url = "https://github.com/tbeu/matio/releases/download/v1.5.9/matio-1.5.9.tar.gz"
version('1.5.17', sha256='5e455527d370ab297c4abe5a2ab4d599c93ac7c1a0c85d841cc5c22f8221c400')
version('1.5.16', sha256='47ba3d5d269d5709b8d9a7385c88c8b5fb5ff875ef781a1ced4892b5b03c4f44')
version('1.5.15', sha256='21bf4587bb7f0231dbb4fcc88728468f1764c06211d5a0415cd622036f09b1cf')
version('1.5.14', sha256='0b3abb98f5cd75627122a72522db4e4280eb580bdbeafe90a8a0d2df22801f6e')
version('1.5.13', sha256='feadb2f54ba7c9db6deba8c994e401d7a1a8e7afd0fe74487691052b8139e5cb')
version('1.5.12', sha256='8695e380e465056afa5b5e20128935afe7d50e03830f9f7778a72e1e1894d8a9')
version('1.5.11', sha256='0ccced0c55c9c2cdc21348b7e16447843402d729ffaadd6135767faad7c9cf0b')
version('1.5.10', sha256='41209918cebd8cc87a4aa815fed553610911be558f027aee54af8b599c78b501')
version('1.5.9', sha256='beb7f965831ec5b4ef43f8830ee1ef1c121cd98e11b0f6e1d98713d9f860c05c')
version('1.5.8', sha256='6e49353d1d9d5127696f2e67b46cf9a1dc639663283c9bc4ce5280489c03e1f0')
version('1.5.7', sha256='84b9a17ada1ee08374fb47cc2d0e10a95b8f7f603b58576239f90b8c576fad48')
version('1.5.6', sha256='39a6e6a40d9fd8d707f4494483b8df30ffd617ba0a19c663e3685ad55ff55878')
version('1.5.5', sha256='72f00cc3cd8f7603c48867887cef886289f1f04a30f1c777aeef0b9ddd7d9c9d')
version('1.5.4', sha256='90d16dfea9070d241ef5818fee2345aee251a3c55b86b5d0314967e61fcd18ef')
version('1.5.3', sha256='85ba46e192331473dc4d8a9d266679f8f81e60c06debdc4b6f9d7906bad46257')
version('1.5.2', sha256='db02d0fb3373c3d766a606309b17e64a5d8da55610e921a9f1a0ec171e911d45')
variant("zlib", default=True,
description='support for compressed mat files')
variant("hdf5", default=True,
description='support for version 7.3 mat files via hdf5')
variant("shared", default=True, description='Enables the build of shared libraries.')
depends_on("zlib", when="+zlib")
depends_on("hdf5", when="+hdf5")
def configure_args(self):
args = []
if '+zlib' in self.spec:
args.append("--with-zlib=%s" % self.spec['zlib'].prefix)
if '+hdf5' in self.spec:
args.append("--with-hdf5=%s" % self.spec['hdf5'].prefix)
if '+shared' not in self.spec:
args.append("--disable-shared")
return args
def patch(self):
if self.spec.satisfies('%nvhpc'):
# workaround anonymous version tag linker error for the NVIDIA
# compilers
filter_file('${wl}-version-script '
'${wl}$output_objdir/$libname.ver', '',
'configure', string=True)
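    # Hedged usage note (illustrative invocations, not part of the recipe):
    #   spack install matio            # zlib and hdf5 variants on by default
    #   spack install matio~hdf5       # skip HDF5-backed v7.3 MAT file support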
| iulian787/spack | var/spack/repos/builtin/packages/matio/package.py | Python | lgpl-2.1 | 3,139 |
# This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import re
from tornado.testing import AsyncHTTPTestCase
from gosa.common.gjson import dumps
from tornado.web import decode_signed_value
class RemoteTestCase(AsyncHTTPTestCase):
def setUp(self):
super(RemoteTestCase, self).setUp()
self.__cookies = ''
self._xsrf = None
self.session_id = None
def _update_cookies(self, headers):
try:
raw = headers['Set-Cookie']
# remove expires + path
raw = re.sub(r"; expires=[^;]+;", "", raw)
raw = re.sub(r";? Path=[^,]+,", ";", raw)
# last path
raw = re.sub(r";? Path=[^,]$", "", raw)
for cookie in raw.split(";"):
(key, value) = cookie.split("=", 1)
if key == "_xsrf":
self._xsrf = value
elif key == "REMOTE_SESSION":
tmp = decode_signed_value('TecloigJink4', 'REMOTE_SESSION', value)
self.session_id = tmp.decode('ascii') if tmp else None
self.__cookies = raw
except KeyError:
return
def fetch(self, url, **kw):
headers = kw.pop('headers', {})
if self.__cookies != '':
headers['Cookie'] = self.__cookies
if self._xsrf:
headers['X-XSRFToken'] = self._xsrf
            if headers.get('Cookie') and '_xsrf' not in headers['Cookie']:
headers['Cookie'] = "%s;%s=%s" % (headers['Cookie'], '_xsrf', self._xsrf)
# if 'body' in kw:
# print("URL: {}, Body: {}, Headers: {}".format(url, kw['body'] , headers))
# else:
# print("URL: {}, Headers: {}".format(url, headers))
resp = AsyncHTTPTestCase.fetch(self, url, headers=headers, **kw)
self._update_cookies(resp.headers)
return resp
def fetch_async(self, url, **kw):
header = kw.pop('headers', {})
if self.__cookies != '':
header['Cookie'] = self.__cookies
if self._xsrf:
header['X-XSRFToken'] = self._xsrf
            if header.get('Cookie') and '_xsrf' not in header['Cookie']:
header['Cookie'] = "%s;%s=%s" % (header['Cookie'], '_xsrf', self._xsrf)
# if 'body' in kw:
# print("URL: {}, Body: {}, Headers: {}".format(url, kw['body'] , header))
# else:
# print("URL: {}, Headers: {}".format(url, header))
return self.http_client.fetch(url, self.stop, headers=header, **kw)
def login(self):
# fetch the xsrf cookie
self.fetch('/rpc', method='GET')
data = dumps({
"id": 0,
"method": "login",
"params": ["admin", "tester"]
})
# login
return self.fetch('/rpc',
method='POST',
body=data
)
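# Hedged usage sketch: a concrete test built on this helper. AsyncHTTPTestCase
# subclasses must also implement get_app(); the application factory named here
# is a hypothetical placeholder.
#
#   class LoginTest(RemoteTestCase):
#       def get_app(self):
#           return make_gosa_app()  # hypothetical factory returning the app
#
#       def test_login(self):
#           response = self.login()
#           self.assertEqual(response.code, 200)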
| gonicus/gosa | common/src/tests/RemoteTestCase.py | Python | lgpl-2.1 | 3,081 |
import os
import sys
red = "\033[31m"
reset = "\033[0m"
bright = "\033[1;32m"
dir = sys.path[0]
if len(sys.argv) >= 2:
dir = sys.argv[1]
is_test = lambda x: x.startswith('test-') and not os.path.splitext(x)[1]
tests = [f for f in os.listdir(dir) if is_test(f)]
if not tests:
print 'No tests found in "%s".' % dir
    print 'Maybe you want to run "make check" to build them?'
sys.exit()
for test in tests:
print bright + '==', test, '==' + reset
code = os.system(os.path.join(dir, test))
if code:
print (red + '*** Exited with return code ' +
str(code) + ' ***' + reset)
| GNOME/gtkimageview | tests/alltests.py | Python | lgpl-2.1 | 617 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This file contains code for creating spack mirror directories. A
mirror is an organized hierarchy containing specially named archive
files. This enabled spack to know where to find files in a mirror if
the main server for a particular package is down. Or, if the computer
where spack is run is not connected to the internet, it allows spack
to download packages directly from a mirror (e.g., on an intranet).
"""
import sys
import os
import llnl.util.tty as tty
from llnl.util.filesystem import *
import spack
import spack.error
import spack.url as url
import spack.fetch_strategy as fs
from spack.spec import Spec
from spack.version import *
from spack.util.compression import allowed_archive
def mirror_archive_filename(spec, fetcher, resourceId=None):
"""Get the name of the spec's archive in the mirror."""
if not spec.version.concrete:
raise ValueError("mirror.path requires spec with concrete version.")
if isinstance(fetcher, fs.URLFetchStrategy):
if fetcher.expand_archive:
# If we fetch with a URLFetchStrategy, use URL's archive type
ext = url.determine_url_file_extension(fetcher.url)
# If the filename does not end with a normal suffix,
# see if the package explicitly declares the extension
if not ext:
ext = spec.package.versions[spec.package.version].get(
'extension', None)
if ext:
# Remove any leading dots
ext = ext.lstrip('.')
if not ext:
msg = """\
Unable to parse extension from {0}.
If this URL is for a tarball but does not include the file extension
in the name, you can explicitly declare it with the following syntax:
version('1.2.3', 'hash', extension='tar.gz')
If this URL is for a download like a .jar or .whl that does not need
to be expanded, or an uncompressed installation script, you can tell
Spack not to expand it with the following syntax:
version('1.2.3', 'hash', expand=False)
"""
raise MirrorError(msg.format(fetcher.url))
else:
# If the archive shouldn't be expanded, don't check extension.
ext = None
else:
# Otherwise we'll make a .tar.gz ourselves
ext = 'tar.gz'
if resourceId:
filename = "%s-%s" % (resourceId, spec.version) + ".%s" % ext
else:
filename = "%s-%s" % (spec.package.name, spec.version) + ".%s" % ext
return filename
def mirror_archive_path(spec, fetcher, resourceId=None):
"""Get the relative path to the spec's archive within a mirror."""
return join_path(
spec.name, mirror_archive_filename(spec, fetcher, resourceId))
def get_matching_versions(specs, **kwargs):
"""Get a spec for EACH known version matching any spec in the list."""
matching = []
for spec in specs:
pkg = spec.package
# Skip any package that has no known versions.
if not pkg.versions:
tty.msg("No safe (checksummed) versions for package %s" % pkg.name)
continue
num_versions = kwargs.get('num_versions', 0)
matching_spec = []
for i, v in enumerate(reversed(sorted(pkg.versions))):
# Generate no more than num_versions versions for each spec.
if num_versions and i >= num_versions:
break
# Generate only versions that satisfy the spec.
if v.satisfies(spec.versions):
s = Spec(pkg.name)
s.versions = VersionList([v])
s.variants = spec.variants.copy()
# This is needed to avoid hanging references during the
# concretization phase
s.variants.spec = s
matching_spec.append(s)
if not matching_spec:
tty.warn("No known version matches spec: %s" % spec)
matching.extend(matching_spec)
return matching
def suggest_archive_basename(resource):
"""Return a tentative basename for an archive.
Raises:
RuntimeError: if the name is not an allowed archive type.
"""
basename = os.path.basename(resource.fetcher.url)
if not allowed_archive(basename):
raise RuntimeError("%s is not an allowed archive tye" % basename)
return basename
def create(path, specs, **kwargs):
"""Create a directory to be used as a spack mirror, and fill it with
package archives.
Arguments:
path: Path to create a mirror directory hierarchy in.
specs: Any package versions matching these specs will be added \
to the mirror.
Keyword args:
        no_checksum: If True, do not checksum when fetching (default False)
num_versions: Max number of versions to fetch per spec, \
if spec is ambiguous (default is 0 for all of them)
Return Value:
Returns a tuple of lists: (present, mirrored, error)
* present: Package specs that were already present.
* mirrored: Package specs that were successfully mirrored.
* error: Package specs that failed to mirror due to some error.
This routine iterates through all known package versions, and
it creates specs for those versions. If the version satisfies any spec
in the specs list, it is downloaded and added to the mirror.
"""
# Make sure nothing is in the way.
if os.path.isfile(path):
raise MirrorError("%s already exists and is a file." % path)
# automatically spec-ify anything in the specs array.
specs = [s if isinstance(s, Spec) else Spec(s) for s in specs]
# Get concrete specs for each matching version of these specs.
version_specs = get_matching_versions(
specs, num_versions=kwargs.get('num_versions', 0))
for s in version_specs:
s.concretize()
# Get the absolute path of the root before we start jumping around.
mirror_root = os.path.abspath(path)
if not os.path.isdir(mirror_root):
try:
mkdirp(mirror_root)
except OSError as e:
raise MirrorError(
"Cannot create directory '%s':" % mirror_root, str(e))
# Things to keep track of while parsing specs.
categories = {
'present': [],
'mirrored': [],
'error': []
}
# Iterate through packages and download all safe tarballs for each
for spec in version_specs:
add_single_spec(spec, mirror_root, categories, **kwargs)
return categories['present'], categories['mirrored'], categories['error']
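# Hedged example (illustrative only; the spec strings are placeholders):
# mirror one version of each matching package into a local directory.
#
#   present, mirrored, error = create('/tmp/spack-mirror',
#                                     ['libelf', '[email protected]'],
#                                     num_versions=1)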
def add_single_spec(spec, mirror_root, categories, **kwargs):
tty.msg("Adding package {pkg} to mirror".format(pkg=spec.format("$_$@")))
spec_exists_in_mirror = True
try:
with spec.package.stage:
# fetcher = stage.fetcher
# fetcher.fetch()
# ...
# fetcher.archive(archive_path)
for ii, stage in enumerate(spec.package.stage):
fetcher = stage.fetcher
if ii == 0:
# create a subdirectory for the current package@version
archive_path = os.path.abspath(join_path(
mirror_root, mirror_archive_path(spec, fetcher)))
name = spec.cformat("$_$@")
else:
resource = stage.resource
archive_path = os.path.abspath(join_path(
mirror_root,
mirror_archive_path(spec, fetcher, resource.name)))
name = "{resource} ({pkg}).".format(
resource=resource.name, pkg=spec.cformat("$_$@"))
subdir = os.path.dirname(archive_path)
mkdirp(subdir)
if os.path.exists(archive_path):
tty.msg("{name} : already added".format(name=name))
else:
spec_exists_in_mirror = False
fetcher.fetch()
if not kwargs.get('no_checksum', False):
fetcher.check()
tty.msg("{name} : checksum passed".format(name=name))
# Fetchers have to know how to archive their files. Use
# that to move/copy/create an archive in the mirror.
fetcher.archive(archive_path)
tty.msg("{name} : added".format(name=name))
if spec_exists_in_mirror:
categories['present'].append(spec)
else:
categories['mirrored'].append(spec)
except Exception as e:
if spack.debug:
sys.excepthook(*sys.exc_info())
else:
tty.warn(
"Error while fetching %s" % spec.cformat('$_$@'), e.message)
categories['error'].append(spec)
class MirrorError(spack.error.SpackError):
"""Superclass of all mirror-creation related errors."""
def __init__(self, msg, long_msg=None):
super(MirrorError, self).__init__(msg, long_msg)
| wscullin/spack | lib/spack/spack/mirror.py | Python | lgpl-2.1 | 10,271 |
#
# @BEGIN LICENSE
#
# QCDB: quantum chemistry common driver and databases
#
# Copyright (c) 2011-2017 The QCDB Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of QCDB.
#
# QCDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# QCDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with QCDB; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
r"""Module to define a class :py:class:`~BasisFamily` that associates
fitting basis sets to an orbital basis and to provide functions to
query appropriate fitting bases for any orbital basis distributed
with Psi4.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
basisfamily_list = []
class BasisFamily(object):
"""Class to associate with an orbital basis name *ornate*
the gbs file names in which the orbital basis *orbital*
(usually the coded form of *ornate*) and *jfit*, *jkfit*,
*rifit*, and *dualfit* auxiliary bases can be found.
"""
def __init__(self, ornate, orbital=None):
"""Constructor"""
# literature name of orbital basis set, e.g., aug-cc-pVTZ or 6-31+G*
self.ornate = ornate
# gbs file name of orbital basis set, e.g., aug-cc-pvtz or 6-31pgs
self.orbital = sanitize_basisname(ornate) if orbital is None else sanitize_basisname(orbital)
# gbs file name of JKFIT designed for orbital basis
self.jkfit = None
# gbs friendly file name of JFIT designed for orbital basis
self.jfit = None
# gbs file name of CFIT designed for orbital basis
self.rifit = None
# gbs file name of DUAL designed for orbital basis
self.dualfit = None
        # gbs file name of JKFIT default when self.jkfit unavailable
        # (initialized to None so __str__ works before defaults are added)
        self.jkdef = None
        # gbs file name of JFIT default when self.jfit unavailable
        self.jdef = None
        # gbs file name of CFIT default when self.rifit unavailable
        self.ridef = None
def __str__(self):
text = ''
text += """ ==> %s Family <==\n\n""" % (self.ornate)
text += """ Orbital basis: %s\n""" % (self.orbital)
#text += """ JK auxiliary basis: %s\n""" % (self.jkfit)
#text += """ MP2 auxiliary basis: %s\n""" % (self.rifit)
text += """ JK auxiliary basis: %s Def: %s\n""" % (self.jkfit, self.jkdef)
text += """ J auxiliary basis: %s Def: %s\n""" % (self.jfit, self.jdef)
text += """ MP2 auxiliary basis: %s Def: %s\n""" % (self.rifit, self.ridef)
text += """ DUAL auxiliary basis: %s\n""" % (self.dualfit)
text += """\n"""
return text
def name(self):
"""Function to return the ornate name of the orbital basis,
e.g., 6-311++G** for 6-311ppgss.
"""
return self.ornate
def add_jkfit(self, fit):
"""Function to add basis *fit* as associated fitting basis
member *jkfit* to a BasisFamily object.
"""
self.jkfit = sanitize_basisname(fit)
def add_rifit(self, fit):
"""Function to add basis *fit* as associated fitting basis
member *rifit* to a BasisFamily object.
"""
self.rifit = sanitize_basisname(fit)
def add_dualfit(self, fit):
"""Function to add basis *fit* as associated helper basis
member *dualfit* to a BasisFamily object.
"""
self.dualfit = sanitize_basisname(fit)
def add_jfit(self, fit):
"""Function to add basis *fit* as associated fitting basis
member *jfit* to a BasisFamily object.
"""
self.jfit = sanitize_basisname(fit)
def add_jfit_default(self, fit):
"""Function to add basis *fit* as associated fitting basis
member *jdef* to a BasisFamily object.
"""
self.jdef = sanitize_basisname(fit)
def add_jkfit_default(self, fit):
"""Function to add basis *fit* as associated fitting basis
member *jkdef* to a BasisFamily object.
"""
self.jkdef = sanitize_basisname(fit)
def add_rifit_default(self, fit):
"""Function to add basis *fit* as associated fitting basis
member *ridef* to a BasisFamily object.
"""
self.ridef = sanitize_basisname(fit)
def sanitize_basisname(name):
"""Function to return *name* in coded form, stripped of
characters that confuse filenames, characters into lowercase,
``+`` into ``p``, ``*`` into ``s``, and ``(``, ``)``, & ``,``
into ``_``.
"""
temp = name.lower()
temp = temp.replace('+', 'p')
temp = temp.replace('*', 's')
temp = temp.replace('(', '_')
temp = temp.replace(')', '_')
temp = temp.replace(',', '_')
return temp
def load_basis_families():
"""Function to load into the array ``basisfamily_list``
BasisFamily objects for all Psi4's standard installed bases.
"""
from .basislistdunning import load_basfam_dunning
from .basislistother import load_basfam_other
if len(basisfamily_list) == 0:
load_basfam_dunning()
load_basfam_other()
return basisfamily_list
def print_basis_families():
"""Function to print to the output file a formatted summary
of all the BasisFamily objects in ``basisfamily_list``, by
default all Psi4's standard installed bases.
"""
basisfamily_list = load_basis_families()
text = ''
for fam in basisfamily_list:
text += '%s' % (fam)
return text
def corresponding_basis(name, role='BASIS'):
"""Function to validate if the orbital basis *name* in coded or
ornate form is in Psi4's standard installed bases list. ``None``
is returned if the orbital basis is not found.
"""
role = role.upper()
basisfamily_list = load_basis_families()
for fam in basisfamily_list:
if sanitize_basisname(fam.ornate) == sanitize_basisname(name):
if role == 'ORNATE':
return fam.ornate
elif role == 'BASIS' or role == 'ORBITAL':
return fam.orbital
elif role == 'JFIT':
return fam.jfit
elif role == 'JKFIT':
return fam.jkfit
elif role == 'RIFIT':
return fam.rifit
elif role == 'DUALFIT':
return fam.dualfit
return None
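# Hedged usage sketch: querying the fitting partners of an orbital basis.
# The basis names and the returned values shown are illustrative and assume
# Psi4's standard basis families have been loaded.
#
#   corresponding_basis('aug-cc-pVTZ', role='JKFIT')   # e.g. 'aug-cc-pvtz-jkfit'
#   corresponding_basis('6-311++G**', role='ORBITAL')  # e.g. '6-311ppgss'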
| loriab/qcdb | qcdb/basislist.py | Python | lgpl-3.0 | 6,873 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('skreeg_hunter')
mobileTemplate.setLevel(66)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Herbivore Meat")
mobileTemplate.setMeatAmount(5)
mobileTemplate.setHideType("Bristley Hide")
mobileTemplate.setHideAmount(9)
mobileTemplate.setBoneType("Animal Bones")
mobileTemplate.setBoneAmount(7)
mobileTemplate.setSocialGroup("skreeg")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(False)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_skreeg.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_bite_4')
attacks.add('bm_defensive_4')
attacks.add('bm_disease_4')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('skreeg_hunter', mobileTemplate)
	return
| agry/NGECore2 | scripts/mobiles/yavin4/skreeg_hunter.py | Python | lgpl-3.0 | 1,701 |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
MappingRule class
============================================================================
The MappingRule class is designed to make it very easy to create a rule
based on a mapping of spoken-forms to semantic values.
This class has the following parameters to customize its behavior:
- *mapping* -- mapping of spoken-forms to semantic values
- *extras* -- extras elements referenced from the compound spec
- *defaults* -- default values for the extras
- *exported* -- whether the rule is exported
- *context* -- context in which the rule will be active
Each of these parameters can be passed as a (keyword) arguments to the
constructor, or defined as a class attribute in a derived class.
Example usage
............................................................................
The MappingRule class can be used to define a voice-command as follows::
class ExampleRule(MappingRule):
mapping = {
"[feed] address [bar]": Key("a-d"),
"subscribe [[to] [this] feed]": Key("a-u"),
"paste [feed] address": Key("a-d, c-v, enter"),
"feeds | feed (list | window | win)": Key("a-d, tab:2, s-tab"),
"down [<n>] (feed | feeds)": Key("a-d, tab:2, s-tab, down:%(n)d"),
"up [<n>] (feed | feeds)": Key("a-d, tab:2, s-tab, up:%(n)d"),
"open [item]": Key("a-d, tab:2, c-s"),
"newer [<n>]": Key("a-d, tab:2, up:%(n)d"),
"older [<n>]": Key("a-d, tab:2, down:%(n)d"),
"mark all [as] read": Key("cs-r"),
"mark all [as] unread": Key("cs-u"),
"search [bar]": Key("a-s"),
"search [for] <text>": Key("a-s") + Text("%(text)s\\n"),
}
extras = [
Integer("n", 1, 20),
Dictation("text"),
]
defaults = {
"n": 1,
}
rule = ExampleRule()
grammar.add_rule(rule)
Class reference
............................................................................
"""
from six import string_types
from .rule_base import Rule
from .elements import ElementBase, Compound, Alternative
from ..actions.actions import ActionBase
#---------------------------------------------------------------------------
class MappingRule(Rule):
"""
Rule class based on a mapping of spoken-forms to semantic values.
Constructor arguments:
- *name* (*str*) -- the rule's name
- *mapping* (*dict*) -- mapping of spoken-forms to semantic
values
- *extras* (sequence) -- extras elements referenced from the
spoken-forms in *mapping*
- *defaults* (*dict*) -- default values for the extras
- *exported* (boolean) -- whether the rule is exported
- *context* (*Context*) -- context in which the rule will be active
"""
mapping = {}
extras = []
defaults = {}
context = None
_default_exported = True
#-----------------------------------------------------------------------
def __init__(self, name=None, mapping=None, extras=None, defaults=None,
exported=None, context=None):
# pylint: disable=too-many-branches
if name is None: name = self.__class__.__name__
if mapping is None: mapping = self.mapping
if extras is None: extras = self.extras
if defaults is None: defaults = self.defaults
if context is None: context = self.context
# Complex handling of exported, because of clashing use of the
# exported name at the class level: property & class-value.
if exported is not None:
pass
elif (hasattr(self.__class__, "exported")
and not isinstance(self.__class__.exported, property)):
exported = self.__class__.exported
else:
exported = self._default_exported
# Type checking of initialization values.
assert isinstance(name, string_types)
assert isinstance(mapping, dict)
for key, value in mapping.items():
assert isinstance(key, string_types)
assert isinstance(extras, (list, tuple))
for item in extras:
assert isinstance(item, ElementBase)
assert exported in (True, False)
self._name = name
self._mapping = mapping
self._extras = {element.name : element for element in extras}
self._defaults = defaults
children = []
for spec, value in self._mapping.items():
c = Compound(spec, elements=self._extras, value=value)
children.append(c)
if children: element = Alternative(children)
else: element = None
Rule.__init__(self, self._name, element, exported=exported,
context=context)
#-----------------------------------------------------------------------
@property
def specs(self):
"""
Each spoken-form in the rule.
:rtype: list
"""
return [k for k, _ in self._mapping.items()]
def value(self, node):
node = node.children[0]
value = node.value()
if hasattr(value, "copy_bind"):
            # Prepare *extras* dict for passing to copy_bind().
extras = {
"_grammar": self.grammar,
"_rule": self,
"_node": node,
}
extras.update(self._defaults)
for name, element in self._extras.items():
extra_node = node.get_child_by_name(name, shallow=True)
if extra_node:
extras[name] = extra_node.value()
elif element.has_default():
extras[name] = element.default
value = value.copy_bind(extras)
return value
def process_recognition(self, node):
"""
Process a recognition of this rule.
This method is called by the containing Grammar when
this rule is recognized. This method collects information
about the recognition and then calls
MappingRule._process_recognition.
- *node* -- The root node of the recognition parse tree.
"""
item_value = node.value()
# Prepare *extras* dict for passing to _process_recognition().
extras = {
"_grammar": self.grammar,
"_rule": self,
"_node": node,
}
extras.update(self._defaults)
for name, element in self._extras.items():
extra_node = node.get_child_by_name(name, shallow=True)
if extra_node:
extras[name] = extra_node.value()
elif element.has_default():
extras[name] = element.default
# Call the method to do the actual processing.
self._process_recognition(item_value, extras)
def _process_recognition(self, value, extras):
"""
Default recognition processing.
This is the method which should be overridden in most cases
to provide derived classes with custom recognition
processing functionality.
This default processing method takes the mapping value
from the recognition and, if it is an action, executes it
with the given extras as a dynamic values.
- *value* -- The mapping value recognized.
- *extras* -- A dictionary of all elements from the
extras list contained within this recognition.
Maps element name -> element value.
"""
if isinstance(value, ActionBase):
value.execute(extras)
elif self._log_proc:
self._log_proc.warning("%s: mapping value is not an action,"
" cannot execute.", self)
| Versatilus/dragonfly | dragonfly/grammar/rule_mapping.py | Python | lgpl-3.0 | 9,033 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from autoslug.fields import AutoSlugField
from django_markdown.models import MarkdownField
from imagekit.models import ImageSpecField, ProcessedImageField
from imagekit.processors import ResizeToFill
from slugify import Slugify
slugify_lower = Slugify(to_lower=True)
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(_('Post title'), max_length=255)
content = MarkdownField(_('Content'))
published = models.BooleanField(_('Published status'), default=False)
modified = models.DateTimeField(auto_now=True)
created = models.DateTimeField(auto_now_add=True)
banner = models.ImageField(_('Banner image'),
upload_to='post_banner_images',
null=True,
blank=True)
large_thumbnail = ImageSpecField(source='banner',
processors=[ResizeToFill(600, 600)],
format='JPEG',
options={'quality': 80})
thumbnail = ImageSpecField(source='banner',
processors=[ResizeToFill(300, 300)],
format='JPEG',
options={'quality': 80})
slug = AutoSlugField(unique=True,
populate_from='title')
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:detail', kwargs={'slug': self.slug})
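# Illustrative sketch (not part of the app): creating a Post and reading one
# of its generated image variants.  The field values are hypothetical;
# imagekit renders the JPEG variants lazily, the first time a spec field is
# accessed, and ``thumbnail`` is only usable once a banner has been uploaded.
def _example_post():
    post = Post(title='Hello world', content='*markdown body*', published=True)
    post.save()
    return post.thumbnail.url if post.banner else post.get_absolute_url()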
| kevinmanncito/django-markdown-blog | build/lib/blog/models.py | Python | unlicense | 1,762 |
import urllib2
import json
import urllib
#https://farm{farm-id}.staticflickr.com/{server-id}/{id}_{secret}.jpg
jj = 0  # running index used to name the downloaded files
for ii in range(1,25):
    inp = urllib2.urlopen('https://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=76711e1fc2a9195b21fe63994864ed1c&tags=fire&content_type=1&per_page=1000&page='+str(ii)+'&format=json&nojsoncallback=1')
    data = json.load(inp)
    for elm in data['photos']['photo']:
        try:
            urllib.urlretrieve("https://farm"+str(elm['farm'])+".staticflickr.com/"+str(elm['server'])+"/"+str(elm['id'])+"_"+str(elm['secret'])+".jpg", "fire_"+str(jj)+".jpg")
            jj += 1
        except IOError:
            print "not able to download image"
| JBed/Fire_Findr | 3_Training_Data/flickr_download.py | Python | apache-2.0 | 667 |
import os
import tempfile
import yaml
from test.framework.functional.base_functional_test_case import BaseFunctionalTestCase
from test.functional.job_configs import JOB_WITH_SETUP_AND_TEARDOWN
class TestConsoleOutput(BaseFunctionalTestCase):
def setUp(self):
super().setUp()
self.project_dir = tempfile.TemporaryDirectory()
def test_logs_are_still_available_after_slave_goes_offline(self):
master = self.cluster.start_master()
self.cluster.start_slave()
build_resp = master.post_new_build({
'type': 'directory',
'config': yaml.safe_load(JOB_WITH_SETUP_AND_TEARDOWN.config[os.name])['JobWithSetupAndTeardown'],
'project_directory': self.project_dir.name,
})
build_id = build_resp['build_id']
self.assertTrue(master.block_until_build_finished(build_id, timeout=30),
'The build should finish building within the timeout.')
self.assert_build_has_successful_status(build_id)
# Bring down the single slave and assert that console output for the build is still available.
self.cluster.kill_slaves()
console_output_1 = master.get_console_output(build_id=build_id, subjob_id=0, atom_id=0)
self.assertEqual(
console_output_1['content'].strip(),
'Doing subjob 1.'
)
console_output_2 = master.get_console_output(build_id=build_id, subjob_id=1, atom_id=0)
self.assertEqual(
console_output_2['content'].strip(),
'Doing subjob 2.'
)
| nickzuber/ClusterRunner | test/functional/master/test_console_output.py | Python | apache-2.0 | 1,578 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
g_t,
t,
m,
v,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
return param_t, m_t, v_t
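# Illustrative sketch (not part of the original test suite): driving the NumPy
# reference implementation above by hand.  The values are hypothetical; the
# point is that repeated steps move the parameter against the gradient.
def _demo_adam_update_numpy():
  param, m, v = np.array([1.0]), 0.0, 0.0
  grad = np.array([0.1])
  for t in range(3):
    param, m, v = adam_update_numpy(param, grad, t, m, v)
  return param  # slightly below 1.0, since the gradient is positive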
def adam_update_numpy_amsgrad(param,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t = beta1 * m + (1 - beta1) * g_t
v_t = beta2 * v + (1 - beta2) * g_t * g_t
vhat_t = np.maximum(vhat, v_t)
param_t = param - lr_t * m_t / (np.sqrt(vhat_t) + epsilon)
return param_t, m_t, v_t, vhat_t
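# Illustrative sketch (not part of the original test suite): AMSGrad keeps the
# running maximum of the second-moment estimate, so vhat cannot decrease even
# after the gradient drops to zero and the raw v starts to decay.
def _demo_amsgrad_vhat():
  param, m, v, vhat = np.array([1.0]), 0.0, 0.0, 0.0
  for t, g in enumerate([np.array([1.0]), np.array([0.0])]):
    param, m, v, vhat = adam_update_numpy_amsgrad(param, g, t, m, v, vhat)
  return vhat  # stays at (1 - 0.999) * 1.0 even though v has decayed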
def adam_sparse_update_numpy_amsgrad(param,
indices,
g_t,
t,
m,
v,
vhat,
lr=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-7):
m_t, v_t, vhat_t, param_t = (np.copy(m), np.copy(v), np.copy(vhat),
np.copy(param))
lr_t = lr * np.sqrt(1 - beta2**(t + 1)) / (1 - beta1**(t + 1))
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = beta2 * v[indices] + (1 - beta2) * g_t * g_t
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
v_hat_t = np.maximum(vhat_t, v_t)
v_hat_t_slice = v_hat_t[indices]
param_t_slice = param[indices] - (
lr_t * (m_t_slice / (np.sqrt(v_hat_t_slice) + epsilon)))
param_t[indices] = param_t_slice
  return param_t, m_t, v_t, v_hat_t  # propagate the elementwise max so vhat stays monotone
def get_beta_accumulators(opt, dtype):
local_step = math_ops.cast(opt.iterations + 1, dtype)
beta_1_t = math_ops.cast(opt._get_hyper("beta_1"), dtype)
beta_1_power = math_ops.pow(beta_1_t, local_step)
beta_2_t = math_ops.cast(opt._get_hyper("beta_2"), dtype)
beta_2_power = math_ops.pow(beta_2_t, local_step)
return (beta_1_power, beta_2_power)
class AdamOptimizerTest(test.TestCase):
def testSparse(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adam.Adam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSparseDevicePlacement(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for index_dtype in [dtypes.int32, dtypes.int64]:
with ops.Graph().as_default(), self.cached_session(
force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adam.Adam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
variables.global_variables_initializer().run()
minimize_op.run()
def testSparseRepeatedIndices(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adam.Adam().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.Adam().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 0.001
beta1 = lambda: 0.9
beta2 = lambda: 0.999
epsilon = lambda: 1e-8
if not use_callable_params:
learning_rate = learning_rate()
beta1 = beta1()
beta2 = beta2()
epsilon = epsilon()
opt = adam.Adam(learning_rate=learning_rate)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic()
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_callable_params=True)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasicWithAmsgrad(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, v0hat, m1, v1, v1hat = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(amsgrad=True)
if not context.executing_eagerly():
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if not context.executing_eagerly():
self.evaluate(update)
else:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, m0, v0, v0hat = adam_update_numpy_amsgrad(
var0_np, grads0_np, t, m0, v0, v0hat)
var1_np, m1, v1, v1hat = adam_update_numpy_amsgrad(
var1_np, grads1_np, t, m1, v1, v1hat)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testSparseWithAmsgrad(self):
# dtypes.half does not work on gpu + eager.
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
m0 = np.array([[0.0], [0.0]])
v0 = np.array([[0.0], [0.0]])
v0hat = np.array([[0.0], [0.0]])
indices_np = np.array([1])
indices = constant_op.constant(indices_np, dtype=dtypes.int32)
var0_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = variables.Variable(var0_np, dtype=dtype)
aggregated_update_var = variables.Variable(var0_np, dtype=dtype)
grads0_np = np.array([[0.2]], dtype=dtype.as_numpy_dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant([0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]), constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(grads0_np, indices,
constant_op.constant([2, 1]))
opt_repeated = adam.Adam(amsgrad=True)
opt_aggregated = adam.Adam(amsgrad=True)
if not context.executing_eagerly():
repeated_update = opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
for t in range(3):
if not context.executing_eagerly():
self.evaluate(repeated_update)
self.evaluate(aggregated_update)
else:
opt_repeated.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
opt_aggregated.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
var0_np, m0, v0, v0hat = adam_sparse_update_numpy_amsgrad(
var0_np, indices_np, grads0_np, t, m0, v0, v0hat)
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(aggregated_update_var))
self.assertAllCloseAccordingToType(
self.evaluate(aggregated_update_var),
self.evaluate(repeated_index_update_var))
def testBasicWithLearningRateDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
decay = 0.5
opt = adam.Adam(
learning_rate=learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
decay=decay)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 0.001
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
beta_1 = 0.9
beta_2 = 0.999
epsilon = 1e-7
opt = adam.Adam(
learning_rate=lr_schedule,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Run 3 steps of Adam
for t in range(3):
self.evaluate(update)
lr_np = learning_rate / (1 + decay * t)
var0_np, m0, v0 = adam_update_numpy(
var0_np, grads0_np, t, m0, v0, lr=lr_np)
var1_np, m1, v1 = adam_update_numpy(
var1_np, grads1_np, t, m1, v1, lr=lr_np)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(3):
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta_1_power))
self.assertAllCloseAccordingToType(0.999**(t + 1),
self.evaluate(beta_2_power))
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adam.Adam(1.)
opt.minimize(lambda: v1 + v2, var_list=[v1, v2])
      # There should be the iteration count plus two slot variables (m and v)
      # for each of v1 and v2 -- five variables in total.
self.assertEqual(5, len(set(v.ref() for v in opt.variables())))
self.assertEqual(
self.evaluate(opt.variables()[0]), self.evaluate(opt.iterations))
def testSetWeightsFromV1AdamWithoutMinimize(self):
keras_v1_adam = optimizers.Adam()
keras_v2_adam = adam.Adam()
keras_v2_adam.set_weights(keras_v1_adam.get_weights())
keras_v1_iteration = keras_v1_adam.iterations
keras_v2_iteration = keras_v2_adam.iterations
self.evaluate(variables.global_variables_initializer())
self.assertEqual(
self.evaluate(keras_v1_iteration), self.evaluate(keras_v2_iteration))
def testConstructAdamWithLR(self):
opt = adam.Adam(lr=1.0)
opt_2 = adam.Adam(learning_rate=0.1, lr=1.0)
opt_3 = adam.Adam(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
if __name__ == "__main__":
test.main()
| xzturn/tensorflow | tensorflow/python/keras/optimizer_v2/adam_test.py | Python | apache-2.0 | 25,146 |
# Copyright 2012-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A shortest-path forwarding application.
This is a standalone L2 switch that learns ethernet addresses
across the entire network and picks short paths between them.
You shouldn't really write an application this way -- you should
keep more state in the controller (that is, your flow tables),
and/or you should make your topology more static. However, this
does (mostly) work. :)
Depends on openflow.discovery
Works with openflow.spanning_tree
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.revent import *
from pox.lib.recoco import Timer
from collections import defaultdict
from pox.openflow.discovery import Discovery
from pox.lib.util import dpid_to_str
import time
log = core.getLogger()
# Adjacency map. [sw1][sw2] -> port from sw1 to sw2
adjacency = defaultdict(lambda:defaultdict(lambda:None))
# Switches we know of. [dpid] -> Switch
switches = {}
# ethaddr -> (switch, port)
mac_map = {}
# [sw1][sw2] -> (distance, intermediate)
path_map = defaultdict(lambda:defaultdict(lambda:(None,None)))
# Waiting path. (dpid,xid)->WaitingPath
waiting_paths = {}
# Time to not flood in seconds
FLOOD_HOLDDOWN = 5
# Flow timeouts
FLOW_IDLE_TIMEOUT = 10
FLOW_HARD_TIMEOUT = 30
# How long do we allow for a path to be set up?
PATH_SETUP_TIME = 4
def _calc_paths ():
"""
Essentially Floyd-Warshall algorithm
"""
def dump ():
for i in sws:
for j in sws:
a = path_map[i][j][0]
#a = adjacency[i][j]
if a is None: a = "*"
print a,
print
sws = switches.values()
path_map.clear()
for k in sws:
for j,port in adjacency[k].iteritems():
if port is None: continue
path_map[k][j] = (1,None)
path_map[k][k] = (0,None) # distance, intermediate
#dump()
for k in sws:
for i in sws:
for j in sws:
if path_map[i][k][0] is not None:
if path_map[k][j][0] is not None:
# i -> k -> j exists
ikj_dist = path_map[i][k][0]+path_map[k][j][0]
if path_map[i][j][0] is None or ikj_dist < path_map[i][j][0]:
# i -> k -> j is better than existing
path_map[i][j] = (ikj_dist, k)
#print "--------------------"
#dump()
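# Minimal sketch (not part of POX): the same Floyd-Warshall relaxation that
# _calc_paths() performs, run on a plain dict-of-dicts with hypothetical
# switch names.  It computes shortest hop counts; _calc_paths() additionally
# records an intermediate node so paths can be reconstructed.
def _floyd_warshall_demo ():
  nodes = ['s1', 's2', 's3']
  dist = dict((i, dict((j, 0 if i == j else None) for j in nodes))
              for i in nodes)
  dist['s1']['s2'] = dist['s2']['s1'] = 1 # s1 <-> s2 are adjacent
  dist['s2']['s3'] = dist['s3']['s2'] = 1 # s2 <-> s3 are adjacent
  for k in nodes:
    for i in nodes:
      for j in nodes:
        if dist[i][k] is not None and dist[k][j] is not None:
          alt = dist[i][k] + dist[k][j]
          if dist[i][j] is None or alt < dist[i][j]:
            dist[i][j] = alt
  return dist # dist['s1']['s3'] == 2, via s2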
def _get_raw_path (src, dst):
"""
Get a raw path (just a list of nodes to traverse)
"""
if len(path_map) == 0: _calc_paths()
if src is dst:
# We're here!
return []
if path_map[src][dst][0] is None:
return None
intermediate = path_map[src][dst][1]
if intermediate is None:
# Directly connected
return []
return _get_raw_path(src, intermediate) + [intermediate] + \
_get_raw_path(intermediate, dst)
def _check_path (p):
"""
Make sure that a path is actually a string of nodes with connected ports
returns True if path is valid
"""
for a,b in zip(p[:-1],p[1:]):
if adjacency[a[0]][b[0]] != a[2]:
return False
if adjacency[b[0]][a[0]] != b[2]:
return False
return True
def _get_path (src, dst, first_port, final_port):
"""
Gets a cooked path -- a list of (node,in_port,out_port)
"""
# Start with a raw path...
if src == dst:
path = [src]
else:
path = _get_raw_path(src, dst)
if path is None: return None
path = [src] + path + [dst]
# Now add the ports
r = []
in_port = first_port
for s1,s2 in zip(path[:-1],path[1:]):
out_port = adjacency[s1][s2]
r.append((s1,in_port,out_port))
in_port = adjacency[s2][s1]
r.append((dst,in_port,final_port))
assert _check_path(r), "Illegal path!"
return r
class WaitingPath (object):
"""
A path which is waiting for its path to be established
"""
def __init__ (self, path, packet):
"""
xids is a sequence of (dpid,xid)
first_switch is the DPID where the packet came from
packet is something that can be sent in a packet_out
"""
self.expires_at = time.time() + PATH_SETUP_TIME
self.path = path
self.first_switch = path[0][0].dpid
self.xids = set()
self.packet = packet
if len(waiting_paths) > 1000:
WaitingPath.expire_waiting_paths()
def add_xid (self, dpid, xid):
self.xids.add((dpid,xid))
waiting_paths[(dpid,xid)] = self
@property
def is_expired (self):
return time.time() >= self.expires_at
def notify (self, event):
"""
Called when a barrier has been received
"""
self.xids.discard((event.dpid,event.xid))
if len(self.xids) == 0:
# Done!
if self.packet:
log.debug("Sending delayed packet out %s"
% (dpid_to_str(self.first_switch),))
msg = of.ofp_packet_out(data=self.packet,
action=of.ofp_action_output(port=of.OFPP_TABLE))
core.openflow.sendToDPID(self.first_switch, msg)
core.l2_multi.raiseEvent(PathInstalled(self.path))
@staticmethod
def expire_waiting_paths ():
packets = set(waiting_paths.values())
killed = 0
for p in packets:
if p.is_expired:
killed += 1
for entry in p.xids:
waiting_paths.pop(entry, None)
if killed:
log.error("%i paths failed to install" % (killed,))
class PathInstalled (Event):
"""
Fired when a path is installed
"""
def __init__ (self, path):
Event.__init__(self)
self.path = path
class Switch (EventMixin):
def __init__ (self):
self.connection = None
self.ports = None
self.dpid = None
self._listeners = None
self._connected_at = None
def __repr__ (self):
return dpid_to_str(self.dpid)
def _install (self, switch, in_port, out_port, match, buf = None):
msg = of.ofp_flow_mod()
msg.match = match
msg.match.in_port = in_port
msg.idle_timeout = FLOW_IDLE_TIMEOUT
msg.hard_timeout = FLOW_HARD_TIMEOUT
msg.actions.append(of.ofp_action_output(port = out_port))
msg.buffer_id = buf
switch.connection.send(msg)
def _install_path (self, p, match, packet_in=None):
wp = WaitingPath(p, packet_in)
for sw,in_port,out_port in p:
self._install(sw, in_port, out_port, match)
msg = of.ofp_barrier_request()
sw.connection.send(msg)
wp.add_xid(sw.dpid,msg.xid)
def install_path (self, dst_sw, last_port, match, event):
"""
Attempts to install a path between this switch and some destination
"""
p = _get_path(self, dst_sw, event.port, last_port)
if p is None:
log.warning("Can't get from %s to %s", match.dl_src, match.dl_dst)
import pox.lib.packet as pkt
if (match.dl_type == pkt.ethernet.IP_TYPE and
event.parsed.find('ipv4')):
# It's IP -- let's send a destination unreachable
log.debug("Dest unreachable (%s -> %s)",
match.dl_src, match.dl_dst)
from pox.lib.addresses import EthAddr
e = pkt.ethernet()
e.src = EthAddr(dpid_to_str(self.dpid)) #FIXME: Hmm...
e.dst = match.dl_src
e.type = e.IP_TYPE
ipp = pkt.ipv4()
ipp.protocol = ipp.ICMP_PROTOCOL
ipp.srcip = match.nw_dst #FIXME: Ridiculous
ipp.dstip = match.nw_src
icmp = pkt.icmp()
icmp.type = pkt.ICMP.TYPE_DEST_UNREACH
icmp.code = pkt.ICMP.CODE_UNREACH_HOST
orig_ip = event.parsed.find('ipv4')
d = orig_ip.pack()
d = d[:orig_ip.hl * 4 + 8]
import struct
d = struct.pack("!HH", 0,0) + d #FIXME: MTU
icmp.payload = d
ipp.payload = icmp
e.payload = ipp
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = event.port))
msg.data = e.pack()
self.connection.send(msg)
return
log.debug("Installing path for %s -> %s %04x (%i hops)",
match.dl_src, match.dl_dst, match.dl_type, len(p))
# We have a path -- install it
self._install_path(p, match, event.ofp)
# Now reverse it and install it backwards
# (we'll just assume that will work)
p = [(sw,out_port,in_port) for sw,in_port,out_port in p]
self._install_path(p, match.flip())
def _handle_PacketIn (self, event):
def flood ():
""" Floods the packet """
if self.is_holding_down:
log.warning("Not flooding -- holddown active")
msg = of.ofp_packet_out()
# OFPP_FLOOD is optional; some switches may need OFPP_ALL
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
self.connection.send(msg)
def drop ():
# Kill the buffer
if event.ofp.buffer_id is not None:
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
        event.ofp.buffer_id = None # Mark it dead
msg.in_port = event.port
self.connection.send(msg)
packet = event.parsed
loc = (self, event.port) # Place we saw this ethaddr
oldloc = mac_map.get(packet.src) # Place we last saw this ethaddr
if packet.effective_ethertype == packet.LLDP_TYPE:
drop()
return
if oldloc is None:
if packet.src.is_multicast == False:
mac_map[packet.src] = loc # Learn position for ethaddr
log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1])
elif oldloc != loc:
# ethaddr seen at different place!
if core.openflow_discovery.is_edge_port(loc[0].dpid, loc[1]):
# New place is another "plain" port (probably)
log.debug("%s moved from %s.%i to %s.%i?", packet.src,
dpid_to_str(oldloc[0].dpid), oldloc[1],
dpid_to_str( loc[0].dpid), loc[1])
if packet.src.is_multicast == False:
mac_map[packet.src] = loc # Learn position for ethaddr
log.debug("Learned %s at %s.%i", packet.src, loc[0], loc[1])
elif packet.dst.is_multicast == False:
# New place is a switch-to-switch port!
# Hopefully, this is a packet we're flooding because we didn't
# know the destination, and not because it's somehow not on a
# path that we expect it to be on.
# If spanning_tree is running, we might check that this port is
# on the spanning tree (it should be).
if packet.dst in mac_map:
# Unfortunately, we know the destination. It's possible that
# we learned it while it was in flight, but it's also possible
# that something has gone wrong.
log.warning("Packet from %s to known destination %s arrived "
"at %s.%i without flow", packet.src, packet.dst,
dpid_to_str(self.dpid), event.port)
if packet.dst.is_multicast:
log.debug("Flood multicast from %s", packet.src)
flood()
else:
if packet.dst not in mac_map:
log.debug("%s unknown -- flooding" % (packet.dst,))
flood()
else:
dest = mac_map[packet.dst]
match = of.ofp_match.from_packet(packet)
self.install_path(dest[0], dest[1], match, event)
def disconnect (self):
if self.connection is not None:
log.debug("Disconnect %s" % (self.connection,))
self.connection.removeListeners(self._listeners)
self.connection = None
self._listeners = None
def connect (self, connection):
if self.dpid is None:
self.dpid = connection.dpid
assert self.dpid == connection.dpid
if self.ports is None:
self.ports = connection.features.ports
self.disconnect()
log.debug("Connect %s" % (connection,))
self.connection = connection
self._listeners = self.listenTo(connection)
self._connected_at = time.time()
@property
def is_holding_down (self):
if self._connected_at is None: return True
if time.time() - self._connected_at > FLOOD_HOLDDOWN:
return False
return True
def _handle_ConnectionDown (self, event):
self.disconnect()
class l2_multi (EventMixin):
_eventMixin_events = set([
PathInstalled,
])
def __init__ (self):
# Listen to dependencies
def startup ():
core.openflow.addListeners(self, priority=0)
core.openflow_discovery.addListeners(self)
core.call_when_ready(startup, ('openflow','openflow_discovery'))
def _handle_LinkEvent (self, event):
def flip (link):
return Discovery.Link(link[2],link[3], link[0],link[1])
l = event.link
sw1 = switches[l.dpid1]
sw2 = switches[l.dpid2]
# Invalidate all flows and path info.
# For link adds, this makes sure that if a new link leads to an
# improved path, we use it.
# For link removals, this makes sure that we don't use a
# path that may have been broken.
#NOTE: This could be radically improved! (e.g., not *ALL* paths break)
clear = of.ofp_flow_mod(command=of.OFPFC_DELETE)
for sw in switches.itervalues():
if sw.connection is None: continue
sw.connection.send(clear)
path_map.clear()
if event.removed:
# This link no longer okay
if sw2 in adjacency[sw1]: del adjacency[sw1][sw2]
if sw1 in adjacency[sw2]: del adjacency[sw2][sw1]
# But maybe there's another way to connect these...
for ll in core.openflow_discovery.adjacency:
if ll.dpid1 == l.dpid1 and ll.dpid2 == l.dpid2:
if flip(ll) in core.openflow_discovery.adjacency:
# Yup, link goes both ways
adjacency[sw1][sw2] = ll.port1
adjacency[sw2][sw1] = ll.port2
# Fixed -- new link chosen to connect these
break
else:
# If we already consider these nodes connected, we can
# ignore this link up.
# Otherwise, we might be interested...
if adjacency[sw1][sw2] is None:
# These previously weren't connected. If the link
# exists in both directions, we consider them connected now.
if flip(l) in core.openflow_discovery.adjacency:
# Yup, link goes both ways -- connected!
adjacency[sw1][sw2] = l.port1
adjacency[sw2][sw1] = l.port2
# If we have learned a MAC on this port which we now know to
# be connected to a switch, unlearn it.
bad_macs = set()
for mac,(sw,port) in mac_map.iteritems():
if sw is sw1 and port == l.port1: bad_macs.add(mac)
if sw is sw2 and port == l.port2: bad_macs.add(mac)
for mac in bad_macs:
log.debug("Unlearned %s", mac)
del mac_map[mac]
def _handle_ConnectionUp (self, event):
sw = switches.get(event.dpid)
if sw is None:
# New switch
sw = Switch()
switches[event.dpid] = sw
sw.connect(event.connection)
else:
sw.connect(event.connection)
def _handle_BarrierIn (self, event):
wp = waiting_paths.pop((event.dpid,event.xid), None)
if not wp:
#log.info("No waiting packet %s,%s", event.dpid, event.xid)
return
#log.debug("Notify waiting packet %s,%s", event.dpid, event.xid)
wp.notify(event)
def launch ():
core.registerNew(l2_multi)
timeout = min(max(PATH_SETUP_TIME, 5) * 2, 15)
Timer(timeout, WaitingPath.expire_waiting_paths, recurring=True)
| damomeen/pox-datapath | pox/forwarding/l2_multi.py | Python | apache-2.0 | 15,558 |
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import json
import os
import re
import shutil
import subprocess
# pylint: disable=import-error
import ruamel.yaml as yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/label -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_label
short_description: Create, modify, and idempotently manage openshift labels.
description:
- Modify openshift labels programmatically.
options:
state:
description:
- State controls the action that will be taken with resource
    - 'present' will create or update an object to the desired state
- 'absent' will ensure certain labels are removed
- 'list' will read the labels
- 'add' will insert labels to the already existing labels
default: present
choices: ["present", "absent", "list", "add"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
kind:
description:
- The kind of object that can be managed.
default: node
choices:
- node
- pod
- namespace
aliases: []
labels:
description:
- A list of labels for the resource.
    - Each label consists of a key and a value.
- eg, {'key': 'foo', 'value': 'bar'}
required: false
default: None
aliases: []
selector:
description:
- The selector to apply to the resource query
required: false
default: None
aliases: []
author:
- "Joel Diaz <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: Add a single label to a node's existing labels
oc_label:
name: ip-172-31-5-23.ec2.internal
state: add
kind: node
labels:
- key: logging-infra-fluentd
value: 'true'
- name: remove a label from a node
oc_label:
name: ip-172-31-5-23.ec2.internal
state: absent
kind: node
labels:
- key: color
value: blue
- name: Ensure node has these exact labels
oc_label:
name: ip-172-31-5-23.ec2.internal
state: present
kind: node
labels:
- key: color
value: green
- key: type
value: master
- key: environment
value: production
'''
# -*- -*- -*- End included fragment: doc/label -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# noqa: E301,E302
class YeditException(Exception):
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
        ''' getter method for separator '''
return self._separator
    @separator.setter
    def separator(self, value):
        ''' setter method for separator '''
        self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary at the location given by key
            notation a.b.c (shown here with the '#' separator):
            d = {'a': {'b': 'c'}}
            key = a#b
            item = 'd'  ->  d becomes {'a': {'b': 'd'}}
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
return None
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
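    @staticmethod
    def _entry_demo():
        ''' Illustrative sketch (not part of the module): how the dotted-key
            helpers above behave on a toy dict.  Keys use the default '.'
            separator and '[n]' indexes into lists; the data is hypothetical.
        '''
        data = {'a': {'b': ['c', 'd']}}
        first = Yedit.get_entry(data, 'a.b[0]')  # -> 'c'
        Yedit.add_entry(data, 'a.e', item='f')   # data['a']['e'] == 'f'
        return first, data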
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in ' +
'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
% (inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# If vtype is not str then go ahead and attempt to yaml load it.
if isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming ' +
'value. value=[%s] vtype=[%s]'
% (type(inc_value), vtype))
return inc_value
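    @staticmethod
    def _parse_value_demo():
        ''' Illustrative sketch (not part of the module): parse_value coerces
            incoming strings according to the requested vtype.  The inputs
            here are hypothetical.
        '''
        as_bool = Yedit.parse_value('true', vtype='bool')  # -> True
        as_str = Yedit.parse_value(True, vtype='str')      # -> 'True'
        as_dict = Yedit.parse_value('{a: 1}')              # -> {'a': 1}
        return as_bool, as_str, as_dict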
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(module):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=module.params['src'],
backup=module.params['backup'],
separator=module.params['separator'])
if module.params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and \
module.params['state'] != 'present':
return {'failed': True,
'msg': 'Error opening file [%s]. Verify that the ' +
                           'file exists, that it has correct' +
' permissions, and is valid yaml.'}
if module.params['state'] == 'list':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['key']:
rval = yamlfile.get(module.params['key']) or {}
return {'changed': False, 'result': rval, 'state': "list"}
elif module.params['state'] == 'absent':
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
yamlfile.yaml_dict = content
if module.params['update']:
rval = yamlfile.pop(module.params['key'],
module.params['value'])
else:
rval = yamlfile.delete(module.params['key'])
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
elif module.params['state'] == 'present':
# check if content is different than what is in the file
if module.params['content']:
content = Yedit.parse_value(module.params['content'],
module.params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
module.params['value'] is None:
return {'changed': False,
'result': yamlfile.yaml_dict,
'state': "present"}
yamlfile.yaml_dict = content
# we were passed a value; parse it
if module.params['value']:
value = Yedit.parse_value(module.params['value'],
module.params['value_type'])
key = module.params['key']
if module.params['update']:
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
module.params['curr_value_format']) # noqa: E501
rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
elif module.params['append']:
rval = yamlfile.append(key, value)
else:
rval = yamlfile.put(key, value)
if rval[0] and module.params['src']:
yamlfile.write()
return {'changed': rval[0],
'result': rval[1], 'state': "present"}
# no edits to make
if module.params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': "present"}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
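# Editor's note: a hedged, minimal usage sketch of the Yedit class above; it is
# not part of the vendored fragment. It assumes a writable path and relies on
# the (changed, result) tuple convention used by put/delete.
def _yedit_usage_sketch(path='/tmp/yedit-example.yml'):
    '''demonstrate an idempotent put on a dotted key path'''
    yed = Yedit(filename=path, separator='.')
    yed.yaml_dict = {}  # start from an empty document
    changed, result = yed.put('spec.replicas', 3)  # creates the nested keys
    changed_again, _ = yed.put('spec.replicas', 3)  # same value: reports no change
    return changed, changed_again, result  # (True, False, {'spec': {'replicas': 3}})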
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, rname, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource, rname]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
elif rname:
cmd.append(rname)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout, stderr
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['oadm']
else:
cmds = ['oc']
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # noqa: E501
cmds.extend(['-n', self.namespace])
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
returncode, stdout, stderr = self._run(cmds, input_data)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.args:
err = err.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
        # Horrible hack to get the openshift version in OpenShift 3.2:
        # by default, "oc version" in 3.2 does not return an "openshift" version.
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(value)
print(user_def[key])
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(api_values)
print(user_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
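# Editor's note: a hedged sketch (not part of the vendored fragment) of the
# option-dict shape that OpenShiftCLIConfig.stringify expects; the option
# names and values below are illustrative.
def _config_stringify_sketch():
    options = {'name': {'value': 'myapp', 'include': True},
               'replica_count': {'value': 3, 'include': True},
               'ignored': {'value': '', 'include': True}}  # falsy non-int: skipped
    config = OpenShiftCLIConfig('myapp', 'default',
                                '/etc/origin/master/admin.kubeconfig', options)
    # Returns e.g. ['--name=myapp', '--replica-count=3'] (dict order may vary).
    return config.stringify()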
# -*- -*- -*- Begin included fragment: class/oc_label.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCLabel(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
name,
namespace,
kind,
kubeconfig,
labels=None,
selector=None,
verbose=False):
''' Constructor for OCLabel '''
super(OCLabel, self).__init__(namespace, kubeconfig)
self.name = name
self.namespace = namespace
self.kind = kind
self.kubeconfig = kubeconfig
self.labels = labels
self._curr_labels = None
self.selector = selector
@property
def current_labels(self):
'''property for the current labels'''
if self._curr_labels is None:
results = self.get()
self._curr_labels = results['labels']
return self._curr_labels
@current_labels.setter
def current_labels(self, data):
'''property setter for current labels'''
self._curr_labels = data
def compare_labels(self, host_labels):
''' compare incoming labels against current labels'''
for label in self.labels:
if label['key'] not in host_labels or \
label['value'] != host_labels[label['key']]:
return False
return True
def all_user_labels_exist(self):
''' return whether all the labels already exist '''
for current_host_labels in self.current_labels:
rbool = self.compare_labels(current_host_labels)
if not rbool:
return False
return True
def any_label_exists(self):
''' return whether any single label already exists '''
for current_host_labels in self.current_labels:
for label in self.labels:
if label['key'] in current_host_labels:
return True
return False
def get_user_keys(self):
''' go through list of user key:values and return all keys '''
user_keys = []
for label in self.labels:
user_keys.append(label['key'])
return user_keys
def get_current_label_keys(self):
''' collect all the current label keys '''
current_label_keys = []
for current_host_labels in self.current_labels:
for key in current_host_labels.keys():
current_label_keys.append(key)
return list(set(current_label_keys))
def get_extra_current_labels(self):
''' return list of labels that are currently stored, but aren't
in user-provided list '''
extra_labels = []
user_label_keys = self.get_user_keys()
current_label_keys = self.get_current_label_keys()
for current_key in current_label_keys:
if current_key not in user_label_keys:
extra_labels.append(current_key)
return extra_labels
def extra_current_labels(self):
''' return whether there are labels currently stored that user
hasn't directly provided '''
extra_labels = self.get_extra_current_labels()
if len(extra_labels) > 0:
return True
return False
def replace(self):
''' replace currently stored labels with user provided labels '''
cmd = self.cmd_template()
# First delete any extra labels
extra_labels = self.get_extra_current_labels()
if len(extra_labels) > 0:
for label in extra_labels:
cmd.append("{}-".format(label))
# Now add/modify the user-provided label list
if len(self.labels) > 0:
for label in self.labels:
cmd.append("{}={}".format(label['key'], label['value']))
# --overwrite for the case where we are updating existing labels
cmd.append("--overwrite")
return self.openshift_cmd(cmd)
def get(self):
'''return label information '''
result_dict = {}
label_list = []
if self.name:
result = self._get(resource=self.kind, rname=self.name, selector=self.selector)
if 'labels' in result['results'][0]['metadata']:
label_list.append(result['results'][0]['metadata']['labels'])
else:
label_list.append({})
else:
result = self._get(resource=self.kind, selector=self.selector)
for item in result['results'][0]['items']:
if 'labels' in item['metadata']:
label_list.append(item['metadata']['labels'])
else:
label_list.append({})
self.current_labels = label_list
result_dict['labels'] = self.current_labels
result_dict['item_count'] = len(self.current_labels)
result['results'] = result_dict
return result
def cmd_template(self):
        ''' boilerplate oc command for modifying labels on this object '''
# let's build the cmd with what we have passed in
cmd = ["label", self.kind]
if self.selector:
cmd.extend(["--selector", self.selector])
elif self.name:
cmd.extend([self.name])
return cmd
def add(self):
''' add labels '''
cmd = self.cmd_template()
for label in self.labels:
cmd.append("{}={}".format(label['key'], label['value']))
cmd.append("--overwrite")
return self.openshift_cmd(cmd)
def delete(self):
'''delete the labels'''
cmd = self.cmd_template()
for label in self.labels:
cmd.append("{}-".format(label['key']))
return self.openshift_cmd(cmd)
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode=False):
''' run the idempotent ansible code
        params: comes from the ansible portion of this module
        check_mode: whether the module is running in check mode (module.check_mode)
'''
oc_label = OCLabel(params['name'],
params['namespace'],
params['kind'],
params['kubeconfig'],
params['labels'],
params['selector'],
verbose=params['debug'])
state = params['state']
name = params['name']
selector = params['selector']
api_rval = oc_label.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': "list"}
#######
# Add
#######
if state == 'add':
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'add'"}
if not oc_label.all_user_labels_exist():
if check_mode:
return {'changed': False, 'msg': 'Would have performed an addition.'}
api_rval = oc_label.add()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "add"}
return {'changed': False, 'state': "add"}
########
# Delete
########
if state == 'absent':
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'absent'"}
if oc_label.any_label_exists():
if check_mode:
return {'changed': False, 'msg': 'Would have performed a delete.'}
api_rval = oc_label.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "absent"}
return {'changed': False, 'state': "absent"}
if state == 'present':
########
# Update
########
if not (name or selector):
return {'failed': True,
'msg': "Param 'name' or 'selector' is required if state == 'present'"}
# if all the labels passed in don't already exist
# or if there are currently stored labels that haven't
# been passed in
if not oc_label.all_user_labels_exist() or \
oc_label.extra_current_labels():
if check_mode:
return {'changed': False, 'msg': 'Would have made changes.'}
api_rval = oc_label.replace()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_label.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "present"}
return {'changed': False, 'results': api_rval, 'state': "present"}
return {'failed': True,
'changed': False,
'results': 'Unknown state passed. %s' % state,
'state': "unknown"}
# -*- -*- -*- End included fragment: class/oc_label.py -*- -*- -*-
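# Editor's note: a hedged sketch (not part of the vendored fragment) of the
# params dict that OCLabel.run_ansible consumes; all values are illustrative
# and assume a reachable cluster.
def _oc_label_run_sketch():
    params = {'kubeconfig': '/etc/origin/master/admin.kubeconfig',
              'state': 'add',
              'debug': False,
              'kind': 'node',
              'name': 'node1.example.com',
              'namespace': 'default',
              'labels': [{'key': 'region', 'value': 'primary'}],
              'selector': None}
    return OCLabel.run_ansible(params, check_mode=False)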
# -*- -*- -*- Begin included fragment: ansible/oc_label.py -*- -*- -*-
def main():
''' ansible oc module for labels '''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list', 'add']),
debug=dict(default=False, type='bool'),
kind=dict(default='node', type='str', required=True,
choices=['node', 'pod', 'namespace']),
name=dict(default=None, type='str'),
namespace=dict(default=None, type='str'),
labels=dict(default=None, type='list'),
selector=dict(default=None, type='str'),
),
supports_check_mode=True,
mutually_exclusive=(['name', 'selector']),
)
results = OCLabel.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_label.py -*- -*- -*-
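# Editor's note (hedged, illustrative): given the argument spec above, a
# playbook task invoking this module might look like:
#
#   - name: add a region label to a node
#     oc_label:
#       state: add
#       kind: node
#       name: node1.example.com
#       labels:
#         - key: region
#           value: primary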
| tiwillia/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.5/roles/lib_openshift/library/oc_label.py | Python | apache-2.0 | 52,074 |
"""
Classes allowing "generic" relations through ContentType and object-id fields.
"""
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import signals
from django.db import models
from django.db.models.fields.related import RelatedField, Field, ManyToManyRel
from django.db.models.loading import get_model
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet, modelformset_factory, save_instance
from django.contrib.admin.options import InlineModelAdmin, flatten_fieldsets
from django.utils.encoding import smart_unicode
class GenericForeignKey(object):
"""
Provides a generic relation to any object through content-type/object-id
fields.
"""
def __init__(self, ct_field="content_type", fk_field="object_id"):
self.ct_field = ct_field
self.fk_field = fk_field
def contribute_to_class(self, cls, name):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_virtual_field(self)
# For some reason I don't totally understand, using weakrefs here doesn't work.
signals.pre_init.connect(self.instance_pre_init, sender=cls, weak=False)
# Connect myself as the descriptor for this field
setattr(cls, name, self)
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
        Handles initializing an object with the generic FK instead of
content-type/object-id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
def get_content_type(self, obj=None, id=None):
# Convenience function using get_model avoids a circular import when
# using this model
ContentType = get_model("contenttypes", "contenttype")
if obj:
return ContentType.objects.get_for_model(obj)
elif id:
return ContentType.objects.get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id:
ct = self.get_content_type(id=ct_id)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError, u"%s must be accessed via instance" % self.related.opts.object_name
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
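# Editor's note: a hedged usage sketch, not part of Django itself. `item` is
# assumed to be an instance of a model declaring
# ``content_object = GenericForeignKey()`` alongside the default
# content-type/object-id fields, and `obj` any saved model instance.
def _generic_fk_usage_sketch(item, obj):
    item.content_object = obj   # __set__ fills the ct_field and fk_field
    assert item.object_id == obj._get_pk_val()
    return item.content_object  # __get__ re-fetches obj via ContentType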
class GenericRelation(RelatedField, Field):
"""Provides an accessor to generic related objects (e.g. comments)"""
def __init__(self, to, **kwargs):
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = GenericRel(to,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', True))
# By its very nature, a GenericRelation doesn't create a table.
self.creates_table = False
# Override content-type/object-id field names on the related class
self.object_id_field_name = kwargs.pop("object_id_field", "object_id")
self.content_type_field_name = kwargs.pop("content_type_field", "content_type")
kwargs['blank'] = True
kwargs['editable'] = False
kwargs['serialize'] = False
Field.__init__(self, **kwargs)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_unicode([instance._get_pk_val() for instance in qs])
def m2m_db_table(self):
return self.rel.to._meta.db_table
def m2m_column_name(self):
return self.object_id_field_name
def m2m_reverse_name(self):
return self.rel.to._meta.pk.column
def contribute_to_class(self, cls, name):
super(GenericRelation, self).contribute_to_class(cls, name)
# Save a reference to which model this class is on for future use
self.model = cls
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self))
def contribute_to_related_class(self, cls, related):
pass
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def db_type(self):
# Since we're simulating a ManyToManyField, in effect, best return the
# same db_type as well.
return None
def extra_filters(self, pieces, pos, negate):
"""
Return an extra filter to the queryset so that the results are filtered
on the appropriate content type.
"""
if negate:
return []
ContentType = get_model("contenttypes", "contenttype")
content_type = ContentType.objects.get_for_model(self.model)
prefix = "__".join(pieces[:pos + 1])
return [("%s__%s" % (prefix, self.content_type_field_name),
content_type)]
class ReverseGenericRelatedObjectsDescriptor(object):
"""
This class provides the functionality that makes the related-object
managers available as attributes on a model class, for fields that have
multiple "remote" values and have a GenericRelation defined in their model
(rather than having another model pointed *at* them). In the example
"article.publications", the publications attribute is a
ReverseGenericRelatedObjectsDescriptor instance.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, instance_type=None):
if instance is None:
return self
        # This import is done here to avoid a circular import of this module.
from django.contrib.contenttypes.models import ContentType
# Dynamically create a class that subclasses the related model's
# default manager.
rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_generic_related_manager(superclass)
qn = connection.ops.quote_name
manager = RelatedManager(
model = rel_model,
instance = instance,
symmetrical = (self.field.rel.symmetrical and instance.__class__ == rel_model),
join_table = qn(self.field.m2m_db_table()),
source_col_name = qn(self.field.m2m_column_name()),
target_col_name = qn(self.field.m2m_reverse_name()),
content_type = ContentType.objects.get_for_model(instance),
content_type_field_name = self.field.content_type_field_name,
object_id_field_name = self.field.object_id_field_name
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError, "Manager must be accessed via instance"
manager = self.__get__(instance)
manager.clear()
for obj in value:
manager.add(obj)
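# Editor's note (hedged, not Django source): the manager produced by the
# factory below reparents objects on add() and *deletes* rows on
# remove()/clear(), since the relation lives on the related objects
# themselves. With a hypothetical ``bookmark.tags`` GenericRelation:
#
#   bookmark.tags.add(tag_item)     # sets content_type/object_id and saves
#   bookmark.tags.remove(tag_item)  # deletes tag_item outright
#   bookmark.tags.clear()           # deletes every related object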
def create_generic_related_manager(superclass):
"""
Factory function for a manager that subclasses 'superclass' (which is a
Manager) and adds behavior for generic related objects.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_col_name=None, target_col_name=None, content_type=None,
content_type_field_name=None, object_id_field_name=None):
super(GenericRelatedObjectManager, self).__init__()
self.core_filters = core_filters or {}
self.model = model
self.content_type = content_type
self.symmetrical = symmetrical
self.instance = instance
            self.join_table = model._meta.db_table  # NB: supersedes the unused join_table argument
self.source_col_name = source_col_name
self.target_col_name = target_col_name
self.content_type_field_name = content_type_field_name
self.object_id_field_name = object_id_field_name
self.pk_val = self.instance._get_pk_val()
def get_query_set(self):
query = {
'%s__pk' % self.content_type_field_name : self.content_type.id,
'%s__exact' % self.object_id_field_name : self.pk_val,
}
return superclass.get_query_set(self).filter(**query)
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError, "'%s' instance expected" % self.model._meta.object_name
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
obj.save()
add.alters_data = True
def remove(self, *objs):
for obj in objs:
obj.delete()
remove.alters_data = True
def clear(self):
for obj in self.all():
obj.delete()
clear.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
return super(GenericRelatedObjectManager, self).create(**kwargs)
create.alters_data = True
return GenericRelatedObjectManager
class GenericRel(ManyToManyRel):
def __init__(self, to, related_name=None, limit_choices_to=None, symmetrical=True):
self.to = to
self.related_name = related_name
self.limit_choices_to = limit_choices_to or {}
self.symmetrical = symmetrical
self.multiple = True
self.through = None
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=None,
prefix=None, queryset=None):
# Avoid a circular import.
from django.contrib.contenttypes.models import ContentType
opts = self.model._meta
self.instance = instance
self.rel_name = '-'.join((
opts.app_label, opts.object_name.lower(),
self.ct_field.name, self.ct_fk_field.name,
))
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(self.instance),
self.ct_fk_field.name: self.instance.pk,
})
super(BaseGenericInlineFormSet, self).__init__(
queryset=qs, data=data, files=files,
prefix=prefix
)
#@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return '-'.join((opts.app_label, opts.object_name.lower(),
cls.ct_field.name, cls.ct_fk_field.name,
))
get_default_prefix = classmethod(get_default_prefix)
def save_new(self, form, commit=True):
# Avoid a circular import.
from django.contrib.contenttypes.models import ContentType
kwargs = {
self.ct_field.get_attname(): ContentType.objects.get_for_model(self.instance).pk,
self.ct_fk_field.get_attname(): self.instance.pk,
}
new_obj = self.model(**kwargs)
return save_instance(form, new_obj, commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=0,
formfield_callback=lambda f: f.formfield()):
"""
    Returns a ``GenericInlineFormSet`` for the given kwargs.

    You must provide ``ct_field`` and ``fk_field`` if they are different from
    the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# Avoid a circular import.
from django.contrib.contenttypes.models import ContentType
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.rel.to != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
FormSet = modelformset_factory(model, form=form,
formfield_callback=formfield_callback,
formset=formset,
extra=extra, can_delete=can_delete, can_order=can_order,
fields=fields, exclude=exclude, max_num=max_num)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
return FormSet
class GenericInlineModelAdmin(InlineModelAdmin):
ct_field = "content_type"
ct_fk_field = "object_id"
formset = BaseGenericInlineFormSet
def get_formset(self, request, obj=None):
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
defaults = {
"ct_field": self.ct_field,
"fk_field": self.ct_fk_field,
"form": self.form,
"formfield_callback": self.formfield_for_dbfield,
"formset": self.formset,
"extra": self.extra,
"can_delete": self.can_delete,
"can_order": False,
"fields": fields,
"max_num": self.max_num,
"exclude": self.exclude
}
return generic_inlineformset_factory(self.model, **defaults)
class GenericStackedInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class GenericTabularInline(GenericInlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
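# Editor's note: a hedged, illustrative admin wiring for the inline classes
# above; the model names are hypothetical and such code would live in an
# application's admin.py, not in this framework module.
#
#   class TaggedItemInline(GenericTabularInline):
#       model = TaggedItem          # declares content_type/object_id fields
#       ct_field = 'content_type'
#       ct_fk_field = 'object_id'
#
#   class BookmarkAdmin(admin.ModelAdmin):
#       inlines = [TaggedItemInline]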
| sanjuro/RCJK | vendor/django/contrib/contenttypes/generic.py | Python | apache-2.0 | 15,830 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import functools
import os
from collections import defaultdict
from multiprocessing import cpu_count
from pants.backend.jvm.subsystems.java import Java
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.javac_plugin import JavacPlugin
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.jvm_compile.compile_context import CompileContext, DependencyContext
from pants.backend.jvm.tasks.jvm_compile.execution_graph import (ExecutionFailure, ExecutionGraph,
Job)
from pants.backend.jvm.tasks.jvm_dependency_analyzer import JvmDependencyAnalyzer
from pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import TaskIdentityFingerprintStrategy
from pants.base.worker_pool import WorkerPool
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.resources import Resources
from pants.build_graph.target_scopes import Scopes
from pants.goal.products import MultipleRootedProducts
from pants.reporting.reporting_utils import items_to_report_element
from pants.util.dirutil import fast_relpath, safe_delete, safe_mkdir, safe_walk
from pants.util.fileutil import create_size_estimators
from pants.util.memo import memoized_property
class ResolvedJarAwareTaskIdentityFingerprintStrategy(TaskIdentityFingerprintStrategy):
"""Task fingerprint strategy that also includes the resolved coordinates of dependent jars."""
def __init__(self, task, classpath_products):
super(ResolvedJarAwareTaskIdentityFingerprintStrategy, self).__init__(task)
self._classpath_products = classpath_products
def compute_fingerprint(self, target):
if isinstance(target, Resources):
# Just do nothing, this kind of dependency shouldn't affect result's hash.
return None
hasher = self._build_hasher(target)
if isinstance(target, JarLibrary):
# NB: Collects only the jars for the current jar_library, and hashes them to ensure that both
# the resolved coordinates, and the requested coordinates are used. This ensures that if a
# source file depends on a library with source compatible but binary incompatible signature
# changes between versions, that you won't get runtime errors due to using an artifact built
# against a binary incompatible version resolved for a previous compile.
classpath_entries = self._classpath_products.get_artifact_classpath_entries_for_targets(
[target])
for _, entry in classpath_entries:
hasher.update(str(entry.coordinate))
return hasher.hexdigest()
def __hash__(self):
return hash((type(self), self._task.fingerprint))
def __eq__(self, other):
return (isinstance(other, ResolvedJarAwareTaskIdentityFingerprintStrategy) and
super(ResolvedJarAwareTaskIdentityFingerprintStrategy, self).__eq__(other))
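# Editor's note (hedged, not pants source): a sketch of the behavior of the
# strategy above; `task` and `classpath_products` are assumed to come from a
# running JvmCompile instance.
#
#   strategy = ResolvedJarAwareTaskIdentityFingerprintStrategy(task, classpath_products)
#   strategy.compute_fingerprint(resources_target)  # -> None (Resources never affect the hash)
#   strategy.compute_fingerprint(jar_library)       # -> digest incl. resolved jar coordinates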
class JvmCompile(NailgunTaskBase):
"""A common framework for JVM compilation.
To subclass for a specific JVM language, implement the static values and methods
mentioned below under "Subclasses must implement".
"""
size_estimators = create_size_estimators()
@classmethod
def size_estimator_by_name(cls, estimation_strategy_name):
return cls.size_estimators[estimation_strategy_name]
@staticmethod
def _analysis_for_target(analysis_dir, target):
return os.path.join(analysis_dir, target.id + '.analysis')
@staticmethod
def _portable_analysis_for_target(analysis_dir, target):
return JvmCompile._analysis_for_target(analysis_dir, target) + '.portable'
@classmethod
def register_options(cls, register):
super(JvmCompile, cls).register_options(register)
register('--args', advanced=True, type=list,
default=list(cls.get_args_default(register.bootstrap)), fingerprint=True,
help='Pass these extra args to the compiler.')
register('--confs', advanced=True, type=list, default=['default'],
help='Compile for these Ivy confs.')
# TODO: Stale analysis should be automatically ignored via Task identities:
# https://github.com/pantsbuild/pants/issues/1351
register('--clear-invalid-analysis', advanced=True, type=bool,
help='When set, any invalid/incompatible analysis files will be deleted '
'automatically. When unset, an error is raised instead.')
register('--warnings', default=True, type=bool, fingerprint=True,
help='Compile with all configured warnings enabled.')
register('--warning-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_warning_args_default()),
help='Extra compiler args to use when warnings are enabled.')
register('--no-warning-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_no_warning_args_default()),
help='Extra compiler args to use when warnings are disabled.')
register('--fatal-warnings-enabled-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_fatal_warnings_enabled_args_default()),
help='Extra compiler args to use when fatal warnings are enabled.')
register('--fatal-warnings-disabled-args', advanced=True, type=list, fingerprint=True,
default=list(cls.get_fatal_warnings_disabled_args_default()),
help='Extra compiler args to use when fatal warnings are disabled.')
register('--debug-symbols', type=bool, fingerprint=True,
help='Compile with debug symbol enabled.')
register('--debug-symbol-args', advanced=True, type=list, fingerprint=True,
default=['-C-g:lines,source,vars'],
help='Extra args to enable debug symbol.')
    register('--delete-scratch', advanced=True, default=True, type=bool,
             help='Delete intermediate scratch files when done. Disable to keep them '
                  'around for debugging build problems.')
register('--worker-count', advanced=True, type=int, default=cpu_count(),
help='The number of concurrent workers to use when '
'compiling with {task}. Defaults to the '
'current machine\'s CPU count.'.format(task=cls._name))
register('--size-estimator', advanced=True,
choices=list(cls.size_estimators.keys()), default='filesize',
help='The method of target size estimation. The size estimator estimates the size '
'of targets in order to build the largest targets first (subject to dependency '
'constraints). Choose \'random\' to choose random sizes for each target, which '
'may be useful for distributed builds.')
register('--capture-log', advanced=True, type=bool,
fingerprint=True,
help='Capture compilation output to per-target logs.')
register('--capture-classpath', advanced=True, type=bool, default=True,
fingerprint=True,
help='Capture classpath to per-target newline-delimited text files. These files will '
'be packaged into any jar artifacts that are created from the jvm targets.')
register('--unused-deps', choices=['ignore', 'warn', 'fatal'], default='warn',
fingerprint=True,
help='Controls whether unused deps are checked, and whether they cause warnings or '
'errors.')
register('--use-classpath-jars', advanced=True, type=bool, fingerprint=True,
help='Use jar files on the compile_classpath. Note: Using this option degrades '
'incremental compile between targets.')
@classmethod
def prepare(cls, options, round_manager):
super(JvmCompile, cls).prepare(options, round_manager)
round_manager.require_data('compile_classpath')
# Require codegen we care about
# TODO(John Sirois): roll this up in Task - if the list of labels we care about for a target
# predicate to filter the full build graph is exposed, the requirement can be made automatic
# and in turn codegen tasks could denote the labels they produce automating wiring of the
# produce side
round_manager.require_data('java')
round_manager.require_data('scala')
# Allow the deferred_sources_mapping to take place first
round_manager.require_data('deferred_sources')
# Subclasses must implement.
# --------------------------
_name = None
_supports_concurrent_execution = None
@classmethod
def subsystem_dependencies(cls):
return super(JvmCompile, cls).subsystem_dependencies() + (Java, JvmPlatform, ScalaPlatform)
@classmethod
def name(cls):
return cls._name
@classmethod
def compiler_plugin_types(cls):
"""A tuple of target types which are compiler plugins."""
return ()
@classmethod
def get_args_default(cls, bootstrap_option_values):
"""Override to set default for --args option.
:param bootstrap_option_values: The values of the "bootstrap options" (e.g., pants_workdir).
Implementations can use these when generating the default.
See src/python/pants/options/options_bootstrapper.py for
details.
"""
return ()
@classmethod
def get_warning_args_default(cls):
"""Override to set default for --warning-args option."""
return ()
@classmethod
def get_no_warning_args_default(cls):
"""Override to set default for --no-warning-args option."""
return ()
@classmethod
def get_fatal_warnings_enabled_args_default(cls):
"""Override to set default for --fatal-warnings-enabled-args option."""
return ()
@classmethod
def get_fatal_warnings_disabled_args_default(cls):
"""Override to set default for --fatal-warnings-disabled-args option."""
return ()
@property
def cache_target_dirs(self):
return True
def select(self, target):
raise NotImplementedError()
def select_source(self, source_file_path):
raise NotImplementedError()
def create_analysis_tools(self):
"""Returns an AnalysisTools implementation.
Subclasses must implement.
"""
raise NotImplementedError()
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
log_file, settings, fatal_warnings, javac_plugins_to_exclude):
"""Invoke the compiler.
Must raise TaskError on compile failure.
Subclasses must implement.
:param list args: Arguments to the compiler (such as jmake or zinc).
:param list classpath: List of classpath entries.
:param list sources: Source files.
:param str classes_output_dir: Where to put the compiled output.
:param upstream_analysis:
:param analysis_file: Where to write the compile analysis.
:param log_file: Where to write logs.
:param JvmPlatformSettings settings: platform settings determining the -source, -target, etc for
javac to use.
:param fatal_warnings: whether to convert compilation warnings to errors.
:param javac_plugins_to_exclude: A list of names of javac plugins that mustn't be used in
this compilation, even if requested (typically because
this compilation is building those same plugins).
"""
raise NotImplementedError()
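  # Editor's note (hedged, not pants source): a minimal sketch of the subclass
  # contract described above; all names are illustrative.
  #
  #   class MyLangCompile(JvmCompile):
  #     _name = 'mylang'
  #
  #     def select(self, target):
  #       return target.has_sources('.mylang')
  #
  #     def select_source(self, source_file_path):
  #       return source_file_path.endswith('.mylang')
  #
  #     def create_analysis_tools(self):
  #       return MyLangAnalysisTools()  # hypothetical
  #
  #     def compile(self, args, classpath, sources, classes_output_dir,
  #                 upstream_analysis, analysis_file, log_file, settings,
  #                 fatal_warnings, javac_plugins_to_exclude):
  #       ...  # invoke the compiler; raise TaskError on failure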
# Subclasses may override.
# ------------------------
def extra_compile_time_classpath_elements(self):
"""Extra classpath elements common to all compiler invocations.
E.g., jars for compiler plugins.
These are added at the end of the classpath, after any dependencies, so that if they
overlap with any explicit dependencies, the compiler sees those first. This makes
missing dependency accounting much simpler.
"""
return []
def write_extra_resources(self, compile_context):
"""Writes any extra, out-of-band resources for a target to its classes directory.
E.g., targets that produce scala compiler plugins or annotation processor files
produce an info file. The resources will be added to the runtime_classpath.
Returns a list of pairs (root, [absolute paths of files under root]).
"""
pass
def __init__(self, *args, **kwargs):
super(JvmCompile, self).__init__(*args, **kwargs)
self._targets_to_compile_settings = None
# JVM options for running the compiler.
self._jvm_options = self.get_options().jvm_options
self._args = list(self.get_options().args)
if self.get_options().warnings:
self._args.extend(self.get_options().warning_args)
else:
self._args.extend(self.get_options().no_warning_args)
if self.get_options().debug_symbols:
self._args.extend(self.get_options().debug_symbol_args)
# The ivy confs for which we're building.
self._confs = self.get_options().confs
# Determines which sources are relevant to this target.
self._sources_predicate = self.select_source
self._capture_log = self.get_options().capture_log
self._delete_scratch = self.get_options().delete_scratch
self._clear_invalid_analysis = self.get_options().clear_invalid_analysis
try:
worker_count = self.get_options().worker_count
except AttributeError:
# tasks that don't support concurrent execution have no worker_count registered
worker_count = 1
self._worker_count = worker_count
self._size_estimator = self.size_estimator_by_name(self.get_options().size_estimator)
self._analysis_tools = self.create_analysis_tools()
self._dep_context = DependencyContext(self.compiler_plugin_types(),
dict(include_scopes=Scopes.JVM_COMPILE_SCOPES,
respect_intransitive=True))
@property
def _unused_deps_check_enabled(self):
return self.get_options().unused_deps != 'ignore'
@memoized_property
def _dep_analyzer(self):
return JvmDependencyAnalyzer(get_buildroot(),
self.context.products.get_data('runtime_classpath'),
self.context.products.get_data('product_deps_by_src'))
@property
def _analysis_parser(self):
return self._analysis_tools.parser
def _fingerprint_strategy(self, classpath_products):
return ResolvedJarAwareTaskIdentityFingerprintStrategy(self, classpath_products)
def _compile_context(self, target, target_workdir):
analysis_file = JvmCompile._analysis_for_target(target_workdir, target)
portable_analysis_file = JvmCompile._portable_analysis_for_target(target_workdir, target)
classes_dir = os.path.join(target_workdir, 'classes')
jar_file = os.path.join(target_workdir, 'z.jar')
log_file = os.path.join(target_workdir, 'debug.log')
strict_deps = self._compute_language_property(target, lambda x: x.strict_deps)
return CompileContext(target,
analysis_file,
portable_analysis_file,
classes_dir,
jar_file,
log_file,
self._compute_sources_for_target(target),
strict_deps)
def execute(self):
# In case we have no relevant targets and return early create the requested product maps.
self._create_empty_products()
relevant_targets = list(self.context.targets(predicate=self.select))
if not relevant_targets:
return
# Clone the compile_classpath to the runtime_classpath.
compile_classpath = self.context.products.get_data('compile_classpath')
classpath_product = self.context.products.get_data('runtime_classpath', compile_classpath.copy)
def classpath_for_context(context):
if self.get_options().use_classpath_jars:
return context.jar_file
return context.classes_dir
fingerprint_strategy = self._fingerprint_strategy(classpath_product)
# Note, JVM targets are validated (`vts.update()`) as they succeed. As a result,
# we begin writing artifacts out to the cache immediately instead of waiting for
# all targets to finish.
with self.invalidated(relevant_targets,
invalidate_dependents=True,
fingerprint_strategy=fingerprint_strategy,
topological_order=True) as invalidation_check:
# Initialize the classpath for all targets.
compile_contexts = {vt.target: self._compile_context(vt.target, vt.results_dir)
for vt in invalidation_check.all_vts}
for cc in compile_contexts.values():
classpath_product.add_for_target(cc.target, [(conf, classpath_for_context(cc))
for conf in self._confs])
# Register products for valid targets.
valid_targets = [vt.target for vt in invalidation_check.all_vts if vt.valid]
self._register_vts([compile_contexts[t] for t in valid_targets])
# Build any invalid targets (which will register products in the background).
if invalidation_check.invalid_vts:
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
self.do_compile(
invalidation_check,
compile_contexts,
invalid_targets,
self.extra_compile_time_classpath_elements(),
)
if not self.get_options().use_classpath_jars:
# Once compilation has completed, replace the classpath entry for each target with
# its jar'd representation.
for cc in compile_contexts.values():
for conf in self._confs:
classpath_product.remove_for_target(cc.target, [(conf, cc.classes_dir)])
classpath_product.add_for_target(cc.target, [(conf, cc.jar_file)])
def do_compile(self,
invalidation_check,
compile_contexts,
invalid_targets,
extra_compile_time_classpath_elements):
"""Executes compilations for the invalid targets contained in a single chunk."""
    assert invalid_targets, "do_compile should only be invoked if there are invalid targets."
# This ensures the workunit for the worker pool is set before attempting to compile.
with self.context.new_workunit('isolation-{}-pool-bootstrap'.format(self._name)) \
as workunit:
# This uses workunit.parent as the WorkerPool's parent so that child workunits
# of different pools will show up in order in the html output. This way the current running
# workunit is on the bottom of the page rather than possibly in the middle.
worker_pool = WorkerPool(workunit.parent,
self.context.run_tracker,
self._worker_count)
# Prepare the output directory for each invalid target, and confirm that analysis is valid.
for target in invalid_targets:
cc = compile_contexts[target]
safe_mkdir(cc.classes_dir)
self.validate_analysis(cc.analysis_file)
# Get the classpath generated by upstream JVM tasks and our own prepare_compile().
classpath_products = self.context.products.get_data('runtime_classpath')
extra_compile_time_classpath = self._compute_extra_classpath(
extra_compile_time_classpath_elements)
# Now create compile jobs for each invalid target one by one.
jobs = self._create_compile_jobs(classpath_products,
compile_contexts,
extra_compile_time_classpath,
invalid_targets,
invalidation_check.invalid_vts)
exec_graph = ExecutionGraph(jobs)
try:
exec_graph.execute(worker_pool, self.context.log)
except ExecutionFailure as e:
raise TaskError("Compilation failure: {}".format(e))
def _record_compile_classpath(self, classpath, targets, outdir):
text = '\n'.join(classpath)
for target in targets:
path = os.path.join(outdir, 'compile_classpath', '{}.txt'.format(target.id))
safe_mkdir(os.path.dirname(path), clean=False)
with open(path, 'w') as f:
f.write(text.encode('utf-8'))
def _compile_vts(self, vts, sources, analysis_file, upstream_analysis, classpath, outdir,
log_file, progress_message, settings, fatal_warnings, counter):
"""Compiles sources for the given vts into the given output dir.
vts - versioned target set
sources - sources for this target set
analysis_file - the analysis file to manipulate
classpath - a list of classpath entries
outdir - the output dir to send classes to
May be invoked concurrently on independent target sets.
Postcondition: The individual targets in vts are up-to-date, as if each were
compiled individually.
"""
if not sources:
self.context.log.warn('Skipping {} compile for targets with no sources:\n {}'
.format(self.name(), vts.targets))
else:
counter_val = str(counter()).rjust(counter.format_length(), b' ')
counter_str = '[{}/{}] '.format(counter_val, counter.size)
# Do some reporting.
self.context.log.info(
counter_str,
'Compiling ',
items_to_report_element(sources, '{} source'.format(self.name())),
' in ',
items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
' (',
progress_message,
').')
with self.context.new_workunit('compile', labels=[WorkUnitLabel.COMPILER]):
# The compiler may delete classfiles, then later exit on a compilation error. Then if the
# change triggering the error is reverted, we won't rebuild to restore the missing
# classfiles. So we force-invalidate here, to be on the safe side.
vts.force_invalidate()
if self.get_options().capture_classpath:
self._record_compile_classpath(classpath, vts.targets, outdir)
# If compiling a plugin, don't try to use it on itself.
javac_plugins_to_exclude = (t.plugin for t in vts.targets if isinstance(t, JavacPlugin))
self.compile(self._args, classpath, sources, outdir, upstream_analysis, analysis_file,
log_file, settings, fatal_warnings, javac_plugins_to_exclude)
def check_artifact_cache(self, vts):
"""Localizes the fetched analysis for targets we found in the cache."""
def post_process(cached_vts):
for vt in cached_vts:
cc = self._compile_context(vt.target, vt.results_dir)
safe_delete(cc.analysis_file)
self._analysis_tools.localize(cc.portable_analysis_file, cc.analysis_file)
return self.do_check_artifact_cache(vts, post_process_cached_vts=post_process)
def _create_empty_products(self):
if self.context.products.is_required_data('classes_by_source'):
make_products = lambda: defaultdict(MultipleRootedProducts)
self.context.products.safe_create_data('classes_by_source', make_products)
if self.context.products.is_required_data('product_deps_by_src') \
or self._unused_deps_check_enabled:
self.context.products.safe_create_data('product_deps_by_src', dict)
def compute_classes_by_source(self, compile_contexts):
"""Compute a map of (context->(src->classes)) for the given compile_contexts.
It's possible (although unfortunate) for multiple targets to own the same sources, hence
the top level division. Srcs are relative to buildroot. Classes are absolute paths.
Returning classes with 'None' as their src indicates that the compiler analysis indicated
that they were un-owned. This case is triggered when annotation processors generate
    classes (or due to bugs in classfile tracking in zinc/jmake).
"""
buildroot = get_buildroot()
# Build a mapping of srcs to classes for each context.
classes_by_src_by_context = defaultdict(dict)
for compile_context in compile_contexts:
# Walk the context's jar to build a set of unclaimed classfiles.
unclaimed_classes = set()
with compile_context.open_jar(mode='r') as jar:
for name in jar.namelist():
if not name.endswith('/'):
unclaimed_classes.add(os.path.join(compile_context.classes_dir, name))
# Grab the analysis' view of which classfiles were generated.
classes_by_src = classes_by_src_by_context[compile_context]
if os.path.exists(compile_context.analysis_file):
products = self._analysis_parser.parse_products_from_path(compile_context.analysis_file,
compile_context.classes_dir)
for src, classes in products.items():
relsrc = os.path.relpath(src, buildroot)
classes_by_src[relsrc] = classes
unclaimed_classes.difference_update(classes)
# Any remaining classfiles were unclaimed by sources/analysis.
classes_by_src[None] = list(unclaimed_classes)
return classes_by_src_by_context
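  # Illustrative shape of the mapping returned above (all values invented):
  #
  #   {
  #     <CompileContext for src/java:lib>: {
  #       'src/java/org/foo/Bar.java': ['/abs/classes/org/foo/Bar.class'],
  #       None: ['/abs/classes/org/foo/Generated.class'],  # unclaimed classfiles
  #     },
  #   }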
def classname_for_classfile(self, compile_context, class_file_name):
assert class_file_name.startswith(compile_context.classes_dir)
rel_classfile_path = class_file_name[len(compile_context.classes_dir) + 1:]
return ClasspathUtil.classname_for_rel_classfile(rel_classfile_path)
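  # Assuming ClasspathUtil.classname_for_rel_classfile follows the standard JVM
  # directory-layout convention, the conversion looks like (paths hypothetical):
  #
  #   classname_for_classfile(cc, cc.classes_dir + '/org/foo/Bar.class')
  #   # -> 'org.foo.Bar'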
def _register_vts(self, compile_contexts):
classes_by_source = self.context.products.get_data('classes_by_source')
product_deps_by_src = self.context.products.get_data('product_deps_by_src')
# Register a mapping between sources and classfiles (if requested).
if classes_by_source is not None:
ccbsbc = self.compute_classes_by_source(compile_contexts).items()
for compile_context, computed_classes_by_source in ccbsbc:
classes_dir = compile_context.classes_dir
for source in compile_context.sources:
classes = computed_classes_by_source.get(source, [])
classes_by_source[source].add_abs_paths(classes_dir, classes)
# Register classfile product dependencies (if requested).
if product_deps_by_src is not None:
for compile_context in compile_contexts:
product_deps_by_src[compile_context.target] = \
self._analysis_parser.parse_deps_from_path(compile_context.analysis_file)
def _check_unused_deps(self, compile_context):
"""Uses `product_deps_by_src` to check unused deps and warn or error."""
with self.context.new_workunit('unused-check', labels=[WorkUnitLabel.COMPILER]):
# Compute replacement deps.
replacement_deps = self._dep_analyzer.compute_unused_deps(compile_context.target)
if not replacement_deps:
return
# Warn or error for unused.
def joined_dep_msg(deps):
return '\n '.join('\'{}\','.format(dep.address.spec) for dep in sorted(deps))
flat_replacements = set(r for replacements in replacement_deps.values() for r in replacements)
replacements_msg = ''
if flat_replacements:
replacements_msg = 'Suggested replacements:\n {}\n'.format(joined_dep_msg(flat_replacements))
unused_msg = (
'unused dependencies:\n {}\n{}'
'(If you\'re seeing this message in error, you might need to '
'change the `scope` of the dependencies.)'.format(
joined_dep_msg(replacement_deps.keys()),
replacements_msg,
)
)
if self.get_options().unused_deps == 'fatal':
raise TaskError(unused_msg)
else:
self.context.log.warn('Target {} had {}\n'.format(
compile_context.target.address.spec, unused_msg))
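  # For a non-fatal `unused_deps` setting, the resulting warning reads roughly
  # as follows (target addresses invented for illustration):
  #
  #   Target src/java/org/foo:lib had unused dependencies:
  #     '3rdparty:unused-jar',
  #   Suggested replacements:
  #     '3rdparty:used-jar',
  #   (If you're seeing this message in error, you might need to change the `scope` of the dependencies.)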
def _upstream_analysis(self, compile_contexts, classpath_entries):
"""Returns tuples of classes_dir->analysis_file for the closure of the target."""
# Reorganize the compile_contexts by class directory.
compile_contexts_by_directory = {}
for compile_context in compile_contexts.values():
compile_contexts_by_directory[compile_context.classes_dir] = compile_context
# If we have a compile context for the target, include it.
for entry in classpath_entries:
if not entry.endswith('.jar'):
compile_context = compile_contexts_by_directory.get(entry)
if not compile_context:
self.context.log.debug('Missing upstream analysis for {}'.format(entry))
else:
yield compile_context.classes_dir, compile_context.analysis_file
def exec_graph_key_for_target(self, compile_target):
return "compile({})".format(compile_target.address.spec)
def _create_compile_jobs(self, classpath_products, compile_contexts, extra_compile_time_classpath,
invalid_targets, invalid_vts):
class Counter(object):
def __init__(self, size, initial=0):
self.size = size
self.count = initial
def __call__(self):
self.count += 1
return self.count
def format_length(self):
return len(str(self.size))
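    # For example, with 12 invalid target sets the counter pads to two digits,
    # so successive progress prefixes render as '[ 1/12] ', '[ 2/12] ', etc.:
    #
    #   counter = Counter(12)
    #   '[{}/{}] '.format(str(counter()).rjust(counter.format_length(), b' '), counter.size)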
counter = Counter(len(invalid_vts))
    def check_cache(vts):
      """Manually checks the artifact cache (usually immediately before compilation).

      Returns true if the cache was hit successfully, indicating that no compilation is necessary.
      """
if not self.artifact_cache_reads_enabled():
return False
cached_vts, _, _ = self.check_artifact_cache([vts])
if not cached_vts:
self.context.log.debug('Missed cache during double check for {}'
.format(vts.target.address.spec))
return False
assert cached_vts == [vts], (
'Cache returned unexpected target: {} vs {}'.format(cached_vts, [vts])
)
self.context.log.info('Hit cache during double check for {}'.format(vts.target.address.spec))
counter()
return True
    def should_compile_incrementally(vts, ctx):
      """Check to see if the compile should try to re-use the existing analysis.

      Returns true if we should try to compile the target incrementally.
      """
      if not vts.is_incremental:
        return False
      if not self._clear_invalid_analysis:
        return True
      # Use the per-job `ctx` rather than closing over the loop variable
      # `compile_context` below: that name is rebound on every iteration, so by
      # the time a job actually runs it would always refer to the last target's
      # context rather than this job's.
      return os.path.exists(ctx.analysis_file)
def work_for_vts(vts, ctx):
progress_message = ctx.target.address.spec
# Capture a compilation log if requested.
log_file = ctx.log_file if self._capture_log else None
# Double check the cache before beginning compilation
hit_cache = check_cache(vts)
if not hit_cache:
# Compute the compile classpath for this target.
cp_entries = [compile_context.classes_dir]
cp_entries.extend(ClasspathUtil.compute_classpath(ctx.dependencies(self._dep_context),
classpath_products,
extra_compile_time_classpath,
self._confs))
# TODO: always provide transitive analysis, but not always all classpath entries?
upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))
# Write analysis to a temporary file, and move it to the final location on success.
tmp_analysis_file = "{}.tmp".format(ctx.analysis_file)
        if should_compile_incrementally(vts, ctx):
# If this is an incremental compile, rebase the analysis to our new classes directory.
self._analysis_tools.rebase_from_path(ctx.analysis_file,
tmp_analysis_file,
vts.previous_results_dir,
vts.results_dir)
else:
# Otherwise, simply ensure that it is empty.
safe_delete(tmp_analysis_file)
tgt, = vts.targets
fatal_warnings = self._compute_language_property(tgt, lambda x: x.fatal_warnings)
self._compile_vts(vts,
ctx.sources,
tmp_analysis_file,
upstream_analysis,
cp_entries,
ctx.classes_dir,
log_file,
progress_message,
tgt.platform,
fatal_warnings,
counter)
os.rename(tmp_analysis_file, ctx.analysis_file)
self._analysis_tools.relativize(ctx.analysis_file, ctx.portable_analysis_file)
# Write any additional resources for this target to the target workdir.
self.write_extra_resources(ctx)
# Jar the compiled output.
self._create_context_jar(ctx)
# Update the products with the latest classes.
self._register_vts([ctx])
# Once products are registered, check for unused dependencies (if enabled).
if not hit_cache and self._unused_deps_check_enabled:
self._check_unused_deps(ctx)
jobs = []
invalid_target_set = set(invalid_targets)
for ivts in invalid_vts:
# Invalidated targets are a subset of relevant targets: get the context for this one.
compile_target = ivts.targets[0]
compile_context = compile_contexts[compile_target]
compile_target_closure = compile_target.closure()
      # Dependencies of the current target which are invalid for this chunk.
invalid_dependencies = (compile_target_closure & invalid_target_set) - [compile_target]
jobs.append(Job(self.exec_graph_key_for_target(compile_target),
functools.partial(work_for_vts, ivts, compile_context),
[self.exec_graph_key_for_target(target) for target in invalid_dependencies],
self._size_estimator(compile_context.sources),
# If compilation and analysis work succeeds, validate the vts.
# Otherwise, fail it.
on_success=ivts.update,
on_failure=ivts.force_invalidate))
return jobs
def _create_context_jar(self, compile_context):
"""Jar up the compile_context to its output jar location.
TODO(stuhood): In the medium term, we hope to add compiler support for this step, which would
allow the jars to be used as compile _inputs_ as well. Currently using jar'd compile outputs as
compile inputs would make the compiler's analysis useless.
see https://github.com/twitter-forks/sbt/tree/stuhood/output-jars
"""
root = compile_context.classes_dir
with compile_context.open_jar(mode='w') as jar:
for abs_sub_dir, dirnames, filenames in safe_walk(root):
for name in dirnames + filenames:
abs_filename = os.path.join(abs_sub_dir, name)
arcname = fast_relpath(abs_filename, root)
jar.write(abs_filename, arcname)
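  # For a classes_dir of '/abs/classes', a classfile '/abs/classes/org/foo/Bar.class'
  # is stored in the jar under the arcname 'org/foo/Bar.class', preserving the JVM
  # package layout. A standalone equivalent of the relpath step (paths hypothetical):
  #
  #   fast_relpath('/abs/classes/org/foo/Bar.class', '/abs/classes')
  #   # -> 'org/foo/Bar.class'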
def validate_analysis(self, path):
"""Throws a TaskError for invalid analysis files."""
try:
self._analysis_parser.validate_analysis(path)
except Exception as e:
if self._clear_invalid_analysis:
self.context.log.warn("Invalid analysis detected at path {} ... pants will remove these "
"automatically, but\nyou may experience spurious warnings until "
"clean-all is executed.\n{}".format(path, e))
safe_delete(path)
else:
raise TaskError("An internal build directory contains invalid/mismatched analysis: please "
"run `clean-all` if your tools versions changed recently:\n{}".format(e))
def _compute_sources_for_target(self, target):
"""Computes and returns the sources (relative to buildroot) for the given target."""
def resolve_target_sources(target_sources):
resolved_sources = []
for tgt in target_sources:
if tgt.has_sources():
resolved_sources.extend(tgt.sources_relative_to_buildroot())
return resolved_sources
sources = [s for s in target.sources_relative_to_buildroot() if self._sources_predicate(s)]
# TODO: Make this less hacky. Ideally target.java_sources will point to sources, not targets.
if hasattr(target, 'java_sources') and target.java_sources:
sources.extend(resolve_target_sources(target.java_sources))
return sources
  def _compute_language_property(self, target, selector):
    """Computes a language property setting for the given target sources.

    :param target: The target whose language property will be calculated.
    :param selector: A function that takes a target or platform and returns the boolean value of the
property for that target or platform, or None if that target or platform does
not directly define the property.
If the target does not override the language property, returns true iff the property
is true for any of the matched languages for the target.
"""
if selector(target) is not None:
return selector(target)
prop = False
if target.has_sources('.java'):
prop |= selector(Java.global_instance())
if target.has_sources('.scala'):
prop |= selector(ScalaPlatform.global_instance())
return prop
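  # Illustration (targets hypothetical): for a target with both .java and .scala
  # sources and no explicit setting of its own, the computed property is the OR
  # of the two platform defaults:
  #
  #   self._compute_language_property(mixed_target, lambda x: x.fatal_warnings)
  #   # == Java.global_instance().fatal_warnings or ScalaPlatform.global_instance().fatal_warnings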
def _compute_extra_classpath(self, extra_compile_time_classpath_elements):
"""Compute any extra compile-time-only classpath elements.
TODO(benjy): Model compile-time vs. runtime classpaths more explicitly.
"""
def extra_compile_classpath_iter():
for conf in self._confs:
for jar in extra_compile_time_classpath_elements:
yield (conf, jar)
return list(extra_compile_classpath_iter())
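  # For self._confs == ('default',) and two extra elements jar1 and jar2, this
  # returns (values invented for illustration):
  #
  #   [('default', jar1), ('default', jar2)]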
| gmalmquist/pants | src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile.py | Python | apache-2.0 | 38,176 |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
SYSTEM_RUN = "system.Run"
SYSTEM_EXPERIMENT = "system.Experiment"
SYSTEM_PIPELINE = "system.Pipeline"
SYSTEM_METRICS = "system.Metrics"
_DEFAULT_SCHEMA_VERSION = "0.0.1"
SCHEMA_VERSIONS = {
SYSTEM_RUN: _DEFAULT_SCHEMA_VERSION,
SYSTEM_EXPERIMENT: _DEFAULT_SCHEMA_VERSION,
SYSTEM_PIPELINE: _DEFAULT_SCHEMA_VERSION,
SYSTEM_METRICS: _DEFAULT_SCHEMA_VERSION,
}
# The EXPERIMENT_METADATA is needed until we support context deletion in the backend service.
# TODO: delete EXPERIMENT_METADATA once backend supports context deletion.
EXPERIMENT_METADATA = {"experiment_deleted": False}
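# A minimal sketch of how these constants might be consumed when stamping a new
# metadata resource; the surrounding variables are hypothetical, not part of
# this module:
#
#   schema_title = SYSTEM_RUN
#   schema_version = SCHEMA_VERSIONS.get(schema_title, _DEFAULT_SCHEMA_VERSION)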
| googleapis/python-aiplatform | google/cloud/aiplatform/metadata/constants.py | Python | apache-2.0 | 1,194 |