repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
open-power-ref-design-toolkit/cluster-genesis | software/paie112.py | 1 | 74088 | #! /usr/bin/env python
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import nested_scopes, generators, division, absolute_import, \
with_statement, print_function, unicode_literals
import argparse
import glob
import os
import platform
import re
import sys
from shutil import copy2
import calendar
import time
import yaml
import json
from getpass import getpass
import pwd
import grp
import click
import lib.logger as logger
from repos import PowerupRepo, PowerupRepoFromDir, PowerupYumRepoFromRepo, \
PowerupAnaRepoFromRepo, PowerupRepoFromRpm, setup_source_file, \
PowerupPypiRepoFromRepo, get_name_dir
from software_hosts import get_ansible_inventory, validate_software_inventory
from lib.utilities import sub_proc_display, sub_proc_exec, heading1, Color, \
get_selection, get_yesno, rlinput, bold, ansible_pprint, replace_regex, \
firewall_add_services
from lib.genesis import GEN_SOFTWARE_PATH, get_ansible_playbook_path
from nginx_setup import nginx_setup
class software(object):
""" Software installation class. The prep method is used to setup
repositories, download files to the installer node or perform other
initialization activities. The install method implements the actual
installation.
"""
def __init__(self, eval_ver=False, non_int=False):
self.log = logger.getlogger()
self.my_name = sys.modules[__name__].__name__
self.yum_powerup_repo_files = []
self.eval_ver = eval_ver
self.non_int = non_int
yaml.add_constructor(YAMLVault.yaml_tag, YAMLVault.from_yaml)
self.state = {'EPEL Repository': '-',
'CUDA Toolkit Repository': '-',
'PowerAI content': '-',
'PowerAI Base Repository': '-',
'PowerAIE license content': '-',
'Dependent Packages Repository': '-',
'Python Package Repository': '-',
'CUDA dnn content': '-',
'CUDA nccl2 content': '-',
'Anaconda content': '-',
'Anaconda Free Repository': '-',
'Anaconda Main Repository': '-',
'Conda-forge Repository': '-',
'Spectrum conductor content': '-',
'Spectrum conductor content entitlement': '-',
'Spectrum DLI content': '-',
'Spectrum DLI content entitlement': '-',
'Nginx Web Server': '-',
'Firewall': '-'}
self.repo_id = {'EPEL Repository': 'epel-ppc64le',
'CUDA Toolkit Repository': 'cuda',
'PowerAI Base Repository': 'powerai',
'Dependent Packages Repository': 'dependencies',
'Python Package Repository': 'pypi'}
try:
self.pkgs = yaml.full_load(open(GEN_SOFTWARE_PATH +
'pkg-lists112.yml'))
except IOError:
self.log.error('Error opening the pkg lists file (pkg-lists112.yml)')
sys.exit('Exit due to critical error')
if self.eval_ver:
try:
self.sw_vars = yaml.full_load(open(GEN_SOFTWARE_PATH +
'software-vars-eval.yml'))
except IOError:
# if no eval vars file exists, see if the licensed vars file exists
# and start with that
try:
self.sw_vars = yaml.full_load(open(GEN_SOFTWARE_PATH +
'software-vars.yml'))
except IOError:
self.log.info('Creating software vars yaml file')
self.sw_vars = {}
self.sw_vars['init-time'] = time.ctime()
self.README()
input('\nPress enter to continue')
# clear out any licensed version of PowerAI files
else:
self.sw_vars['content_files']['powerai-enterprise-license'] = ''
self.sw_vars['content_files']['spectrum-conductor'] = ''
self.sw_vars['content_files']['spectrum-conductor-entitlement'] = ''
self.sw_vars['content_files']['spectrum-dli'] = ''
self.sw_vars['content_files']['spectrum-dli-entitlement'] = ''
self.sw_vars['prep-timestamp'] = calendar.timegm(time.gmtime())
else:
try:
self.sw_vars = yaml.full_load(open(GEN_SOFTWARE_PATH +
'software-vars.yml'))
except IOError:
# if no licensed vars file exists, see if the eval vars file exists
# and start with that
try:
self.sw_vars = yaml.full_load(
open(GEN_SOFTWARE_PATH + 'software-vars-eval.yml'))
except IOError:
self.log.info('Creating software vars yaml file')
self.sw_vars = {}
self.sw_vars['init-time'] = time.ctime()
self.README()
input('\nPress enter to continue')
# clear out any eval version of PowerAI Enterprise files
else:
self.sw_vars['content_files']['powerai-enterprise-license'] = ''
self.sw_vars['content_files']['spectrum-conductor'] = ''
self.sw_vars['content_files']['spectrum-conductor-entitlement'] = ''
self.sw_vars['content_files']['spectrum-dli'] = ''
self.sw_vars['content_files']['spectrum-dli-entitlement'] = ''
self.sw_vars['prep-timestamp'] = calendar.timegm(time.gmtime())
if not isinstance(self.sw_vars, dict):
self.sw_vars = {}
self.sw_vars['init-time'] = time.ctime()
if 'prep-timestamp' not in self.sw_vars:
self.sw_vars['prep-timestamp'] = 0
if self.eval_ver:
self.eval_prep_timestamp = self.sw_vars['prep-timestamp']
try:
temp = yaml.full_load(open(GEN_SOFTWARE_PATH +
'software-vars.yml'))
self.lic_prep_timestamp = temp['prep-timestamp']
except (IOError, KeyError):
self.lic_prep_timestamp = 0
else:
self.lic_prep_timestamp = self.sw_vars['prep-timestamp']
try:
temp = yaml.full_load(open(GEN_SOFTWARE_PATH +
'software-vars-eval.yml'))
self.eval_prep_timestamp = temp['prep-timestamp']
except (IOError, KeyError):
self.eval_prep_timestamp = 0
if ('ana_powerup_repo_channels' not in self.sw_vars or not
isinstance(self.sw_vars['ana_powerup_repo_channels'], list)):
self.sw_vars['ana_powerup_repo_channels'] = []
if ('yum_powerup_repo_files' not in self.sw_vars or not
isinstance(self.sw_vars['yum_powerup_repo_files'], dict)):
self.sw_vars['yum_powerup_repo_files'] = {}
if ('content_files' not in self.sw_vars or not
isinstance(self.sw_vars['content_files'], dict)):
self.sw_vars['content_files'] = {}
self.epel_repo_name = 'epel-ppc64le'
self.sw_vars['epel_repo_name'] = self.epel_repo_name
self.rhel_ver = '7'
self.sw_vars['rhel_ver'] = self.rhel_ver
self.arch = 'ppc64le'
self.sw_vars['arch'] = self.arch
self.root_dir = '/srv/'
self.repo_dir = self.root_dir + 'repos/{repo_id}/rhel' + self.rhel_ver + \
'/{repo_id}'
# When searching for files on other web servers, the fileglobs are converted to
# regular expressions. An asterisk (*) after a bracket is converted to the
# regular expression [0-9]{0,3}. Other asterisks are converted to the regular
# expression .*
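# Illustrative sketch of that conversion (hypothetical helper, not code from
# this module): an asterisk following a bracket expression becomes
# '[0-9]{0,3}' and any other asterisk becomes '.*', so 'cuda-10-[0-9]*'
# maps to 'cuda-10-[0-9][0-9]{0,3}' and 'cudnn-10.0-*.tgz' maps to
# 'cudnn-10.0-.*.tgz':
#     regex = re.sub(r'(?<=\])\*', '[0-9]{0,3}', fileglob).replace('*', '.*')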
try:
file_lists = yaml.full_load(open(GEN_SOFTWARE_PATH +
'file-lists112.yml'))
except IOError:
self.log.error('Error while reading installation file lists for PowerAI Enterprise')
sys.exit('Exiting')
else:
if self.eval_ver:
self.globs = file_lists['globs_eval']
self.files = file_lists['files_eval']
else:
self.globs = file_lists['globs']
self.files = file_lists['files']
# If empty, initialize software_vars content and repo info
# from software server directory
update = False
for item in self.state:
if 'content' in item:
item_key = get_name_dir(item)
item_dir = item_key
if item_dir.endswith('-entitlement'):
item_dir = item_dir[:-12]
exists = glob.glob(f'/srv/{item_dir}/**/{self.files[item]}',
recursive=True)
if not exists:
exists = glob.glob(f'/srv/{item_dir}/**/{self.globs[item]}',
recursive=True)
if exists:
self.sw_vars['content_files'][item_key] = exists[0]
if item_key not in self.sw_vars['content_files']:
update = True
if exists:
self.sw_vars['content_files'][item_key] = exists[0]
else:
self.sw_vars['content_files'][item_key] = ''
if update:
self.log.info('Content installation pointers were updated.\n'
'To ensure content levels are correct, run\n'
'pup software --prep <module name>\n')
if 'ansible_inventory' not in self.sw_vars:
self.sw_vars['ansible_inventory'] = None
if 'ansible_become_pass' not in self.sw_vars:
self.sw_vars['ansible_become_pass'] = None
self.vault_pass = None
self.vault_pass_file = f'{GEN_SOFTWARE_PATH}.vault'
self.log.debug(f'software variables: {self.sw_vars}')
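# Note: __del__ below persists the accumulated software variables to the
# software-vars (or software-vars-eval) file so that state carries across the
# prep, init-clients and install phases, and removes any cached vault
# password file.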
def __del__(self):
if not os.path.exists(GEN_SOFTWARE_PATH):
os.mkdir(GEN_SOFTWARE_PATH)
if self.eval_ver:
with open(GEN_SOFTWARE_PATH + 'software-vars-eval.yml', 'w') as f:
f.write('# Do not edit this file. This file is autogenerated.\n')
with open(GEN_SOFTWARE_PATH + 'software-vars-eval.yml', 'a') as f:
yaml.dump(self.sw_vars, f, default_flow_style=False)
else:
with open(GEN_SOFTWARE_PATH + 'software-vars.yml', 'w') as f:
f.write('# Do not edit this file. This file is autogenerated.\n')
with open(GEN_SOFTWARE_PATH + 'software-vars.yml', 'a') as f:
yaml.dump(self.sw_vars, f, default_flow_style=False)
if os.path.isfile(self.vault_pass_file):
os.remove(self.vault_pass_file)
def README(self):
print(bold('\nPowerAI Enterprise software installer module'))
text = ('\nThis module installs the PowerAI Enterprise software '
'to a cluster of OpenPOWER nodes.\n\n'
'PowerAI Enterprise installation involves three steps:\n'
'\n 1 - Preparation. Prepares the installer node software server.\n'
' The preparation phase may be run multiple times if needed.\n'
' usage: pup software --prep paie112\n'
'\n 2 - Initialization of client nodes\n'
' usage: pup software --init-clients paie112\n'
'\n 3 - Installation. Install software on the client nodes\n'
' usage: pup software --install paie112\n\n'
'Before beginning, the following files should be extracted from the\n'
'PowerAI Enterprise binary file and present on this node:\n'
'- mldl-repo-local-5.4.0-*.ppc64le.rpm\n'
'- powerai-enterprise-license-1.1.2-*.ppc64le.rpm\n'
'- conductor2.3.0.0_ppc64le.bin\n'
'- conductor_entitlement.dat\n'
'- dli-1.2.1.0_ppc64le.bin\n'
'- dli_entitlement.dat\n\n'
'The following files must also be downloaded to this node:\n'
'- cudnn-10.0-linux-ppc64le-v7.3.1.20.tgz\n'
'- nccl_2.3.4-1+cuda10.0_ppc64le.txz\n'
'For installation status: pup software --status paie112\n'
'To redisplay this README: pup software --README paie112\n\n'
'Note: The \'pup\' cli supports tab autocompletion.\n\n')
print(text)
def status(self, which='all'):
self.status_prep(which)
def status_prep(self, which='all'):
def yum_repo_status(item):
repodata = glob.glob(self.repo_dir.format(repo_id=self.repo_id[item]) +
'/**/repodata', recursive=True)
sw_vars_data = (f'{self.repo_id[item]}-powerup.repo' in
self.sw_vars['yum_powerup_repo_files'])
if repodata and sw_vars_data:
self.state[item] = f'{item} is setup'
def content_status(item):
ver_mis = False
item_key = get_name_dir(item)
item_dir = item_key
if item_dir.endswith('-entitlement'):
item_dir = item_dir[:-12]
exists = glob.glob(f'/srv/{item_dir}/**/{self.globs[item]}',
recursive=True)
sw_vars_data = item_key in self.sw_vars['content_files']
if exists and sw_vars_data:
if self.files[item] in self.sw_vars['content_files'][item_key]:
self.state[item] = ('Present in the POWER-Up server')
else:
ver_mis = True
self.state[item] = (Color.yellow +
'Present but not at release level' +
Color.endc)
return ver_mis
ver_mis = False
for item in self.state:
self.state[item] = '-'
# Content files status
if 'content' in item:
ret = content_status(item)
ver_mis = ver_mis or ret
continue
# yum repos status
if item in self.repo_id:
if 'Python' in item:
if os.path.exists(f'/srv/repos/{self.repo_id[item]}/simple/') and \
len(os.listdir(f'/srv/repos/{self.repo_id[item]}/simple/')) >= 1:
self.state[item] = f'{item} is setup'
else:
yum_repo_status(item)
continue
# Firewall status
if item == 'Firewall':
cmd = 'firewall-cmd --list-all'
resp, err, rc = sub_proc_exec(cmd)
if re.search(r'services:\s+.+http', resp):
self.state[item] = "Running and configured for http"
continue
# Nginx web server status
if item == 'Nginx Web Server':
cmd = 'curl -I 127.0.0.1'
resp, err, rc = sub_proc_exec(cmd)
if 'HTTP/1.1 200 OK' in resp:
self.state[item] = 'Nginx is configured and running'
continue
# Anaconda Repo Free status
if item == 'Anaconda Free Repository':
repodata_noarch = glob.glob(f'/srv/repos/anaconda/pkgs/free'
'/noarch/repodata.json', recursive=True)
repodata = glob.glob(f'/srv/repos/anaconda/pkgs/free'
'/linux-ppc64le/repodata.json', recursive=True)
if repodata and repodata_noarch:
self.state[item] = f'{item} is setup'
continue
# Anaconda Main repo status
if item == 'Anaconda Main Repository':
repodata_noarch = glob.glob(f'/srv/repos/anaconda/pkgs/main'
'/noarch/repodata.json', recursive=True)
repodata = glob.glob(f'/srv/repos/anaconda/pkgs/main'
'/linux-ppc64le/repodata.json', recursive=True)
if repodata and repodata_noarch:
self.state[item] = f'{item} is setup'
continue
# Anaconda Conda-forge repo status
if item == 'Conda-forge Repository':
repodata = glob.glob(f'/srv/repos/anaconda/conda-forge'
'/noarch/repodata.json', recursive=True)
if repodata:
self.state[item] = f'{item} is setup'
continue
exists = True
if which == 'all':
heading1('Preparation Summary')
for item in self.state:
status = self.state[item]
it = (item + ' ')[:38]
print(f' {it:<39} : ' + status)
exists = exists and self.state[item] != '-'
gtg = 'Preparation complete. '
if ver_mis:
gtg += 'Some content is not at release level.'
for item in self.state.values():
if item == '-':
gtg = f'{Color.red}Preparation incomplete{Color.endc}'
print(f'\n{bold(gtg)}\n')
else:
exists = self.state[which] != '-'
return exists
def prep(self, eval_ver=False, non_int=False):
# Invoked with --prep flag
# Basic check of the state of yum repos
print()
self.sw_vars['prep-timestamp'] = calendar.timegm(time.gmtime())
self.log.info('Performing basic check of yum repositories')
cmd = 'yum repolist --noplugins'
resp, err, rc = sub_proc_exec(cmd)
yum_err = re.search(r'\[Errno\s+\d+\]', err)
if rc:
self.log.error(f'Failure running "yum repolist" :{rc}')
elif yum_err:
self.log.error(err)
self.log.error(f'yum error: {yum_err.group(0)}')
if rc or yum_err:
self.log.error('There is a problem with yum or one or more of the yum '
'repositories. \n')
self.log.info('Cleaning yum caches')
cmd = 'yum clean all'
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
self.log.error('An error occurred while cleaning the yum repositories\n'
'POWER-Up is unable to continue.')
sys.exit('Exiting')
# Setup firewall to allow http
heading1('Setting up firewall')
firewall_add_services('http')
self.status_prep(which='Firewall')
if self.state['Firewall'] == '-':
self.log.info('Failed to configure firewall')
else:
self.log.info(self.state['Firewall'])
# nginx setup
heading1('Set up Nginx')
nginx_setup()
self.status_prep(which='Nginx Web Server')
if self.state['Nginx Web Server'] == '-':
self.log.info('nginx web server is not running')
else:
self.log.info(self.state['Nginx Web Server'])
# Get PowerAI base
name = 'PowerAI content'
heading1('Setting up the PowerAI base repository\n')
pai_src = self.globs['PowerAI content']
pai_url = '' # No default public url exists
repo_id = 'powerai'
repo_name = 'IBM PowerAI Base'
if f'{name}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{name}_alt_url']
else:
alt_url = 'http://'
exists = self.status_prep(which='PowerAI Base Repository')
if exists:
self.log.info(f'The {name} and {repo_id} repository exists already '
'in the POWER-Up server.')
if not exists or get_yesno(f'Recopy {name} and Recreate the {repo_id} '
'repository '):
repo = PowerupRepoFromRpm(repo_id, repo_name)
src_path, dest_path, state = setup_source_file(repo_id, pai_src, pai_url,
alt_url=alt_url)
if src_path:
print(f'Creating {repo_id} repository.')
if 'http' in src_path:
self.sw_vars[f'{name}_alt_url'] = os.path.dirname(src_path) + '/'
self.sw_vars['content_files'][get_name_dir(repo_id)] = dest_path
repodata_dir = repo.extract_rpm(dest_path)
if repodata_dir:
content = repo.get_yum_dotrepo_content(repo_dir=repodata_dir,
gpgcheck=0, local=True)
else:
content = repo.get_yum_dotrepo_content(gpgcheck=0, local=True)
repo.create_meta()
repo.write_yum_dot_repo_file(content)
content = repo.get_yum_dotrepo_content(repo_dir=repodata_dir,
gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
self.status_prep(which='PowerAI Base Repository')
else:
self.log.info('No source selected. Skipping PowerAI repository creation.')
# Get PowerAI Enterprise license file
name = 'PowerAIE license content'
heading1(f'Set up {name.title()} \n')
lic_src = self.globs[name]
exists = self.status_prep(name)
lic_url = ''
if f'{name}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{name}_alt_url']
else:
alt_url = 'http://'
if exists:
self.log.info('PowerAI Enterprise license exists already in the POWER-Up server')
if not exists or get_yesno(f'Copy a new {name.title()} file '):
src_path, dest_path, state = setup_source_file(name, lic_src, lic_url,
alt_url=alt_url)
if src_path and 'http' in src_path:
self.sw_vars[f'{name}_alt_url'] = os.path.dirname(src_path) + '/'
if dest_path:
self.sw_vars['content_files'][get_name_dir(name)] = dest_path
# Get Spectrum Conductor
name = 'Spectrum conductor content'
heading1(f'Set up {name.title()} \n')
spc_src = self.globs[name]
entitlement = self.globs[name + ' entitlement']
exists = self.status_prep(name)
spc_url = ''
if f'{name}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{name}_alt_url']
else:
alt_url = 'http://'
if exists:
self.log.info('Spectrum conductor content exists already in the POWER-Up server')
if not exists or get_yesno(f'Copy a new {name.title()} file '):
src_path, dest_path, state = setup_source_file(name, spc_src, spc_url,
alt_url=alt_url, src2=entitlement)
if src_path and 'http' in src_path:
self.sw_vars[f'{name}_alt_url'] = os.path.dirname(src_path) + '/'
if dest_path:
self.sw_vars['content_files'][get_name_dir(name)] = dest_path
if state:
self.sw_vars['content_files'][get_name_dir(name) + '-entitlement'] = (
os.path.dirname(dest_path) + '/' + entitlement)
# Get Spectrum DLI
name = 'Spectrum DLI content'
heading1(f'Set up {name.title()} \n')
spdli_src = self.globs[name]
entitlement = self.globs[name + ' entitlement']
exists = self.status_prep(name)
spdli_url = ''
if f'{name}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{name}_alt_url']
else:
alt_url = 'http://'
if exists:
self.log.info('Spectrum DLI content exists already in the POWER-Up server')
if not exists or get_yesno(f'Copy a new {name.title()} file '):
src_path, dest_path, state = setup_source_file(name, spdli_src, spdli_url,
alt_url=alt_url, src2=entitlement)
if src_path and 'http' in src_path:
self.sw_vars[f'{name}_alt_url'] = os.path.dirname(src_path) + '/'
if dest_path:
self.sw_vars['content_files'][get_name_dir(name)] = dest_path
if state:
self.sw_vars['content_files'][get_name_dir(name) + '-entitlement'] = (
os.path.dirname(dest_path) + '/' + entitlement)
# Setup repository for cuda packages. The Cuda public repo is enabled
# and the package list can be downloaded from there or alternately the
# cuda packages repo can be created from a local directory or an
# existing repository on another node.
repo_id = 'cuda'
repo_name = 'Cuda toolkit'
baseurl = 'http://developer.download.nvidia.com/compute/cuda/repos/rhel7/ppc64le'
gpgkey = f'{baseurl}/7fa2af80.pub'
heading1(f'Set up {repo_name} repository')
# list to str
pkg_list = ' '.join(self.pkgs['cuda_pkgs'])
if f'{repo_id}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{repo_id}_alt_url']
else:
alt_url = None
# Enable the public repo
repo_cuda = PowerupRepo(repo_id, repo_name)
dot_repo_content = repo_cuda.get_yum_dotrepo_content(url=baseurl, gpgkey=gpgkey)
repo_cuda.write_yum_dot_repo_file(dot_repo_content)
exists = self.status_prep(which='CUDA Toolkit Repository')
if exists:
self.log.info(f'The {repo_name} repository exists already'
' in the POWER-Up server')
pr_str = (f'\nDo you want to resync the {repo_name} repository'
' at this time\n')
else:
pr_str = (f'\nDo you want to create the {repo_name} repository'
' at this time\n')
ch = 'S'
if get_yesno(prompt=pr_str, yesno='Y/n'):
if platform.machine() == self.arch:
ch, item = get_selection('Sync required packages from public repo\n'
'Create from package files in a local Directory\n'
'Sync from an alternate Repository\n'
'Skip',
'E\nD\nR\nS',
'Repository source? ')
else:
ch, item = get_selection('Create from package files in a local Directory\n'
'Sync from an alternate Repository\n'
'Skip',
'D\nR\nS',
'Repository source? ')
if ch == 'E':
repo = PowerupRepo(repo_id, repo_name)
repo_dir = repo.get_repo_dir()
self._add_dependent_packages(repo_dir, pkg_list)
repo.create_meta()
content = repo.get_yum_dotrepo_content(gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
elif ch == 'D':
repo = PowerupRepoFromDir(repo_id, repo_name)
repo_dir = repo.get_repo_dir()
if f'{repo_id}_src_dir' in self.sw_vars:
src_dir = self.sw_vars[f'{repo_id}_src_dir']
else:
src_dir = None
src_dir, dest_dir = repo.copy_dirs(src_dir)
if src_dir:
self.sw_vars[f'{repo_id}_src_dir'] = src_dir
repo.create_meta()
content = repo.get_yum_dotrepo_content(gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
elif ch == 'R':
if f'{repo_id}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{repo_id}_alt_url']
else:
alt_url = None
repo = PowerupYumRepoFromRepo(repo_id, repo_name)
repo_dir = repo.get_repo_dir()
url = repo.get_repo_url(baseurl, alt_url, contains=[repo_id],
filelist=['cuda-10-*-*'])
if url:
if not url == baseurl:
self.sw_vars[f'{repo_id}_alt_url'] = url
# Set up access to the repo
content = repo.get_yum_dotrepo_content(url, gpgcheck=0)
repo.write_yum_dot_repo_file(content)
repo.sync()
repo.create_meta()
# Prep setup of POWER-Up client access to the repo copy
content = repo.get_yum_dotrepo_content(gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
self.log.info('Repository setup complete')
else:
print(f'{repo_name} repository not updated')
if ch != 'S':
repo_dir += '/cuda-[1-9][0-9].[0-9]*.[0-9]*'
files = glob.glob(repo_dir, recursive=True)
if files:
self.sw_vars['cuda'] = re.search(r'cuda-\d+\.\d+\.\d+',
' '.join(files)).group(0)
else:
self.log.error('No cuda toolkit file found in cuda repository')
# Get cudnn tar file
name = 'CUDA dnn content'
heading1(f'Set up {name.title()} \n')
cudnn_src = self.globs[name]
cudnn_url = ''
if f'{name}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{name}_alt_url']
else:
alt_url = None
exists = self.status_prep(name)
if exists:
self.log.info('CUDA dnn content exists already in the POWER-Up server')
if not exists or get_yesno(f'Copy a new {name.title()} file '):
src_path, dest_path, state = setup_source_file(name, cudnn_src, cudnn_url,
alt_url=alt_url)
if dest_path:
self.sw_vars['content_files'][get_name_dir(name)] = dest_path
if src_path and 'http' in src_path:
self.sw_vars[f'{name}_alt_url'] = os.path.dirname(src_path) + '/'
# Get cuda nccl2 tar file
name = 'CUDA nccl2 content'
heading1(f'Set up {name.title()} \n')
nccl2_src = self.globs[name]
nccl2_url = ''
if f'{name}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{name}_alt_url']
else:
alt_url = None
exists = self.status_prep(name)
if exists:
self.log.info('CUDA nccl2 content exists already in the POWER-Up server')
if not exists or get_yesno(f'Copy a new {name.title()} file '):
src_path, dest_path, state = setup_source_file(name, nccl2_src, nccl2_url,
alt_url=alt_url)
if dest_path:
self.sw_vars['content_files'][get_name_dir(name)] = dest_path
if src_path and 'http' in src_path:
self.sw_vars[f'{name}_alt_url'] = os.path.dirname(src_path) + '/'
# Set up the repository for Red Hat dependent packages. This is intended to deal
# specifically with Red Hat packages requiring a Red Hat subscription for access;
# however, dependent packages can come from any YUM repository enabled on the
# POWER-Up installer node. Alternatively, the dependent packages repo can be
# created from a local directory or an existing repository on another node.
repo_id = 'dependencies'
repo_name = 'Dependencies'
baseurl = ''
heading1(f'Set up {repo_name} repository')
# list to str
dep_list = ' '.join(self.pkgs['yum_pkgs'])
file_more = GEN_SOFTWARE_PATH + 'dependent-packages.list'
if os.path.isfile(file_more):
try:
with open(file_more, 'r') as f:
more = f.read()
except IOError:
self.log.error(f'Error reading {file_more}')
more = ''
else:
more = more.replace(',', ' ')
more = more.replace('\n', ' ')
else:
more = ''
if f'{repo_id}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{repo_id}_alt_url']
else:
alt_url = None
exists = self.status_prep(which='Dependent Packages Repository')
if exists:
self.log.info(f'The {repo_name} repository exists already'
' in the POWER-Up server')
pr_str = (f'\nDo you want to resync the {repo_name} repository'
' at this time\n')
else:
pr_str = (f'\nDo you want to create the {repo_name} repository'
' at this time\n')
ch = 'S'
if get_yesno(prompt=pr_str, yesno='Y/n'):
if platform.machine() == self.arch:
ch, item = get_selection('Sync required dependent packages from Enabled YUM repos\n'
'Create from package files in a local Directory\n'
'Sync from an alternate Repository\n'
'Skip',
'E\nD\nR\nS',
'Repository source? ')
else:
ch, item = get_selection('Create from package files in a local Directory\n'
'Sync from an alternate Repository\n'
'Skip',
'D\nR\nS',
'Repository source? ')
if ch == 'E':
repo = PowerupRepo(repo_id, repo_name)
repo_dir = repo.get_repo_dir()
self._add_dependent_packages(repo_dir, dep_list)
self._add_dependent_packages(repo_dir, more)
repo.create_meta()
content = repo.get_yum_dotrepo_content(gpgcheck=0, local=True)
repo.write_yum_dot_repo_file(content)
content = repo.get_yum_dotrepo_content(gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
elif ch == 'D':
repo = PowerupRepoFromDir(repo_id, repo_name)
if f'{repo_id}_src_dir' in self.sw_vars:
src_dir = self.sw_vars[f'{repo_id}_src_dir']
else:
src_dir = None
src_dir, dest_dir = repo.copy_dirs(src_dir)
if src_dir:
self.sw_vars[f'{repo_id}_src_dir'] = src_dir
repo.create_meta()
content = repo.get_yum_dotrepo_content(gpgcheck=0, local=True)
repo.write_yum_dot_repo_file(content)
content = repo.get_yum_dotrepo_content(gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
elif ch == 'R':
if f'{repo_id}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{repo_id}_alt_url']
else:
alt_url = None
repo = PowerupYumRepoFromRepo(repo_id, repo_name)
url = repo.get_repo_url(baseurl, alt_url, contains=[repo_id],
filelist=['bzip2-*'])
if url:
if not url == baseurl:
self.sw_vars[f'{repo_id}_alt_url'] = url
# Set up access to the repo
content = repo.get_yum_dotrepo_content(url, gpgcheck=0)
repo.write_yum_dot_repo_file(content)
repo.sync()
repo.create_meta()
# Setup local access to the new repo copy in /srv/repo/
if platform.machine() == self.arch:
content = repo.get_yum_dotrepo_content(gpgcheck=0, local=True)
repo.write_yum_dot_repo_file(content)
# Prep setup of POWER-Up client access to the repo copy
content = repo.get_yum_dotrepo_content(gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
self.log.info('Repository setup complete')
else:
print(f'{repo_name} repository not updated')
# Get Anaconda
ana_name = 'Anaconda content'
ana_src = self.globs[ana_name]
ana_url = 'https://repo.continuum.io/archive/'
if f'{ana_name}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{ana_name}_alt_url']
else:
alt_url = 'http://'
exists = self.status_prep(which=ana_name)
heading1('Set up Anaconda\n')
if exists:
self.log.info(f'The {ana_name} exists already '
'in the POWER-Up server.')
if not exists or get_yesno(f'Recopy {ana_name} '):
src_path, dest_path, state = setup_source_file(ana_name, ana_src, ana_url,
alt_url=alt_url)
if dest_path:
self.sw_vars['content_files'][get_name_dir(ana_name)] = dest_path
if src_path and 'http' in src_path:
self.sw_vars[f'{ana_name}_alt_url'] = os.path.dirname(src_path) + '/'
# Setup Anaconda Free Repo. (not a YUM repo)
repo_id = 'anaconda'
repo_name = 'Anaconda Free Repository'
baseurl = 'https://repo.continuum.io/pkgs/free/linux-ppc64le/'
heading1(f'Set up {repo_name}\n')
vars_key = get_name_dir(repo_name) # format the name
if f'{vars_key}-alt-url' in self.sw_vars:
alt_url = self.sw_vars[f'{vars_key}-alt-url']
else:
alt_url = None
exists = self.status_prep(which='Anaconda Free Repository')
if exists:
self.log.info('The Anaconda Repository exists already'
' in the POWER-Up server\n')
repo = PowerupAnaRepoFromRepo(repo_id, repo_name)
ch = repo.get_action(exists)
if ch in 'Y':
# if not exists or ch == 'F':
url = repo.get_repo_url(baseurl, alt_url, contains=['free', 'linux',
'ppc64le'], excludes=['noarch', 'main'],
filelist=['conda-4.3*'])
if url:
if not url == baseurl:
self.sw_vars[f'{vars_key}-alt-url'] = url
dest_dir = repo.sync_ana(url)
dest_dir = dest_dir[4 + dest_dir.find('/srv'):5 + dest_dir.find('free')]
# form .condarc channel entry. Note that conda adds
# the corresponding 'noarch' channel automatically.
channel = f' - http://{{{{ host_ip.stdout }}}}{dest_dir}'
if channel not in self.sw_vars['ana_powerup_repo_channels']:
self.sw_vars['ana_powerup_repo_channels'].append(channel)
noarch_url = os.path.split(url.rstrip('/'))[0] + '/noarch/'
rejlist = ','.join(self.pkgs['anaconda_free_pkgs']['reject_list'])
repo.sync_ana(noarch_url, rejlist=rejlist)
# Setup Anaconda Main Repo. (not a YUM repo)
repo_id = 'anaconda'
repo_name = 'Anaconda Main Repository'
baseurl = 'https://repo.continuum.io/pkgs/main/linux-ppc64le/'
heading1(f'Set up {repo_name}\n')
vars_key = get_name_dir(repo_name) # format the name
if f'{vars_key}-alt-url' in self.sw_vars:
alt_url = self.sw_vars[f'{vars_key}-alt-url']
else:
alt_url = None
exists = self.status_prep(which='Anaconda Main Repository')
if exists:
self.log.info('The Anaconda Repository exists already'
' in the POWER-Up server\n')
repo = PowerupAnaRepoFromRepo(repo_id, repo_name)
ch = repo.get_action(exists)
if ch in 'Y':
url = repo.get_repo_url(baseurl, alt_url, contains=['main', 'linux',
'ppc64le'], excludes=['noarch', 'free'],
filelist=['numpy-1.15*'])
if url:
if not url == baseurl:
self.sw_vars[f'{vars_key}-alt-url'] = url
al = ','.join(self.pkgs['anaconda_main_pkgs']['accept_list'])
rl = ','.join(self.pkgs['anaconda_main_pkgs']['reject_list'])
dest_dir = repo.sync_ana(url, acclist=al)
# dest_dir = repo.sync_ana(url)
dest_dir = dest_dir[4 + dest_dir.find('/srv'):5 + dest_dir.find('main')]
# form .condarc channel entry. Note that conda adds
# the corresponding 'noarch' channel automatically.
channel = f' - http://{{{{ host_ip.stdout }}}}{dest_dir}'
if channel not in self.sw_vars['ana_powerup_repo_channels']:
self.sw_vars['ana_powerup_repo_channels'].insert(0, channel)
noarch_url = os.path.split(url.rstrip('/'))[0] + '/noarch/'
repo.sync_ana(noarch_url, rejlist=rl)
# Setup Anaconda conda-forge Repo. (not a YUM repo)
repo_id = 'anaconda'
repo_name = 'Conda-forge noarch Repository'
baseurl = 'https://conda.anaconda.org/conda-forge/noarch/'
heading1(f'Set up {repo_name}\n')
vars_key = get_name_dir(repo_name) # format the name
if f'{vars_key}-alt-url' in self.sw_vars:
alt_url = self.sw_vars[f'{vars_key}-alt-url']
else:
alt_url = None
exists = self.status_prep(which='Conda-forge Repository')
if exists:
self.log.info('The Conda-forge Repository exists already'
' in the POWER-Up server\n')
repo = PowerupAnaRepoFromRepo(repo_id, repo_name)
ch = repo.get_action(exists)
if ch in 'Y':
url = repo.get_repo_url(baseurl, alt_url, contains=['noarch'],
excludes=['main'],
filelist=['configparser-3.5*'])
if url:
if not url == baseurl:
self.sw_vars[f'{vars_key}-alt-url'] = url
al = ','.join(self.pkgs['conda_forge_noarch_pkgs']['accept_list'])
dest_dir = repo.sync_ana(url, acclist=al)
dest_dir = dest_dir[4 + dest_dir.find('/srv'):7 + dest_dir.find('noarch')]
# form .condarc channel entry. Note that conda adds
# the corresponding 'noarch' channel automatically.
channel = f' - http://{{{{ host_ip.stdout }}}}{dest_dir}'
if channel not in self.sw_vars['ana_powerup_repo_channels']:
self.sw_vars['ana_powerup_repo_channels'].insert(0, channel)
# Setup Python package repository. (pypi)
repo_id = 'pypi'
repo_name = 'Python Package'
baseurl = 'https://pypi.org'
heading1(f'Set up {repo_name} repository\n')
if f'{repo_id}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{repo_id}_alt_url']
else:
alt_url = None
exists = self.status_prep(which='Python Package Repository')
if exists:
self.log.info('The Python Package Repository exists already'
' in the POWER-Up server')
repo = PowerupPypiRepoFromRepo(repo_id, repo_name)
ch = repo.get_action(exists, exists_prompt_yn=True)
pkg_list = ' '.join(self.pkgs['python_pkgs'])
if not exists or ch == 'Y':
pkg_list = ' '.join(self.pkgs['python_pkgs'])
pkg3_list = ' '.join(self.pkgs['python3_specific_pkgs'])
url = repo.get_repo_url(baseurl, alt_url, name=repo_name,
contains=repo_id, filelist=['Flask-*'])
if url == baseurl:
repo.sync(pkg_list)
repo.sync(pkg3_list, py_ver=36)
elif url:
self.sw_vars[f'{repo_id}_alt_url'] = url
repo.sync(pkg_list, url + 'simple')
repo.sync(pkg3_list, url + 'simple', py_ver=36)
# Setup EPEL Repo
repo_id = 'epel-ppc64le'
repo_name = 'Extra Packages for Enterprise Linux 7 (EPEL) - ppc64le'
baseurl = 'https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=ppc64le'
gpgkey = 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7'
heading1(f'Set up {repo_name} repository')
if f'{repo_id}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{repo_id}_alt_url']
else:
alt_url = None
exists = self.status_prep(which='EPEL Repository')
if exists:
self.log.info('The EPEL Repository exists already'
' in the POWER-Up server')
repo = PowerupYumRepoFromRepo(repo_id, repo_name)
ch = repo.get_action(exists)
if ch in 'Y':
url = repo.get_repo_url(baseurl, alt_url, contains=[repo_id],
filelist=['epel-release-*'])
if url:
if not url == baseurl:
self.sw_vars[f'{repo_id}_alt_url'] = url
content = repo.get_yum_dotrepo_content(url, gpgkey=gpgkey)
else:
content = repo.get_yum_dotrepo_content(url, gpgkey=gpgkey,
metalink=True)
repo.write_yum_dot_repo_file(content)
if url:
repo.sync()
# recheck status after sync.
exists = self.status_prep(which='EPEL Repository')
if not exists:
repo.create_meta()
else:
repo.create_meta(update=True)
content = repo.get_yum_dotrepo_content(gpgcheck=0, local=True)
repo.write_yum_dot_repo_file(content)
content = repo.get_yum_dotrepo_content(gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
# Create custom repositories
heading1('Create custom repositories')
if get_yesno('Would you like to create a custom repository '):
repo_id = input('Enter a repo id (yum short name): ')
repo_name = input('Enter a repo name (Descriptive name): ')
ch, item = get_selection('Create from files in a directory\n'
'Create from an RPM file\n'
'Create from an existing repository',
'dir\nrpm\nrepo',
'Repository source? ', allow_none=True)
if ch != 'N':
if ch == 'rpm':
repo = PowerupRepoFromRpm(repo_id, repo_name)
if f'{repo_id}_src_rpm_dir' in self.sw_vars:
src_path = self.sw_vars[f'{repo_id}_src_rpm_dir']
else:
# default is to search recursively under all /home/
# directories
src_path = '/home/**/*.rpm'
rpm_path = repo.get_rpm_path(src_path)
if rpm_path:
self.sw_vars[f'{repo_id}_src_rpm_dir'] = rpm_path
repo.copy_rpm()
repodata_dir = repo.extract_rpm()
if repodata_dir:
content = repo.get_yum_dotrepo_content(
repo_dir=repodata_dir, gpgcheck=0, local=True)
else:
content = repo.get_yum_dotrepo_content(gpgcheck=0,
local=True)
repo.create_meta()
repo.write_yum_dot_repo_file(content)
content = repo.get_yum_dotrepo_content(
repo_dir=repodata_dir, gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
else:
self.log.info('No path chosen. Skipping create custom '
'repository.')
elif ch == 'dir':
repo = PowerupRepoFromDir(repo_id, repo_name)
if f'{repo_id}_src_dir' in self.sw_vars:
src_dir = self.sw_vars[f'{repo_id}_src_dir']
else:
src_dir = None
src_dir, dest_dir = repo.copy_dirs(src_dir)
if src_dir:
self.sw_vars[f'{repo_id}_src_dir'] = src_dir
repo.create_meta()
content = repo.get_yum_dotrepo_content(gpgcheck=0,
local=True)
repo.write_yum_dot_repo_file(content)
content = repo.get_yum_dotrepo_content(gpgcheck=0,
client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
elif ch == 'repo':
baseurl = 'http://'
if f'{repo_id}_alt_url' in self.sw_vars:
alt_url = self.sw_vars[f'{repo_id}_alt_url']
else:
alt_url = None
repo = PowerupYumRepoFromRepo(repo_id, repo_name)
new = True
if os.path.isfile(f'/etc/yum.repos.d/{repo_id}.repo') and \
os.path.exists(repo.get_repo_dir()):
new = False
url = repo.get_repo_url(baseurl)
if not url == baseurl:
self.sw_vars[f'{repo_id}_alt_url'] = url
# Set up access to the repo
content = repo.get_yum_dotrepo_content(url, gpgcheck=0)
repo.write_yum_dot_repo_file(content)
repo.sync()
if new:
repo.create_meta()
else:
repo.create_meta(update=True)
# Setup local access to the new repo copy in /srv/repo/
content = repo.get_yum_dotrepo_content(gpgcheck=0, local=True)
repo.write_yum_dot_repo_file(content)
# Prep setup of POWER-Up client access to the repo copy
content = repo.get_yum_dotrepo_content(gpgcheck=0, client=True)
filename = repo_id + '-powerup.repo'
self.sw_vars['yum_powerup_repo_files'][filename] = content
self.log.info('Repository setup complete')
# Display status
self.status_prep()
# write software-vars file. Although also done in __del__, the software
# vars files are written here in case the user is running all phases of
# install
if not os.path.exists(GEN_SOFTWARE_PATH):
os.mkdir(GEN_SOFTWARE_PATH)
if self.eval_ver:
with open(GEN_SOFTWARE_PATH + 'software-vars-eval.yml', 'w') as f:
f.write('# Do not edit this file. This file is autogenerated.\n')
with open(GEN_SOFTWARE_PATH + 'software-vars-eval.yml', 'a') as f:
yaml.dump(self.sw_vars, f, default_flow_style=False)
else:
with open(GEN_SOFTWARE_PATH + 'software-vars.yml', 'w') as f:
f.write('# Do not edit this file. This file is autogenerated.\n')
with open(GEN_SOFTWARE_PATH + 'software-vars.yml', 'a') as f:
yaml.dump(self.sw_vars, f, default_flow_style=False)
def _add_dependent_packages(self, repo_dir, dep_list):
cmd = (f'yumdownloader --archlist={self.arch} --destdir '
f'{repo_dir} {dep_list}')
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
self.log.error('An error occurred while downloading dependent packages\n'
f'rc: {rc} err: {err}')
resp = resp.splitlines()
for item in resp:
if 'No Match' in item:
self.log.error(f'Dependent packages download error. {item}')
cmd = 'yum clean packages expire-cache'
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
self.log.error('An error occurred while cleaning the yum cache\n'
f'rc: {rc} err: {err}')
cmd = 'yum makecache fast'
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
self.log.error('An error occurred while making the yum cache\n'
f'rc: {rc} err: {err}')
def init_clients(self):
log = logger.getlogger()
self.sw_vars['ansible_inventory'] = get_ansible_inventory()
sudo_password = None
if self.sw_vars['ansible_become_pass'] is None:
sudo_password = self._cache_sudo_pass()
else:
self._unlock_vault()
if self.eval_ver:
cmd = ('{} -i {} {}init_clients.yml --extra-vars "@{}" '
.format(get_ansible_playbook_path(),
self.sw_vars['ansible_inventory'],
GEN_SOFTWARE_PATH,
GEN_SOFTWARE_PATH + "software-vars-eval.yml"))
else:
cmd = ('{} -i {} {}init_clients.yml --extra-vars "@{}" '
.format(get_ansible_playbook_path(),
self.sw_vars['ansible_inventory'],
GEN_SOFTWARE_PATH,
GEN_SOFTWARE_PATH + "software-vars.yml"))
prompt_msg = ""
if sudo_password is not None:
cmd += f'--extra-vars "ansible_become_pass={sudo_password}" '
elif os.path.isfile(self.vault_pass_file):
cmd += '--vault-password-file ' + self.vault_pass_file
elif self.sw_vars['ansible_become_pass'] is None:
cmd += '--ask-become-pass '
prompt_msg = "\nClient password required for privilege escalation"
run = True
while run:
log.info(f"Running Ansible playbook 'init_clients.yml' ...")
print(prompt_msg)
resp, err, rc = sub_proc_exec(cmd, shell=True)
log.debug(f"cmd: {cmd}\nresp: {resp}\nerr: {err}\nrc: {rc}")
if rc != 0:
log.warning("Ansible playbook failed!")
if resp != '':
print(f"stdout:\n{ansible_pprint(resp)}\n")
if err != '':
print(f"stderr:\n{err}\n")
choice, item = get_selection(['Retry', 'Continue', 'Exit'])
if choice == "1":
pass
elif choice == "2":
run = False
elif choice == "3":
log.debug('User chooses to exit.')
sys.exit('Exiting')
else:
log.info("Ansible playbook ran successfully")
run = False
print('All done')
def _cache_sudo_pass(self):
from ansible_vault import Vault
log = logger.getlogger()
print("\nPlease provide the client sudo password below. Note: All "
"client nodes must use the same password!")
# client_sudo_pass_validated = False
ansible_become_pass = getpass(prompt="Client sudo password: ")
while not self._validate_ansible_become_pass(ansible_become_pass):
choice, item = get_selection(['Re-enter password',
'Continue without caching password',
'Exit'])
if choice == "1":
ansible_become_pass = getpass(prompt="Client sudo password: ")
elif choice == "2":
ansible_become_pass = None
break
elif choice == "3":
log.debug('User chooses to exit.')
sys.exit('Exiting')
self.vault_pass = ansible_become_pass
if ansible_become_pass is not None:
vault = Vault(self.vault_pass)
data = vault.dump(ansible_become_pass).decode(encoding='UTF-8')
self.sw_vars['ansible_become_pass'] = YAMLVault(data)
return ansible_become_pass
def _validate_ansible_become_pass(self, ansible_become_pass):
log = logger.getlogger()
print("\nValidating sudo password on all clients...")
sudo_test = f'{GEN_SOFTWARE_PATH}{self.my_name}_ansible/sudo_test.yml'
cmd = (f'{get_ansible_playbook_path()} '
f'-i {self.sw_vars["ansible_inventory"]} '
f'{GEN_SOFTWARE_PATH}{self.my_name}_ansible/run.yml '
f'--extra-vars "task_file={sudo_test}" ')
if ansible_become_pass is not None:
cmd += f'--extra-vars "ansible_become_pass={ansible_become_pass}" '
elif os.path.isfile(self.vault_pass_file):
cmd += f' --vault-password-file {self.vault_pass_file} '
cmd += f'--extra-vars "@{GEN_SOFTWARE_PATH}software-vars.yml" '
else:
cmd += ' --ask-become-pass '
resp, err, rc = sub_proc_exec(cmd, shell=True)
log.debug(f"cmd: {cmd}\nresp: {resp}\nerr: {err}\nrc: {rc}")
if rc == 0:
print(bold("Validation passed!\n"))
return True
else:
print(bold("Validation failed!"))
if resp != '':
print(f"stdout:\n{ansible_pprint(resp)}\n")
if err != '':
print(f"stderr:\n{err}\n")
return False
def _unlock_vault(self, validate=True):
log = logger.getlogger()
while True:
if self.sw_vars['ansible_become_pass'] is None:
return False
elif self.vault_pass is None:
self.vault_pass = getpass(prompt="\nClient sudo password: ")
with open(self.vault_pass_file, 'w') as vault_pass_file_out:
vault_pass_file_out.write(self.vault_pass)
os.chmod(self.vault_pass_file, 0o600)
if not validate or self._validate_ansible_become_pass(None):
return True
else:
print(bold("Cached sudo password decryption/validation fail!"))
choice, item = get_selection(['Retry Password', 'Exit'])
if choice == "1":
self.vault_pass = None
elif choice == "2":
log.debug('User chooses to exit.')
sys.exit('Exiting')
sys.exit(1)
def install(self):
print()
if self.eval_ver:
if self.lic_prep_timestamp > self.eval_prep_timestamp:
print(bold('You have requested to install the evaluation version'))
print('of PowerAI Enterprise but last ran preparation for ')
print('licensed version.')
resp = get_yesno('Continue with evaluation installation ')
if not resp:
sys.exit('Installation ended by user')
else:
if self.eval_prep_timestamp > self.lic_prep_timestamp:
print(bold('You have requested to install the licensed version'))
print('of PowerAI Enterprise but last ran preparation for ')
print('evaluation version.')
resp = get_yesno('Continue with licensed installation ')
if not resp:
sys.exit('Installation ended by user')
if self.sw_vars['ansible_inventory'] is None:
self.sw_vars['ansible_inventory'] = get_ansible_inventory()
else:
print("Validating software inventory '{}'..."
.format(self.sw_vars['ansible_inventory']))
if validate_software_inventory(self.sw_vars['ansible_inventory']):
print(bold("Validation passed!"))
else:
print(bold("Validation FAILED!"))
self.sw_vars['ansible_inventory'] = get_ansible_inventory()
self._unlock_vault()
ana_ver = re.search(r'(anaconda\d)-\d', self.sw_vars['content_files']
['anaconda'], re.IGNORECASE).group(1).lower()
_set_spectrum_conductor_install_env(self.sw_vars['ansible_inventory'],
'spark')
_set_spectrum_conductor_install_env(self.sw_vars['ansible_inventory'],
'dli', ana_ver)
install_tasks = yaml.full_load(
open(GEN_SOFTWARE_PATH + f'{self.my_name}_install_procedure.yml'))
for task in install_tasks:
heading1(f"Client Node Action: {task['description']}")
if task['description'] == "Install Anaconda installer":
_interactive_anaconda_license_accept(
self.sw_vars['ansible_inventory'],
self.sw_vars['content_files']['anaconda'])
elif (task['description'] ==
"Check PowerAI Enterprise License acceptance"):
_interactive_paie_license_accept(
self.sw_vars['ansible_inventory'])
extra_args = ''
if 'hosts' in task:
extra_args = f"--limit \'{task['hosts']},localhost\'"
self._run_ansible_tasks(task['tasks'], extra_args)
print('Done')
def _run_ansible_tasks(self, tasks_path, extra_args=''):
log = logger.getlogger()
tasks_path = f'{self.my_name}_ansible/' + tasks_path
if self.sw_vars['ansible_become_pass'] is not None:
extra_args += ' --vault-password-file ' + self.vault_pass_file
elif 'become:' in open(f'{GEN_SOFTWARE_PATH}{tasks_path}').read():
extra_args += ' --ask-become-pass'
if self.eval_ver:
cmd = (f'{get_ansible_playbook_path()} -i '
f'{self.sw_vars["ansible_inventory"]} '
f'{GEN_SOFTWARE_PATH}{self.my_name}_ansible/run.yml '
f'--extra-vars "task_file={GEN_SOFTWARE_PATH}{tasks_path}" '
f'--extra-vars "@{GEN_SOFTWARE_PATH}software-vars-eval.yml" '
f'{extra_args}')
else:
cmd = (f'{get_ansible_playbook_path()} -i '
f'{self.sw_vars["ansible_inventory"]} '
f'{GEN_SOFTWARE_PATH}{self.my_name}_ansible/run.yml '
f'--extra-vars "task_file={GEN_SOFTWARE_PATH}{tasks_path}" '
f'--extra-vars "@{GEN_SOFTWARE_PATH}software-vars.yml" '
f'{extra_args}')
run = True
while run:
log.info(f'Running Ansible tasks found in \'{tasks_path}\' ...')
if ('notify: Reboot' in
open(f'{GEN_SOFTWARE_PATH}{tasks_path}').read()):
print(bold('\nThis step requires changed systems to reboot! '
'(16 minute timeout)'))
if '--ask-become-pass' in cmd:
print('\nClient password required for privilege escalation')
elif '--vault-password-file' in cmd:
self._unlock_vault(validate=False)
resp, err, rc = sub_proc_exec(cmd, shell=True)
log.debug(f"cmd: {cmd}\nresp: {resp}\nerr: {err}\nrc: {rc}")
print("") # line break
# If .vault file is missing a retry should work
if rc != 0 and '.vault was not found' in err:
log.warning("Vault file missing, retrying...")
elif rc != 0:
log.warning("Ansible tasks failed!")
if resp != '':
print(f"stdout:\n{ansible_pprint(resp)}\n")
if err != '':
print(f"stderr:\n{err}\n")
choice, item = get_selection(['Retry', 'Continue', 'Exit'])
if choice == "1":
pass
elif choice == "2":
run = False
elif choice == "3":
log.debug('User chooses to exit.')
sys.exit('Exiting')
else:
log.info("Ansible tasks ran successfully")
run = False
return rc
def _interactive_anaconda_license_accept(ansible_inventory, ana_path):
log = logger.getlogger()
cmd = (f'ansible-inventory --inventory {ansible_inventory} --list')
resp, err, rc = sub_proc_exec(cmd, shell=True)
inv = json.loads(resp)
hostname, hostvars = inv['_meta']['hostvars'].popitem()
ip = re.search(r'(Anaconda\d)-\d+.\d+.\d+', ana_path, re.IGNORECASE).group(1)
ip = f'/opt/{ip}/'.lower()
base_cmd = f'ssh -t {hostvars["ansible_user"]}@{hostname} '
if "ansible_ssh_private_key_file" in hostvars:
base_cmd += f'-i {hostvars["ansible_ssh_private_key_file"]} '
if "ansible_ssh_common_args" in hostvars:
base_cmd += f'{hostvars["ansible_ssh_common_args"]} '
cmd = base_cmd + f' ls {ip}'
resp, err, rc = sub_proc_exec(cmd)
# If install directory already exists assume license has been accepted
if rc == 0:
print(f'Anaconda license already accepted on {hostname}')
else:
print(bold('Manual Anaconda license acceptance required on at least '
'one client!'))
rlinput(f'Press Enter to run interactively on {hostname}')
fn = os.path.basename(ana_path)
cmd = f'{base_cmd} sudo ~/{fn} -p {ip}'
rc = sub_proc_display(cmd)
if rc == 0:
print('\nLicense accepted. Acceptance script will be run quietly '
'on remaining servers.')
else:
log.error("Anaconda license acceptance required to continue!")
sys.exit('Exiting')
return rc
def _interactive_paie_license_accept(ansible_inventory):
log = logger.getlogger()
cmd = (f'ansible-inventory --inventory {ansible_inventory} --list')
resp, err, rc = sub_proc_exec(cmd, shell=True)
inv = json.loads(resp)
accept_cmd = ('sudo /opt/DL/powerai-enterprise/license/bin/'
'accept-powerai-enterprise-license.sh ')
check_cmd = ('/opt/DL/powerai-enterprise/license/bin/'
'check-powerai-enterprise-license.sh ')
print(bold('Acceptance of the PowerAI Enterprise license is required on '
'all nodes in the cluster.'))
rlinput('Press Enter to run interactively on each host')
for hostname, hostvars in inv['_meta']['hostvars'].items():
base_cmd = f'ssh -t {hostvars["ansible_user"]}@{hostname} '
if "ansible_ssh_common_args" in hostvars:
base_cmd += f'{hostvars["ansible_ssh_common_args"]} '
if "ansible_ssh_private_key_file" in hostvars:
base_cmd += f'-i {hostvars["ansible_ssh_private_key_file"]} '
cmd = base_cmd + check_cmd
resp, err, rc = sub_proc_exec(cmd)
if rc == 0:
print(bold('PowerAI Enterprise license already accepted on '
f'{hostname}'))
else:
run = True
while run:
print(bold('\nRunning PowerAI Enterprise license script on '
f'{hostname}'))
cmd = base_cmd + accept_cmd
rc = sub_proc_display(cmd)
if rc == 0:
print(f'\nLicense accepted on {hostname}.')
run = False
else:
print(f'\nWARNING: License not accepted on {hostname}!')
choice, item = get_selection(['Retry', 'Continue', 'Exit'])
if choice == "1":
pass
elif choice == "2":
run = False
elif choice == "3":
log.debug('User chooses to exit.')
sys.exit('Exiting')
def _set_spectrum_conductor_install_env(ansible_inventory, package, ana_ver=None):
mod_name = sys.modules[__name__].__name__
cmd = (f'ansible-inventory --inventory {ansible_inventory} --list')
resp, err, rc = sub_proc_exec(cmd, shell=True)
inv = json.loads(resp)
hostname, hostvars = inv['_meta']['hostvars'].popitem()
if package == 'spark':
envs_path = (f'{GEN_SOFTWARE_PATH}/{mod_name}_ansible/'
'envs_spectrum_conductor.yml')
if not os.path.isfile(envs_path):
copy2(f'{GEN_SOFTWARE_PATH}/{mod_name}_ansible/'
'envs_spectrum_conductor_template.yml',
f'{GEN_SOFTWARE_PATH}/{mod_name}_ansible/'
'envs_spectrum_conductor.yml')
replace_regex(envs_path, r'^CLUSTERADMIN:\s*$',
f'CLUSTERADMIN: {hostvars["ansible_user"]}\n')
elif package == 'dli':
envs_path = (f'{GEN_SOFTWARE_PATH}/{mod_name}_ansible/'
'envs_spectrum_conductor_dli.yml')
if not os.path.isfile(envs_path):
copy2(f'{GEN_SOFTWARE_PATH}/{mod_name}_ansible/'
'envs_spectrum_conductor_dli_template.yml',
f'{GEN_SOFTWARE_PATH}/{mod_name}_ansible/'
'envs_spectrum_conductor_dli.yml')
replace_regex(envs_path, r'^CLUSTERADMIN:\s*$',
f'CLUSTERADMIN: {hostvars["ansible_user"]}\n')
replace_regex(envs_path, r'^DLI_CONDA_HOME:\s*$',
f'DLI_CONDA_HOME: /opt/{ana_ver}\n')
env_validated = False
init = True
while not env_validated:
try:
for key, value in yaml.full_load(open(envs_path)).items():
if value is None:
break
else:
env_validated = True
except IOError:
print(f'Failed to load Spectrum Conductor {package} configuration')
if not env_validated:
print(f'\nSpectrum Conductor {package} configuration variables '
'incomplete!')
input(f'Press enter to edit {package} configuration file')
click.edit(filename=envs_path)
elif init and get_yesno(f'Edit Spectrum Conductor {package} '
'configuration? '):
click.edit(filename=envs_path)
init = False
user_name = os.getlogin()
if os.getuid() == 0 and user_name != 'root':
user_uid = pwd.getpwnam(user_name).pw_uid
user_gid = grp.getgrnam(user_name).gr_gid
os.chown(envs_path, user_uid, user_gid)
os.chmod(envs_path, 0o644)
print(f'Spectrum Conductor {package} configuration variables successfully '
'loaded\n')
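# YAMLVault allows an Ansible Vault encrypted string to be stored in and
# reloaded from the software-vars YAML file under the '!vault' tag. The
# constructor is registered with yaml.add_constructor in software.__init__
# and the value is written back out through to_yaml below.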
class YAMLVault(yaml.YAMLObject):
yaml_tag = u'!vault'
def __init__(self, ansible_become_pass):
self.ansible_become_pass = ansible_become_pass
@classmethod
def from_yaml(cls, loader, node):
return YAMLVault(node.value)
@classmethod
def to_yaml(cls, dumper, data):
return dumper.represent_scalar(cls.yaml_tag, data.ansible_become_pass)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=['prep', 'install'],
help='Action to take: prep or install')
parser.add_argument('--print', '-p', dest='log_lvl_print',
help='print log level', default='info')
parser.add_argument('--file', '-f', dest='log_lvl_file',
help='file log level', default='info')
args = parser.parse_args()
logger.create(args.log_lvl_print, args.log_lvl_file)
soft = software()
if args.action == 'prep':
soft.prep()
elif args.action == 'install':
soft.install()
| apache-2.0 | 9,169,767,854,707,613,000 | 43.793229 | 100 | 0.513322 | false |
fabiand/anaconda | pyanaconda/timezone.py | 3 | 6441 | #
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <[email protected]>
#
"""
Module providing functions for getting the list of timezones, writing timezone
configuration, valid timezones recognition etc.
"""
import os
import pytz
import langtable
from collections import OrderedDict
from pyanaconda import iutil
from pyanaconda.constants import THREAD_STORAGE
from pyanaconda.flags import flags
from pyanaconda.threads import threadMgr
from blivet import arch
import logging
log = logging.getLogger("anaconda")
# The following zones are not in pytz.common_timezones and the
# Etc category in pytz.all_timezones includes some additional,
# but confusing, ones (like UCT, GMT+0, GMT-0, ...)
ETC_ZONES = ['GMT+1', 'GMT+2', 'GMT+3', 'GMT+4', 'GMT+5', 'GMT+6', 'GMT+7',
'GMT+8', 'GMT+9', 'GMT+10', 'GMT+11', 'GMT+12',
'GMT-1', 'GMT-2', 'GMT-3', 'GMT-4', 'GMT-5', 'GMT-6', 'GMT-7',
'GMT-8', 'GMT-9', 'GMT-10', 'GMT-11', 'GMT-12', 'GMT-13',
'GMT-14', 'UTC', 'GMT']
NTP_PACKAGE = "chrony"
NTP_SERVICE = "chronyd"
class TimezoneConfigError(Exception):
"""Exception class for timezone configuration related problems"""
pass
def time_initialize(timezone, storage, bootloader):
"""
Try to guess if RTC uses UTC time or not, set timezone.isUtc properly and
set system time from RTC using the UTC guess.
Guess is done by searching for bootable ntfs devices.
:param timezone: ksdata.timezone object
:param storage: blivet.Blivet instance
:param bootloader: bootloader.Bootloader instance
"""
if arch.isS390():
# nothing to do on s390(x) where hwclock doesn't exist
return
if not timezone.isUtc and not flags.automatedInstall:
# if set in the kickstart, no magic needed here
threadMgr.wait(THREAD_STORAGE)
ntfs_devs = filter(lambda dev: dev.format.name == "ntfs",
storage.devices)
timezone.isUtc = not bootloader.has_windows(ntfs_devs)
cmd = "hwclock"
args = ["--hctosys"]
if timezone.isUtc:
args.append("--utc")
else:
args.append("--localtime")
iutil.execWithRedirect(cmd, args)
def write_timezone_config(timezone, root):
"""
Write timezone configuration for the system specified by root.
:param timezone: ksdata.timezone object
:param root: path to the root
:raise: TimezoneConfigError
"""
# we want to create a relative symlink
tz_file = "/usr/share/zoneinfo/" + timezone.timezone
rooted_tz_file = os.path.normpath(root + tz_file)
relative_path = os.path.normpath("../" + tz_file)
link_path = os.path.normpath(root + "/etc/localtime")
if not os.access(rooted_tz_file, os.R_OK):
log.error("Timezone to be linked (%s) doesn't exist", rooted_tz_file)
else:
try:
# os.symlink fails if link_path exists, so try to remove it first
os.remove(link_path)
except OSError:
pass
try:
os.symlink(relative_path, link_path)
except OSError as oserr:
log.error("Error when symlinking timezone (from %s): %s",
rooted_tz_file, oserr.strerror)
if arch.isS390():
# there is no HW clock on s390(x)
return
try:
fobj = open(os.path.normpath(root + "/etc/adjtime"), "r")
lines = fobj.readlines()
fobj.close()
except IOError:
lines = [ "0.0 0 0.0\n", "0\n" ]
try:
with open(os.path.normpath(root + "/etc/adjtime"), "w") as fobj:
fobj.write(lines[0])
fobj.write(lines[1])
if timezone.isUtc:
fobj.write("UTC\n")
else:
fobj.write("LOCAL\n")
except IOError as ioerr:
msg = "Error while writing /etc/adjtime file: %s" % ioerr.strerror
raise TimezoneConfigError(msg)
def save_hw_clock(timezone):
"""
Save system time to HW clock.
:param timezone: ksdata.timezone object
"""
if arch.isS390():
return
cmd = "hwclock"
args = ["--systohc"]
if timezone.isUtc:
args.append("--utc")
else:
args.append("--local")
iutil.execWithRedirect(cmd, args)
def get_preferred_timezone(territory):
"""
Get the preferred timezone for a given territory. Note that this function
simply returns the first timezone in the list of timezones for a given
territory.
:param territory: territory to get preferred timezone for
:type territory: str
    :return: preferred timezone for the given territory or None if none found
:rtype: str or None
"""
timezones = langtable.list_timezones(territoryId=territory)
if not timezones:
return None
return timezones[0]
def get_all_regions_and_timezones():
"""
Get a dictionary mapping the regions to the list of their timezones.
:rtype: dict
"""
result = OrderedDict()
for tz in pytz.common_timezones:
parts = tz.split("/", 1)
if len(parts) > 1:
if parts[0] not in result:
result[parts[0]] = set()
result[parts[0]].add(parts[1])
result["Etc"] = set(ETC_ZONES)
return result
def is_valid_timezone(timezone):
"""
Check if a given string is an existing timezone.
:type timezone: str
:rtype: bool
"""
etc_zones = ["Etc/" + zone for zone in ETC_ZONES]
return timezone in pytz.common_timezones + etc_zones
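# Illustrative usage of the helpers above (not part of the original module):
#
#     get_all_regions_and_timezones()["Europe"]   # set containing e.g. "Prague"
#     is_valid_timezone("Europe/Prague")          # -> True
#     is_valid_timezone("Etc/GMT+3")              # -> True, via ETC_ZONES
#     is_valid_timezone("Mars/Olympus_Mons")      # -> False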
| gpl-2.0 | 5,350,825,474,400,793,000 | 28.682028 | 78 | 0.643844 | false |
d-plaindoux/fluent-rest | fluent_rest/runtime/filter.py | 1 | 3865 | # Copyright (C)2016 D. Plaindoux.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2, or (at your option) any
# later version.
import inspect
from fluent_rest.spec import rest
from fluent_rest.spec.path import Path
class ProviderInstance:
"""
A Provider instance is a provider generator ready to use
"""
def __init__(self, specification):
self.specification = specification
def execute(self, data):
callback = self.specification
if 'self' in inspect.getargspec(self.specification)[0]:
if not self.specification.im_self:
# TODO(didier) find a better solution for unbound methods
instance = self.specification.im_class()
callback = lambda d: self.specification.__func__(instance, d)
return callback(data)
class ResourceActionInstance:
"""
A specification instance denotes a selected specification with a given
set of bound variables generated by the path matching operation
"""
def __init__(self, specification, variables):
self.specification = specification
self.variables = variables
def execute(self, data):
parameters = {}
callback = self.specification
for name in inspect.getargspec(self.specification)[0]:
if name == 'self':
# TODO(didier) find a better solution for unbound methods
if not self.specification.im_self:
parameters['self'] = self.specification.im_class()
callback = self.specification.__func__
elif name == 'data':
parameters['data'] = data
else:
parameters[name] = self.variables(name)
return callback(**parameters)
class SpecificationFilter:
"""
A Specification filter is able to select a function using its rest
specification and a bridged request.
"""
def __init__(self, specification):
self.specification = specification
def filterProvider(self, response):
"""
Method called when a response must be managed transparently using
providers
"""
spec = rest.specification(self.specification)
if spec.hasProvider() and isinstance(response, spec.getProvider()):
return ProviderInstance(self.specification)
else:
return None
def filterRequest(self, request):
"""
Method called when the filter must be performed using a given request.
If the request respects the specification it returns a set of bound
variables defined in the path. Otherwise it returns None.
"""
spec = rest.specification(self.specification)
if spec.hasGivenVerb(request.verb()) is False:
return None
if spec.hasGivenConsumes(request.consumes()) is False:
return None
if spec.hasGivenProduces(request.produces()) is False:
return None
env = Path.parse(spec.getPath()).accept(request.path())
if env:
return ResourceActionInstance(self.specification, env)
else:
return None
def filterResponse(self, request):
"""
Method called when the filter must be performed using a given request.
If the request respects the specification it returns the function for
the response transformation
"""
spec = rest.specification(self.specification)
if spec.hasGivenProduces(request.produces()) is False:
return None
return spec.getGivenProduces(request.produces())
def __str__(self):
return str(rest.specification(self.specification))
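# Hedged usage sketch (not part of the original module). `handler` is any
# function carrying a rest specification, `request` is a bridged request
# exposing verb()/consumes()/produces()/path() as used above, and `data` is the
# decoded payload:
#
#     action = SpecificationFilter(handler).filterRequest(request)
#     if action is not None:
#         result = action.execute(data)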
| lgpl-2.1 | 4,175,094,459,938,144,000 | 30.942149 | 78 | 0.63881 | false |
tchernomax/ansible | lib/ansible/module_utils/ec2.py | 14 | 28070 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
from ansible.module_utils.ansible_release import __version__
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.six import string_types, binary_type, text_type
from ansible.module_utils.common.dict_transformations import (
camel_dict_to_snake_dict, snake_dict_to_camel_dict,
_camel_to_snake, _snake_to_camel,
)
try:
import boto
import boto.ec2 # boto does weird import stuff
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
try:
# Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
# uses this (and it works as expected). Python 2.6 will trigger the ImportError.
from functools import cmp_to_key
PY3_COMPARISON = True
except ImportError:
PY3_COMPARISON = False
class AnsibleAWSError(Exception):
pass
def _botocore_exception_maybe():
"""
Allow for boto3 not being installed when using these utils by wrapping
botocore.exceptions instead of assigning from it directly.
"""
if HAS_BOTO3:
return botocore.exceptions.ClientError
return type(None)
class AWSRetry(CloudRetry):
base_class = _botocore_exception_maybe()
@staticmethod
def status_code_from_exception(error):
return error.response['Error']['Code']
@staticmethod
def found(response_code, catch_extra_error_codes=None):
# This list of failures is based on this API Reference
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
#
# TooManyRequestsException comes from inside botocore when it
        # does retries; unfortunately it does not retry long
# enough to allow some services such as API Gateway to
# complete configuration. At the moment of writing there is a
# botocore/boto3 bug open to fix this.
#
# https://github.com/boto/boto3/issues/876 (and linked PRs etc)
retry_on = [
'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
'InternalFailure', 'InternalError', 'TooManyRequestsException',
'Throttling'
]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
not_found = re.compile(r'^\w+.NotFound')
return response_code in retry_on or not_found.search(response_code)
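# Hedged usage sketch (not in the original file). Assuming the backoff()
# decorator factory inherited from CloudRetry, calls hitting the throttling
# codes listed above can be retried transparently:
#
#     @AWSRetry.backoff(tries=5, delay=3)
#     def describe_instances(connection, **params):
#         return connection.describe_instances(**params)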
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
try:
return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
except ValueError as e:
module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, botocore.exceptions.NoCredentialsError) as e:
module.fail_json(msg=to_native(e))
except botocore.exceptions.NoRegionError as e:
module.fail_json(msg="The %s module requires a region and none was found in configuration, "
"environment variables or module parameters" % module._name)
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
profile = params.pop('profile_name', None)
if conn_type not in ['both', 'resource', 'client']:
raise ValueError('There is an issue in the calling code. You '
'must specify either both, resource, or client to '
'the conn_type parameter in the boto3_conn function '
'call')
if params.get('config'):
config = params.pop('config')
config.user_agent_extra = 'Ansible/{0}'.format(__version__)
else:
config = botocore.config.Config(
user_agent_extra='Ansible/{0}'.format(__version__),
)
session = boto3.session.Session(
profile_name=profile,
)
if conn_type == 'resource':
return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
elif conn_type == 'client':
return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
else:
client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
return client, resource
boto3_inventory_conn = _boto3_conn
def boto_exception(err):
"""
Extracts the error message from a boto exception.
:param err: Exception from boto
:return: Error message
"""
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
else:
error = '%s: %s' % (Exception, err)
return error
def aws_common_argument_spec():
return dict(
ec2_url=dict(),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
validate_certs=dict(default=True, type='bool'),
security_token=dict(aliases=['access_token'], no_log=True),
profile=dict(),
)
def ec2_argument_spec():
spec = aws_common_argument_spec()
spec.update(
dict(
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
return spec
def get_aws_connection_info(module, boto3=False):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = module.params.get('region')
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
if not ec2_url:
if 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
elif 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not access_key:
if os.environ.get('AWS_ACCESS_KEY_ID'):
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif os.environ.get('AWS_ACCESS_KEY'):
access_key = os.environ['AWS_ACCESS_KEY']
elif os.environ.get('EC2_ACCESS_KEY'):
access_key = os.environ['EC2_ACCESS_KEY']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
access_key = boto.config.get('Credentials', 'aws_access_key_id')
elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
access_key = boto.config.get('default', 'aws_access_key_id')
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if os.environ.get('AWS_SECRET_ACCESS_KEY'):
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif os.environ.get('AWS_SECRET_KEY'):
secret_key = os.environ['AWS_SECRET_KEY']
elif os.environ.get('EC2_SECRET_KEY'):
secret_key = os.environ['EC2_SECRET_KEY']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
secret_key = boto.config.get('default', 'aws_secret_access_key')
else:
# in case secret_key came in as empty string
secret_key = None
if not region:
if 'AWS_REGION' in os.environ:
region = os.environ['AWS_REGION']
elif 'AWS_DEFAULT_REGION' in os.environ:
region = os.environ['AWS_DEFAULT_REGION']
elif 'EC2_REGION' in os.environ:
region = os.environ['EC2_REGION']
else:
if not boto3:
if HAS_BOTO:
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if not region:
region = boto.config.get('Boto', 'ec2_region')
else:
module.fail_json(msg="boto is required for this module. Please install boto and try again")
elif HAS_BOTO3:
# here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
try:
region = botocore.session.Session(profile=profile_name).get_config_variable('region')
except botocore.exceptions.ProfileNotFound as e:
pass
else:
module.fail_json(msg="Boto3 is required for this module. Please install boto3 and try again")
if not security_token:
if os.environ.get('AWS_SECURITY_TOKEN'):
security_token = os.environ['AWS_SECURITY_TOKEN']
elif os.environ.get('AWS_SESSION_TOKEN'):
security_token = os.environ['AWS_SESSION_TOKEN']
elif os.environ.get('EC2_SECURITY_TOKEN'):
security_token = os.environ['EC2_SECURITY_TOKEN']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
security_token = boto.config.get('Credentials', 'aws_security_token')
elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
security_token = boto.config.get('default', 'aws_security_token')
else:
# in case secret_token came in as empty string
security_token = None
if HAS_BOTO3 and boto3:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=security_token)
boto_params['verify'] = validate_certs
if profile_name:
boto_params['profile_name'] = profile_name
else:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# only set profile_name if passed as an argument
if profile_name:
boto_params['profile_name'] = profile_name
boto_params['validate_certs'] = validate_certs
for param, value in boto_params.items():
if isinstance(value, binary_type):
boto_params[param] = text_type(value, 'utf-8', 'strict')
return region, ec2_url, boto_params
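# Typical wiring inside a module (illustrative sketch, not in the original file):
#
#     region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
#     client = boto3_conn(module, conn_type='client', resource='ec2',
#                         region=region, endpoint=ec2_url, **aws_connect_params)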
def get_ec2_creds(module):
''' for compatibility mode with old modules that don't/can't yet
use ec2_connect method '''
region, ec2_url, boto_params = get_aws_connection_info(module)
return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
def boto_fix_security_token_in_profile(conn, profile_name):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + profile_name
if boto.config.has_option(profile, 'aws_security_token'):
conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
return conn
def connect_to_aws(aws_module, region, **params):
try:
conn = aws_module.connect_to_region(region, **params)
except(boto.provider.ProfileNotFoundError):
raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
if not conn:
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
"boto or extend with endpoints_path" % (region, aws_module.__name__))
else:
raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def ec2_connect(module):
""" Return an ec2 connection"""
region, ec2_url, boto_params = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
return ec2
def ansible_dict_to_boto3_filter_list(filters_dict):
""" Convert an Ansible dict of filters to list of dicts that boto3 can use
Args:
filters_dict (dict): Dict of AWS filters.
Basic Usage:
>>> filters = {'some-aws-id': 'i-01234567'}
>>> ansible_dict_to_boto3_filter_list(filters)
        [
            {
                'Name': 'some-aws-id',
                'Values': ['i-01234567']
            }
        ]
Returns:
List: List of AWS filters and their values
[
{
'Name': 'some-aws-id',
'Values': [
'i-01234567',
]
}
]
"""
filters_list = []
for k, v in filters_dict.items():
filter_dict = {'Name': k}
if isinstance(v, string_types):
filter_dict['Values'] = [v]
else:
filter_dict['Values'] = v
filters_list.append(filter_dict)
return filters_list
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
""" Convert a boto3 list of resource tags to a flat dict of key:value pairs
Args:
tags_list (list): List of dicts representing AWS tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
Basic Usage:
>>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
>>> boto3_tag_list_to_ansible_dict(tags_list)
        {
            'MyTagKey': 'MyTagValue'
        }
Returns:
Dict: Dict of key:value pairs representing AWS tags
{
'MyTagKey': 'MyTagValue',
}
"""
if tag_name_key_name and tag_value_key_name:
tag_candidates = {tag_name_key_name: tag_value_key_name}
else:
tag_candidates = {'key': 'value', 'Key': 'Value'}
if not tags_list:
return {}
for k, v in tag_candidates.items():
if k in tags_list[0] and v in tags_list[0]:
return dict((tag[k], tag[v]) for tag in tags_list)
raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
""" Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
Args:
tags_dict (dict): Dict representing AWS resource tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
Basic Usage:
>>> tags_dict = {'MyTagKey': 'MyTagValue'}
>>> ansible_dict_to_boto3_tag_list(tags_dict)
        [
            {
                'Key': 'MyTagKey',
                'Value': 'MyTagValue'
            }
        ]
Returns:
List: List of dicts containing tag keys and values
[
{
'Key': 'MyTagKey',
'Value': 'MyTagValue'
}
]
"""
tags_list = []
for k, v in tags_dict.items():
tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
return tags_list
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
""" Return list of security group IDs from security group names. Note that security group names are not unique
across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
a try block
"""
def get_sg_name(sg, boto3):
if boto3:
return sg['GroupName']
else:
return sg.name
def get_sg_id(sg, boto3):
if boto3:
return sg['GroupId']
else:
return sg.id
sec_group_id_list = []
if isinstance(sec_group_list, string_types):
sec_group_list = [sec_group_list]
# Get all security groups
if boto3:
if vpc_id:
filters = [
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
}
]
all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
else:
all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
else:
if vpc_id:
filters = {'vpc-id': vpc_id}
all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
else:
all_sec_groups = ec2_connection.get_all_security_groups()
unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
sec_group_name_list = list(set(sec_group_list) - set(unmatched))
if len(unmatched) > 0:
# If we have unmatched names that look like an ID, assume they are
import re
sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
if len(still_unmatched) > 0:
raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
return sec_group_id_list
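# Illustrative call (not part of the original module; names and IDs are made up):
#
#     get_ec2_security_group_ids_from_names(['default', 'sg-0123abcd'],
#                                           ec2_connection, vpc_id='vpc-11111111')
#     # -> e.g. ['sg-0123abcd', 'sg-aaaaaaaa']; unmatched names that do not look
#     #    like 'sg-...' IDs raise ValueError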
def _hashable_policy(policy, policy_list):
"""
Takes a policy and returns a list, the contents of which are all hashable and sorted.
Example input policy:
{'Version': '2012-10-17',
'Statement': [{'Action': 's3:PutObjectAcl',
'Sid': 'AddCannedAcl2',
'Resource': 'arn:aws:s3:::test_policy/*',
'Effect': 'Allow',
'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
}]}
Returned value:
[('Statement', ((('Action', (u's3:PutObjectAcl',)),
('Effect', (u'Allow',)),
('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
('Version', (u'2012-10-17',)))]
"""
if isinstance(policy, list):
for each in policy:
tupleified = _hashable_policy(each, [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append(tupleified)
elif isinstance(policy, string_types) or isinstance(policy, binary_type):
# convert root account ARNs to just account IDs
if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
policy = policy.split(':')[4]
return [(to_text(policy))]
elif isinstance(policy, dict):
sorted_keys = list(policy.keys())
sorted_keys.sort()
for key in sorted_keys:
tupleified = _hashable_policy(policy[key], [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append((key, tupleified))
# ensure we aren't returning deeply nested structures of length 1
if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
policy_list = policy_list[0]
if isinstance(policy_list, list):
if PY3_COMPARISON:
policy_list.sort(key=cmp_to_key(py3cmp))
else:
policy_list.sort()
return policy_list
def py3cmp(a, b):
""" Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
try:
if a > b:
return 1
elif a < b:
return -1
else:
return 0
except TypeError as e:
# check to see if they're tuple-string
# always say strings are less than tuples (to maintain compatibility with python2)
str_ind = to_text(e).find('str')
tup_ind = to_text(e).find('tuple')
if -1 not in (str_ind, tup_ind):
if str_ind < tup_ind:
return -1
elif tup_ind < str_ind:
return 1
raise
def compare_policies(current_policy, new_policy):
""" Compares the existing policy and the updated policy
Returns True if there is a difference between policies.
"""
return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
def sort_json_policy_dict(policy_dict):
""" Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
different orders will return true
Args:
policy_dict (dict): Dict representing IAM JSON policy.
Basic Usage:
        >>> my_iam_policy = {'Principle': {'AWS': ["31", "7", "14", "101"]}}
>>> sort_json_policy_dict(my_iam_policy)
Returns:
Dict: Will return a copy of the policy as a Dict but any List will be sorted
{
'Principle': {
'AWS': [ '7', '14', '31', '101' ]
}
}
"""
def value_is_list(my_list):
checked_list = []
for item in my_list:
if isinstance(item, dict):
checked_list.append(sort_json_policy_dict(item))
elif isinstance(item, list):
checked_list.append(value_is_list(item))
else:
checked_list.append(item)
# Sort list. If it's a list of dictionaries, sort by tuple of key-value
# pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
return checked_list
ordered_policy_dict = {}
for key, value in policy_dict.items():
if isinstance(value, dict):
ordered_policy_dict[key] = sort_json_policy_dict(value)
elif isinstance(value, list):
ordered_policy_dict[key] = value_is_list(value)
else:
ordered_policy_dict[key] = value
return ordered_policy_dict
def map_complex_type(complex_type, type_map):
"""
    Allows elements within a dictionary to be cast to a specific type
Example of usage:
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
'maximum_percent': 'int',
'minimum_healthy_percent': 'int'
}
deployment_configuration = map_complex_type(module.params['deployment_configuration'],
DEPLOYMENT_CONFIGURATION_TYPE_MAP)
    This ensures all keys within the root element are cast to valid integers
"""
if complex_type is None:
return
new_type = type(complex_type)()
if isinstance(complex_type, dict):
for key in complex_type:
if key in type_map:
if isinstance(type_map[key], list):
new_type[key] = map_complex_type(
complex_type[key],
type_map[key][0])
else:
new_type[key] = map_complex_type(
complex_type[key],
type_map[key])
else:
return complex_type
elif isinstance(complex_type, list):
for i in range(len(complex_type)):
new_type.append(map_complex_type(
complex_type[i],
type_map))
elif type_map:
return globals()['__builtins__'][type_map](complex_type)
return new_type
def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
"""
    Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
    Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ,
    these may not be usable out of the box.
:param current_tags_dict:
:param new_tags_dict:
:param purge_tags:
:return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
:return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
"""
tag_key_value_pairs_to_set = {}
tag_keys_to_unset = []
for key in current_tags_dict.keys():
if key not in new_tags_dict and purge_tags:
tag_keys_to_unset.append(key)
for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
tag_key_value_pairs_to_set[key] = new_tags_dict[key]
return tag_key_value_pairs_to_set, tag_keys_to_unset
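# Worked example (illustrative, not in the original file):
#
#     current = {'Name': 'web', 'env': 'prod'}
#     desired = {'Name': 'web', 'owner': 'ops'}
#     compare_aws_tags(current, desired, purge_tags=True)
#     # -> ({'owner': 'ops'}, ['env'])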
| gpl-3.0 | 6,600,999,521,297,061,000 | 37.986111 | 152 | 0.611649 | false |
normanmaurer/autobahntestsuite-maven-plugin | src/main/resources/autobahntestsuite/case/case1_2_2.py | 14 | 1337 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case1_2_2(Case):
   DESCRIPTION = """Send binary message with payload of length 125."""
EXPECTATION = """Receive echo'ed binary message (with payload as sent). Clean close with normal code."""
def onOpen(self):
payload = "\xfe" * 125
self.expected[Case.OK] = [("message", payload, True)]
self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
self.p.sendFrame(opcode = 2, payload = payload)
self.p.killAfter(1)
| apache-2.0 | -8,320,442,274,870,681,000 | 39.78125 | 112 | 0.600598 | false |
tensorflow/ngraph-bridge | test/python/test_cast.py | 1 | 1774 | # ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""nGraph TensorFlow bridge cast operation test
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from common import NgraphTest
class TestCastOperations(NgraphTest):
def test_cast_1d(self):
val = tf.compat.v1.placeholder(tf.float32, shape=(2,))
out = tf.cast(val, dtype=tf.int32)
def run_test(sess):
return sess.run(out, feed_dict={val: (5.5, 2.0)})
assert (
self.with_ngraph(run_test) == self.without_ngraph(run_test)).all()
def test_cast_2d(self):
test_input = ((1.5, 2.5, 3.5), (4.5, 5.5, 6.5))
val = tf.compat.v1.placeholder(tf.float32, shape=(2, 3))
out = tf.cast(val, dtype=tf.int32)
def run_test(sess):
return sess.run(out, feed_dict={val: test_input})
assert (
self.with_ngraph(run_test) == self.without_ngraph(run_test)).all()
| apache-2.0 | 8,793,015,199,251,692,000 | 33.115385 | 80 | 0.607666 | false |
grigorisg9gr/menpo3d | menpo3d/rasterize/base.py | 3 | 3482 | def rasterize_mesh_from_barycentric_coordinate_images(mesh, bcoords_image,
tri_indices_image):
r"""
Renders an image of a `menpo.shape.TexturedTriMesh` or
`menpo.shape.ColouredTriMesh` from a barycentric coordinate image pair.
Note that the texture is rendered without any lighting model - think of
this as a piecewise affine warp of the mesh's texture into the image (
with z-buffering). As there is no lighting model, only meshes with
colour/texture can be used with this method (a single color for the whole
mesh would render flat with no shading).
Parameters
----------
mesh : `menpo.shape.TexturedTriMesh` or `menpo.shape.ColouredTriMesh`
        The 3D mesh whose texture will be rendered to the image.
bcoords_image : `menpo.image.MaskedImage`
The per-triangle barycentric coordinates for what should be rendered
into each pixel. See :map:`rasterize_barycentric_coordinate_images`.
tri_indices_image : `menpo.image.MaskedImage`
        The triangle index identifying the triangle that is visible at a pixel
after z-buffering. See :map:`rasterize_barycentric_coordinate_images`.
Returns
-------
`menpo.image.MaskedImage`
A rasterized image of the mesh.
"""
# Sample the mesh texture space to find the colors-per pixel
colours = mesh.sample_texture_with_barycentric_coordinates(
bcoords_image.as_vector(keep_channels=True).T,
tri_indices_image.as_vector())
# Rebuild the image using the usual from_vector machinery
return tri_indices_image.from_vector(colours.T,
n_channels=mesh.n_channels)
def rasterize_shape_image_from_barycentric_coordinate_images(mesh,
bcoords_image,
tri_indices_image):
r"""
Renders an XYZ shape image of a `menpo.shape.TexturedTriMesh` or
`menpo.shape.ColouredTriMesh` from a barycentric coordinate image pair.
Parameters
----------
mesh : `menpo.shape.TexturedTriMesh` or `menpo.shape.ColouredTriMesh`
        The 3D mesh whose texture will be rendered to the image.
bcoords_image : `menpo.image.MaskedImage`
The per-triangle barycentric coordinates for what should be rendered
into each pixel. See :map:`rasterize_barycentric_coordinate_images`.
tri_indices_image : `menpo.image.MaskedImage`
        The triangle index identifying the triangle that is visible at a pixel
after z-buffering. See :map:`rasterize_barycentric_coordinate_images`.
Returns
-------
`menpo.image.MaskedImage`
        A rasterized shape image of the mesh.
"""
# Sample the mesh texture space to find the colors-per pixel
shape_per_pixel = mesh.project_barycentric_coordinates(
bcoords_image.as_vector(keep_channels=True).T,
tri_indices_image.as_vector())
# Rebuild the image using the usual from_vector machinery
return tri_indices_image.from_vector(shape_per_pixel.points.T,
n_channels=mesh.n_channels)
def rasterize_mesh(mesh_in_img, image_shape):
from .cpu import rasterize_barycentric_coordinate_images
bcs = rasterize_barycentric_coordinate_images(mesh_in_img, image_shape)
return rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
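# Illustrative call (not part of the original module); `mesh_in_img` must already
# be a textured/coloured mesh expressed in image coordinates:
#
#     rendered = rasterize_mesh(mesh_in_img, (480, 640))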
| bsd-3-clause | 5,081,503,443,686,180,000 | 46.054054 | 80 | 0.664561 | false |
stimpsonsg/moose | python/MooseDocs/html2latex/MooseExtension.py | 4 | 1500 | from Extension import Extension
from moose_elements import *
class MooseExtension(Extension):
"""
Aggregates the MOOSE specific element objects into an extension for html to latex conversion.
"""
def extend(self, translator):
config = self.getConfigs()
translator.elements.add('moose_internal_links', moose_internal_links(), '<a')
translator.elements.add('moose_external_links', moose_markdown_links(site=config['site']), '<a')
translator.elements.add('moose_inline_code', moose_inline_code(), '<code')
translator.elements.add('moose_bib', moose_bib(), '<ol')
translator.elements.add('moose_bib_span', moose_bib_span(), '<moose_internal_links')
translator.elements.add('moose_slider', moose_slider(), '_begin')
translator.elements.add('moose_buildstatus', moose_buildstatus(), '_begin')
translator.elements.add('admonition_div', admonition_div(), '<div')
translator.elements.add('moose_code_div', moose_code_div(), '_begin')
translator.elements.add('moose_pre_code', moose_pre_code(), '<pre_code')
translator.elements.add('moose_pre', moose_pre(), '<pre')
translator.elements.add('moose_table', moose_table(), '<table')
translator.elements.add('moose_img', moose_img(), '<img')
translator.elements.add('moose_diagram', moose_diagram(), '<moose_img')
if not config['hrule']:
translator.elements.add('moose_hide_hr', moose_hide_hr(), '<hr')
| lgpl-2.1 | 3,872,035,656,216,660,000 | 50.724138 | 104 | 0.657333 | false |
ramon-astudillo/lxmls-toolkit | lxmls/sequences/bak/forward_backward_2.py | 3 | 2093 | import numpy as np
# ----------
# Computes the forward backward trellis for a given sequence
# N - Length of sequence
# H - Number of hidden states
# Receives:
# Node potentials: (H, N) array
# Edge potentials: (H, H, N-1) array
# ----------
def forward_backward(node_potentials, edge_potentials):
H, N = node_potentials.shape
forward = -1000.0 * np.ones([H, N], dtype=float)
backward = -1000.0 * np.ones([H, N], dtype=float)
forward[:, 0] = np.log(node_potentials[:, 0])
# Forward loop
for pos in xrange(1, N):
for current_state in xrange(H):
for prev_state in xrange(H):
forward_v = forward[prev_state, pos-1]
trans_v = np.log(edge_potentials[prev_state, current_state, pos-1])
logprob = forward_v + trans_v
forward[current_state, pos] = np.logaddexp(forward[current_state, pos], logprob)
forward[current_state, pos] += np.log(node_potentials[current_state, pos])
# Backward loop
backward[:, N-1] = 0.0 # log(1) = 0
for pos in xrange(N-2, -1, -1):
for current_state in xrange(H):
logprob = -1000.0
for next_state in xrange(H):
back = backward[next_state, pos+1]
trans = np.log(edge_potentials[current_state, next_state, pos])
observation = np.log(node_potentials[next_state, pos+1])
logprob = np.logaddexp(logprob, trans + observation + back)
backward[current_state, pos] = logprob
# sanity_check_forward_backward(forward,backward)
# print forward, backward
return np.exp(forward), np.exp(backward)
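# Minimal smoke test (added for illustration, not in the original file); the
# potential values are arbitrary and uniform:
#
#     H, N = 2, 3
#     node_potentials = np.ones([H, N]) * 0.5
#     edge_potentials = np.ones([H, H, N - 1]) * 0.5
#     forward, backward = forward_backward(node_potentials, edge_potentials)
#     # sanity_check_forward_backward(forward, backward) should then return the
#     # same likelihood value at every position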
# ----------
# For every position pos, the sum over states of forward(pos,state)*backward(pos,state) equals the sequence likelihood
# ----------
def sanity_check_forward_backward(forward, backward):
H, N = forward.shape
likelihood = np.zeros([N, 1])
for pos in xrange(N):
aux = 0
for current_state in xrange(H):
aux += forward[current_state, pos] * backward[current_state, pos]
likelihood[pos] = aux
return likelihood
| mit | -145,053,589,913,879,680 | 38.490566 | 96 | 0.599618 | false |
sloria/modular-odm | tests/queries/test_logical_operators.py | 4 | 1811 | from modularodm import fields, StoredObject
from modularodm.query.querydialect import DefaultQueryDialect as Q
from tests.base import ModularOdmTestCase
class LogicalOperatorsBase(ModularOdmTestCase):
def define_objects(self):
class Foo(StoredObject):
_id = fields.IntegerField(required=True, primary=True)
a = fields.IntegerField()
b = fields.IntegerField()
return Foo,
def set_up_objects(self):
self.foos = []
for idx, f in enumerate([(a, b) for a in range(3) for b in range(3)]):
self.foos.append(
self.Foo(
_id = idx,
a = f[0],
b = f[1],
)
)
[x.save() for x in self.foos]
def test_and(self):
"""Find the intersection of two or more queries."""
result = self.Foo.find(Q('a', 'eq', 0) & Q('b', 'eq', 1))
self.assertEqual(
len(result),
1,
)
self.assertEqual(result[0].a, 0)
self.assertEqual(result[0].b, 1)
def test_or(self):
"""Find the union of two or more queries."""
result = self.Foo.find(Q('a', 'eq', 0) | Q('a', 'eq', 1))
self.assertEqual(
len(result),
6,
)
def test_not(self):
"""Find the inverse of a query."""
result = self.Foo.find(~Q('a', 'eq', 0))
self.assertEqual(
len(result),
6,
)
def test_and_or(self):
"""Join multiple OR queries with an AND.
"""
result = self.Foo.find(
(Q('a', 'eq', 0) | Q('a', 'eq', 1))
& (Q('b', 'eq', 1) | Q('b', 'eq', 2))
)
self.assertEqual(
len(result),
4,
)
| apache-2.0 | 1,969,003,466,511,118,000 | 26.029851 | 78 | 0.478189 | false |
nilathak/gentoo-cruft | pylon/ui.py | 1 | 4929 | '''
provides a ui class which contains all user interface specific code.
- a logger based on the logging module
- a default parser based on the argparse module
- a custom exception handler to control traceback output (eg, during multithreading)
- a few stub functions for overloading in a more specific ui class
'''
import argparse
import logging
import sys
class ui(object):
'nice command line user interface class used by pylon based scripts'
EXT_INFO = logging.INFO - 1
@property
def args(self):
return self._args
@property
def formatter(self):
return self._formatter
@property
def logger(self):
return self._logger
@property
def owner(self):
return self._owner
@property
def parser(self):
return self._parser
def __init__(self, owner):
self.__dict__.update({'_'+k:v for k,v in locals().items() if k != 'self'})
# Logger
########################################
# define additional logging level for a better verbosity granularity
logging.addLevelName(ui.EXT_INFO, 'INFO')
# set logger name to class name
self._logger = logging.getLogger(self.owner.__class__.__name__)
# define format of logger output
fmt_str = '### %(name)s(%(asctime)s) %(levelname)s: %(message)s'
self._formatter = {}
self.formatter['default'] = logging.Formatter(fmt_str)
self.formatter['threaded'] = logging.Formatter('%(threadName)s: ' + fmt_str)
# add default handler for logging on stdout
self._handler = {}
self._handler['stdout'] = logging.StreamHandler(sys.stdout)
self._handler['stdout'].setFormatter(self.formatter['default'])
self.logger.addHandler(self._handler['stdout'])
# Argument Parser
########################################
# take any existing class doc string from our owner and set it as description
self._parser = argparse.ArgumentParser(description=self.owner.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
# define the common basic set of arguments
self.parser.add_argument('--dry_run', action='store_true',
help='switch to passive behavior (no subprocess execution)')
self.parser.add_argument('-q', action='count', dest='quiet', default=0,
help='quiet output (multiply for more silence)')
self.parser.add_argument('--traceback', action='store_true',
help='enable python traceback for debugging purposes')
self.parser.add_argument('-v', action='count', dest='verbosity', default=0,
help='verbose output (multiply for more verbosity)')
def setup(self):
self._args = self.parser.parse_args()
# determine default verbosity behavior
l = logging.INFO
if self.args.verbosity > 1 or self.args.dry_run or self.args.traceback:
l = logging.DEBUG
elif self.args.verbosity > 0:
l = ui.EXT_INFO
# quiet switch takes precedence
if self.args.quiet > 1:
l = logging.ERROR
elif self.args.quiet > 0:
l = logging.WARNING
self.logger.setLevel(l)
def cleanup(self):
'stub for basic cleanup stuff'
pass
def handle_exception_gracefully(self, et):
'returns True if an exception should NOT be thrown at python interpreter'
return (
not self.args.traceback or
# catch only objects deriving from Exception. Omit trivial
# things like KeyboardInterrupt (derives from BaseException)
not issubclass(et, Exception)
)
def excepthook(self, et, ei, tb):
'pipe exceptions to logger, control traceback display. default exception handler will be replaced by this function'
# switch to a more passive exception handling mechanism if
# other threads are still active
origin = 'default'
if len(self.owner.jobs) > 0:
origin = 'thread'
if self.handle_exception_gracefully(et):
self.error(repr(et) + ' ' + str(ei))
if origin == 'default':
self.cleanup()
# generate error != 0
sys.exit(1)
else:
if origin == 'thread':
self.logger.exception('Traceback')
else:
# avoid losing any traceback info
sys.__excepthook__(et, ei, tb)
# logging level wrapper functions
def debug(self, msg):
self.logger.debug(msg)
def error(self, msg):
self.logger.error(msg)
def ext_info(self, msg):
self.logger.log(ui.EXT_INFO, msg)
def info(self, msg):
self.logger.info(msg)
def warning(self, msg):
self.logger.warning(msg)
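# Illustrative wiring (not part of the original module); assumes an owner object
# exposing a class docstring and a ``jobs`` list, as excepthook() expects:
#
#     class script(object):
#         'does something'
#         def __init__(self):
#             self.jobs = []
#             self.ui = ui(self)
#             self.ui.setup()
#             self.ui.info('starting')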
| gpl-2.0 | 5,971,663,844,420,374,000 | 34.978102 | 132 | 0.592209 | false |
cloudfoundry/php-buildpack | lib/httpd/extension.py | 8 | 1638 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def preprocess_commands(ctx):
return ((
'$HOME/.bp/bin/rewrite',
'"$HOME/httpd/conf"'),)
def service_commands(ctx):
return {
'httpd': (
'$HOME/httpd/bin/apachectl',
'-f "$HOME/httpd/conf/httpd.conf"',
'-k start',
'-DFOREGROUND')
}
def service_environment(ctx):
return {
'HTTPD_SERVER_ADMIN': ctx['ADMIN_EMAIL']
}
def compile(install):
print 'Installing HTTPD'
print 'HTTPD %s' % (install.builder._ctx['HTTPD_VERSION'])
install.builder._ctx['PHP_FPM_LISTEN'] = '127.0.0.1:9000'
(install
.package('HTTPD')
.config()
.from_application('.bp-config/httpd') # noqa
.or_from_build_pack('defaults/config/httpd')
.to('httpd/conf')
.rewrite()
.done())
return 0
| apache-2.0 | 7,133,047,946,178,950,000 | 30.5 | 74 | 0.643468 | false |
fqul/scrapy | tests/test_dupefilters.py | 15 | 1661 | import hashlib
import unittest
from scrapy.dupefilters import RFPDupeFilter
from scrapy.http import Request
from scrapy.utils.python import to_bytes
class RFPDupeFilterTest(unittest.TestCase):
def test_filter(self):
dupefilter = RFPDupeFilter()
dupefilter.open()
r1 = Request('http://scrapytest.org/1')
r2 = Request('http://scrapytest.org/2')
r3 = Request('http://scrapytest.org/2')
assert not dupefilter.request_seen(r1)
assert dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
assert dupefilter.request_seen(r3)
dupefilter.close('finished')
def test_request_fingerprint(self):
"""Test if customization of request_fingerprint method will change
output of request_seen.
"""
r1 = Request('http://scrapytest.org/index.html')
r2 = Request('http://scrapytest.org/INDEX.html')
dupefilter = RFPDupeFilter()
dupefilter.open()
assert not dupefilter.request_seen(r1)
assert not dupefilter.request_seen(r2)
dupefilter.close('finished')
class CaseInsensitiveRFPDupeFilter(RFPDupeFilter):
def request_fingerprint(self, request):
fp = hashlib.sha1()
fp.update(to_bytes(request.url.lower()))
return fp.hexdigest()
case_insensitive_dupefilter = CaseInsensitiveRFPDupeFilter()
case_insensitive_dupefilter.open()
assert not case_insensitive_dupefilter.request_seen(r1)
assert case_insensitive_dupefilter.request_seen(r2)
case_insensitive_dupefilter.close('finished')
| bsd-3-clause | 9,171,064,577,340,593,000 | 28.660714 | 74 | 0.657435 | false |
uclouvain/OSIS-Louvain | base/views/education_groups/achievement/common.py | 1 | 3445 | ############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
############################################################################
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.functional import cached_property
from django.views.generic.detail import SingleObjectMixin
from base.models.education_group_achievement import EducationGroupAchievement
from base.models.education_group_detailed_achievement import EducationGroupDetailedAchievement
from base.models.education_group_year import EducationGroupYear
from base.views.mixins import RulesRequiredMixin
class EducationGroupAchievementMixin(RulesRequiredMixin, SingleObjectMixin):
# SingleObjectMixin
model = EducationGroupAchievement
context_object_name = "education_group_achievement"
pk_url_kwarg = 'education_group_achievement_pk'
def get_success_url(self):
# Redirect to a page fragment
url = reverse(
"education_group_skills_achievements",
args=[
self.kwargs['root_id'],
self.kwargs['education_group_year_id'],
]
)
obj = getattr(self, "object", None) or self.get_object()
if obj:
# Remove the last / otherwise URL will be malformed
url = url.rstrip('/') + "#{}_{}".format(self.context_object_name, obj.pk)
return url
@cached_property
def person(self):
return self.request.user.person
@cached_property
def education_group_year(self):
return get_object_or_404(EducationGroupYear, pk=self.kwargs['education_group_year_id'])
# RulesRequiredMixin
raise_exception = True
    def _call_rule(self, rule):
        """ Rules will be called with the person and the education_group_year"""
        return rule(self.person, self.education_group_year)
class EducationGroupDetailedAchievementMixin(EducationGroupAchievementMixin):
# SingleObjectMixin
model = EducationGroupDetailedAchievement
context_object_name = "education_group_detail_achievement"
pk_url_kwarg = 'education_group_detail_achievement_pk'
@cached_property
def education_group_achievement(self):
return get_object_or_404(EducationGroupAchievement, pk=self.kwargs["education_group_achievement_pk"])
| agpl-3.0 | 2,461,939,139,919,578,600 | 40 | 109 | 0.687573 | false |
jyogi/purvar-agent | tests/checks/mock/test_openstack.py | 19 | 11917 | from time import sleep
from unittest import TestCase
from checks import AgentCheck
from tests.checks.common import AgentCheckTest, load_check, load_class
from mock import patch
OS_CHECK_NAME = 'openstack'
OpenStackProjectScope = load_class(OS_CHECK_NAME, "OpenStackProjectScope")
KeystoneCatalog = load_class(OS_CHECK_NAME, "KeystoneCatalog")
IncompleteConfig = load_class(OS_CHECK_NAME, "IncompleteConfig")
IncompleteAuthScope = load_class(OS_CHECK_NAME, "IncompleteAuthScope")
IncompleteIdentity = load_class(OS_CHECK_NAME, "IncompleteIdentity")
class MockHTTPResponse(object):
def __init__(self, response_dict, headers):
self.response_dict = response_dict
self.headers = headers
def json(self):
return self.response_dict
EXAMPLE_AUTH_RESPONSE = {
u'token': {
u'methods': [
u'password'
],
u'roles': [
{
u'id': u'f20c215f5a4d47b7a6e510bc65485ced',
u'name': u'datadog_monitoring'
},
{
u'id': u'9fe2ff9ee4384b1894a90878d3e92bab',
u'name': u'_member_'
}
],
u'expires_at': u'2015-11-02T15: 57: 43.911674Z',
u'project': {
u'domain': {
u'id': u'default',
u'name': u'Default'
},
u'id': u'0850707581fe4d738221a72db0182876',
u'name': u'admin'
},
u'catalog': [
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:8773/',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'541baeb9ab7542609d7ae307a7a9d5f0'
},
{
u'url': u'http: //10.0.2.15:8773/',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'5c648acaea9941659a5dc04fb3b18e49'
},
{
u'url': u'http: //10.0.2.15:8773/',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'cb70e610620542a1804522d365226981'
}
],
u'type': u'compute',
u'id': u'1398dc02f9b7474eb165106485033b48',
u'name': u'nova'
},
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'354e35ed19774e398f80dc2a90d07f4b'
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'36e8e2bf24384105b9d56a65b0900172'
},
{
u'url': u'http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'de93edcbf7f9446286687ec68423c36f'
}
],
u'type': u'computev21',
u'id': u'2023bd4f451849ba8abeaaf283cdde4f',
u'name': u'novav21'
},
{
u'endpoints': [
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'7c1e318d8f7f42029fcb591598df2ef5'
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'afcc88b1572f48a38bb393305dc2b584'
},
{
u'url': u'http://10.0.2.15:9292',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'id': u'd9730dbdc07844d785913219da64a197'
}
],
u'type': u'network',
u'id': u'21ad241f26194bccb7d2e49ee033d5a2',
u'name': u'neutron'
},
],
u'extras': {
},
u'user': {
u'domain': {
u'id': u'default',
u'name': u'Default'
},
u'id': u'5f10e63fbd6b411186e561dc62a9a675',
u'name': u'datadog'
},
u'audit_ids': [
u'OMQQg9g3QmmxRHwKrfWxyQ'
],
u'issued_at': u'2015-11-02T14: 57: 43.911697Z'
}
}
MOCK_HTTP_RESPONSE = MockHTTPResponse(response_dict=EXAMPLE_AUTH_RESPONSE, headers={"X-Subject-Token": "fake_token"})
class OSProjectScopeTest(TestCase):
BAD_AUTH_SCOPES = [
{"auth_scope": {}},
{"auth_scope": {"project": {}}},
{"auth_scope": {"project": {"id": ""}}},
{"auth_scope": {"project": {"name": "test"}}},
{"auth_scope": {"project": {"name": "test", "domain": {}}}},
{"auth_scope": {"project": {"name": "test", "domain": {"id": ""}}}},
]
GOOD_AUTH_SCOPES = [
{"auth_scope": {"project": {"id": "test_project_id"}}},
{"auth_scope": {"project": {"name": "test", "domain": {"id": "test_id"}}}},
]
BAD_USERS = [
{"user": {}},
{"user": {"name": ""}},
{"user": {"name": "test_name", "password": ""}},
{"user": {"name": "test_name", "password": "test_pass", "domain": {}}},
{"user": {"name": "test_name", "password": "test_pass", "domain": {"id": ""}}},
]
GOOD_USERS = [
{"user": {"name": "test_name", "password": "test_pass", "domain": {"id": "test_id"}}},
]
def _test_bad_auth_scope(self, scope):
self.assertRaises(IncompleteAuthScope, OpenStackProjectScope.get_auth_scope, scope)
def test_get_auth_scope(self):
for scope in self.BAD_AUTH_SCOPES:
self._test_bad_auth_scope(scope)
for scope in self.GOOD_AUTH_SCOPES:
auth_scope = OpenStackProjectScope.get_auth_scope(scope)
# Should pass through unchanged
self.assertEqual(auth_scope, scope.get("auth_scope"))
def _test_bad_user(self, user):
self.assertRaises(IncompleteIdentity, OpenStackProjectScope.get_user_identity, user)
def test_get_user_identity(self):
for user in self.BAD_USERS:
self._test_bad_user(user)
for user in self.GOOD_USERS:
parsed_user = OpenStackProjectScope.get_user_identity(user)
self.assertEqual(parsed_user, {"methods": ["password"], "password": user})
def test_from_config(self):
init_config = {"keystone_server_url": "http://10.0.2.15:5000", "nova_api_version": "v2"}
bad_instance_config = {}
good_instance_config = {"user": self.GOOD_USERS[0]["user"], "auth_scope": self.GOOD_AUTH_SCOPES[0]["auth_scope"]}
self.assertRaises(IncompleteConfig, OpenStackProjectScope.from_config, init_config, bad_instance_config)
with patch("openstack.OpenStackProjectScope.request_auth_token", return_value=MOCK_HTTP_RESPONSE):
append_config = good_instance_config.copy()
append_config["append_tenant_id"] = True
scope = OpenStackProjectScope.from_config(init_config, append_config)
self.assertTrue(isinstance(scope, OpenStackProjectScope))
self.assertEqual(scope.auth_token, "fake_token")
self.assertEqual(scope.tenant_id, "test_project_id")
# Test that append flag worked
self.assertEqual(scope.service_catalog.nova_endpoint, "http://10.0.2.15:8773/test_project_id")
class KeyStoneCatalogTest(TestCase):
def test_get_nova_endpoint(self):
self.assertEqual(KeystoneCatalog.get_nova_endpoint(EXAMPLE_AUTH_RESPONSE), u"http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876")
self.assertEqual(KeystoneCatalog.get_nova_endpoint(EXAMPLE_AUTH_RESPONSE, nova_api_version="v2"), u"http://10.0.2.15:8773/")
def test_get_neutron_endpoint(self):
self.assertEqual(KeystoneCatalog.get_neutron_endpoint(EXAMPLE_AUTH_RESPONSE), u"http://10.0.2.15:9292")
def test_from_auth_response(self):
catalog = KeystoneCatalog.from_auth_response(EXAMPLE_AUTH_RESPONSE, "v2.1")
self.assertTrue(isinstance(catalog, KeystoneCatalog))
self.assertEqual(catalog.neutron_endpoint, u"http://10.0.2.15:9292")
self.assertEqual(catalog.nova_endpoint, u"http://10.0.2.15:8774/v2.1/0850707581fe4d738221a72db0182876")
class TestCheckOpenStack(AgentCheckTest):
CHECK_NAME = OS_CHECK_NAME
MOCK_CONFIG = {
"init_config": {
"keystone_server_url": "http://10.0.2.15:5000",
"ssl_verify": False,
},
"instances": [
{
"name" : "test_name", "user": {"name": "test_name", "password": "test_pass", "domain": {"id": "test_id"}},
"auth_scope": {"project": {"id": "test_project_id"}}
}
]
}
def setUp(self):
self.check = load_check(self.CHECK_NAME, self.MOCK_CONFIG, self.DEFAULT_AGENT_CONFIG)
def test_ensure_auth_scope(self):
instance = self.MOCK_CONFIG["instances"][0]
self.assertRaises(KeyError, self.check.get_scope_for_instance, instance)
with patch("openstack.OpenStackProjectScope.request_auth_token", return_value=MOCK_HTTP_RESPONSE):
scope = self.check.ensure_auth_scope(instance)
self.assertEqual(self.check.get_scope_for_instance(instance), scope)
self.check._send_api_service_checks(scope)
self.service_checks = self.check.get_service_checks()
# Expect OK, since we've mocked an API response
self.assertServiceCheck(self.check.IDENTITY_API_SC, status=AgentCheck.OK, count=1)
# Expect CRITICAL since URLs are non-existent
self.assertServiceCheck(self.check.COMPUTE_API_SC, status=AgentCheck.CRITICAL, count=1)
self.assertServiceCheck(self.check.NETWORK_API_SC, status=AgentCheck.CRITICAL, count=1)
self.check._current_scope = scope
self.check.delete_current_scope()
self.assertRaises(KeyError, self.check.get_scope_for_instance, instance)
def test_parse_uptime_string(self):
uptime_parsed = self.check._parse_uptime_string(u' 16:53:48 up 1 day, 21:34, 3 users, load average: 0.04, 0.14, 0.19\n')
self.assertEqual(uptime_parsed.get('loads'), [0.04, 0.14, 0.19])
def test_cache_utils(self):
self.check.CACHE_TTL["aggregates"] = 1
expected_aggregates = {"hyp_1": ["aggregate:staging", "availability_zone:test"]}
with patch("openstack.OpenStackCheck.get_all_aggregate_hypervisors", return_value=expected_aggregates):
self.assertEqual(self.check._get_and_set_aggregate_list(), expected_aggregates)
sleep(1.5)
self.assertTrue(self.check._is_expired("aggregates"))
| bsd-3-clause | 2,928,608,700,257,250,000 | 38.989933 | 146 | 0.529831 | false |
ikn/boom | game/player.py | 1 | 7927 | from math import atan2, degrees
from random import gauss
from pygame import Rect
from .engine import conf, gfx, util, entity
from .engine.evt import bmode
from .entity import Entity
from .mine import Mine
from .util import line_intersects_rects, pt_dist
def closest_intersect (src, dest, rects):
pts = line_intersects_rects(src, dest, rects, True)
if pts:
return min((pt_dist(src, i), i) for i in pts)[1]
else:
return False
class Lasers (entity.Entity):
def __init__ (self, player, mines):
entity.Entity.__init__(self)
self.graphics.pos = player.rect.center
self.player = player
self.mines = mines
def added (self):
self.world.scheduler.add_timeout(self.finished, conf.LASER['time'])
self.world.play_snd('laser')
def update (self):
w = conf.LASER['width']
c = conf.LASER['colour']
l = conf.LAYERS['laser']
G = gfx.Graphic
x, y = self.player.rect.center
rects = self.world.rects
pad = 5
self.graphics.rm(*self.graphics)
self.graphics.pos = (x, y)
add = self.graphics.add
for m in self.mines:
mx, my = m.rect.center
i = closest_intersect((x, y), (mx, my), rects)
if i:
mx, my = i
dist = ((mx - x) ** 2 + (my - y) ** 2) ** .5
sfc = util.blank_sfc((dist + 2 * pad, w + 2 * pad))
sfc.fill(c, (pad, pad, dist, w))
g = G(sfc, layer=l)
dy = pad + w / 2
g.rot_anchor = (pad, dy)
g.rotate(atan2(y - my, mx - x))
add(g, -pad, -dy)
def finished (self):
self.world.detonate_mines(
[m for m in self.mines if not line_intersects_rects(
self.player.rect.center, m.rect.center, self.world.rects,
True
)],
True
)
self.world.rm(self)
class Player (Entity):
def __init__ (self, n, have_real, pos):
Entity.__init__(self, (0, 0))
self.graphics.pos = pos
self.id = n
self.size = conf.PLAYER['size']
self.have_real = have_real
self.thrown_real = False
self.dirn = [0, 0]
self.dead = False
self.lasers_left = conf.PLAYER['num_lasers']
self.throwing = False
def walk_sound (self):
self.world.play_snd('walk')
self.walk_counter.t = gauss(conf.WALK_SOUND_DELAY,
conf.WALK_SOUND_DELAY_VARIANCE)
self.walk_counter.pause()
for l in self.legs:
l.pause()
def added (self):
Entity.added(self)
C = self.world.scheduler.counter
self.done_jumping = C(conf.PLAYER['jump']['time'])
self.walk_counter = C(0, True).reset().cb(self.walk_sound)
self.walk_counter.pause()
self.walked = False
self.graphics.add(gfx.Graphic(
'player-body-{0}.png'.format(str(self.id)),
layer=conf.LAYERS['player' + str(self.id)]
), *conf.PLAYER['offset'])
legs = conf.PLAYER['legs']
x = legs['x_offset_base']
dx = legs['x_offset_multiple']
y = legs['y_offset']
n = legs['num']
s = gfx.util.Spritemap('player-legs-{0}.png'.format(str(0)), ## self.id
ncols=n)
t = legs['frame_time']
self.legs = []
for i in xrange(n):
frames = range(n)
frames = frames[i:] + frames[:i]
g = gfx.Animation(
s, layer=conf.LAYERS['legs' + str(self.id)],
scheduler=self.world.scheduler
).add('run', *frames, frame_time=t).play('run')
g.pause()
self.legs.append(g)
self.graphics.add(g, x, y)
x += dx
def update (self):
Entity.update(self)
if self.throwing:
self.throw_time += self.world.scheduler.frame
if self.walked:
self.walked = False
self.walk_counter.unpause()
for l in self.legs:
l.unpause()
else:
self.walk_counter.pause()
for l in self.legs:
l.pause()
def collide (self, axis, sgn, v):
v = abs(v)
if v > 5:
self.world.play_snd('collide', min(v / 20., 1))
def action (self, action):
action = util.wrap_fn(getattr(self, action))
def f (*args):
if not self.dead:
action(*args)
return f
def aim (self, pos):
self.dirn = pos
def move (self, dirn):
if dirn:
on_ground = self.on_sfc[1] == 1
self.walked = self.walked or on_ground
speed = conf.PLAYER['move_ground' if on_ground else 'move_air']
self.vel[0] += dirn * speed
# will need to move a little if the joint becomes off-centre
# (currently 13/24)
for l in self.legs:
l.flip(dirn < 0)
def jump (self, evt):
if evt[bmode.DOWN]:
s = conf.PLAYER['jump']['initial']
if self.on_sfc[1] == 1:
self.world.play_snd('jump')
self.vel[1] -= s
self.done_jumping.reset()
elif self.on_sfc[0]:
# wall jump
self.world.play_snd('walljump')
wj = conf.PLAYER['walljump']
self.vel[0] -= self.on_sfc[0] * s * wj['horiz']
self.vel[1] -= s * wj['vert']
self.done_jumping.reset()
elif evt[bmode.HELD] and not self.done_jumping:
self.vel[1] -= conf.PLAYER['jump']['continue']
def throw_mine (self, real, evt):
down = evt[bmode.DOWN]
up = evt[bmode.UP]
while True:
if self.throwing:
if up:
up -= 1
self.release(real, self.throw_time)
self.throwing = False
yield None
else:
break
# now self.throwing is False
if down:
down -= 1
self.throwing = True
self.throw_time = 0
else:
break
def release (self, real, force):
self.world.play_snd('throw')
vel = list(self.vel)
dx, dy = dirn = self.dirn
adirn = (dx * dx + dy * dy) ** .5
if adirn:
s = (min(force, conf.PLAYER['max_throw_speed']) *
conf.PLAYER['throw_speed'])
vel[0] += s * dx / adirn
vel[1] += s * dy / adirn
pos = dirn
dirns = []
for d in xrange(4):
axis = d % 2
this_sgn = 1 if d >= 2 else -1
x = pos[axis]
sgn = 1 if x > 0 else -1
if sgn != this_sgn:
x = 0
dirns.append((sgn * x, conf.THROW_DIRN_PRIO[d], d))
dirn = max(dirns)[2]
side = ('left', 'top', 'right', 'bottom')[dirn]
r = self.rect
pos = getattr(r, 'mid' + side)
self.world.add_mine(Mine(real, r, self.id, vel, dirn, pos))
def throw_real (self, evt):
if not self.thrown_real:
for x in self.throw_mine(self.have_real, evt):
self.thrown_real = True
break
def throw_dummy (self, evt):
list(self.throw_mine(False, evt))
def detonate (self):
self.world.detonate_mines(self, False)
def destroy (self):
if self.lasers_left:
if self.world.add_lasers(self):
self.lasers_left -= 1
def die (self, vel=(0, 0)):
self.dead = True
self.walk_counter.cancel()
for l in self.legs:
l.stop()
self.world.particles('die' + str(self.id), self.rect.center, vel)
self.world.rm(self)
| gpl-3.0 | -355,076,024,412,575,300 | 29.606178 | 79 | 0.489845 | false |
assassinen/sel-3 | python_example/test_l07.py | 1 | 1326 | __author__ = 'NovikovII'
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from python_example.fixture.fixture import driver
def test_l07(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("login").click()
wait = WebDriverWait(driver, 10)
menu_number = len(driver.find_elements_by_css_selector("ul#box-apps-menu > li"))
while menu_number:
menu_number -= 1
menu_items = driver.find_elements_by_css_selector("ul#box-apps-menu > li")
menu_items[menu_number].click()
element = wait.until(EC.presence_of_element_located((By.TAG_NAME, "h1")))
submenu_number = len(driver.find_elements_by_css_selector(".docs>li>a"))
while submenu_number:
submenu_number -= 1
submenu_items = driver.find_elements_by_css_selector(".docs>li>a")
submenu_items[submenu_number].click()
element = wait.until(EC.presence_of_element_located((By.TAG_NAME, "h1")))
aaa = EC.number_of_windows_to_be(1)
| apache-2.0 | -5,331,407,694,552,531,000 | 38 | 85 | 0.668175 | false |
JenniferShelton/read-cleaning-format-conversion | KSU_bioinfo_lab/fasta-o-matic/fasta_clean.py | 3 | 4541 | #!/usr/bin/env python
##########################################################################
# USAGE: import fasta_clean
# DESCRIPTION: Makes sure a FASTA file is formatted in the way we expect
# with wrapped sequence lines, unix newline characters and
# whitespace removed from the headers
##########################################################################
import os
import sys
import re
import argparse
from Bio import SeqIO
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__)))
util_dir = os.path.abspath(parent_dir + '/../util')
sys.path.append(util_dir)
import general
def doc():
'''
Print standard information about script.
'''
print('###########################################################')
print('# fasta_clean.py Version 0.0.1 #')
print(' Warning: Script currently under development!! #')
print(' This script was designed to test parsing files #')
print(' with BioPython. It has not been tested to #')
print(' reformat FASTA files yet!! #')
print('###########################################################')
# Assume the file is broken and make a new one with
# standardized formatting.
def fix_fasta_file(file, out_dir=None):
'''
Passes a file through biopython SeqIO to remove common
formatting issues like '\r' characters and unwrapped sequences.
    The new file is saved with the suffix '_clean.fa'.
'''
# Give up early if the file does not look like fasta
assert check_header_pattern(file), "Sorry, " + str(file) + " does not look like FASTA to me"
suffix = '_clean.fa';
(out_path,out_basename,out_ext) = general.parse_filename(file)
if out_dir is not None:
os.system("mkdir -p "+out_dir);
out_path = out_dir
fixed_file = out_path + '/' + out_basename + suffix
out_handle = general.open_write_file(fixed_file)
fasta_in = SeqIO.parse(file,'fasta');
# Iterate through the records to remove white-space
# from the ID line
new_records = []
for record in fasta_in:
header = re.sub('\s+','_',record.description)
record.id = header
record.name = header
record.description = ''
new_records.append(record)
written = SeqIO.write(new_records, out_handle,'fasta')
print str(written) + ' sequence records stored in ' + fixed_file
return(fixed_file)
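# Example of calling the cleaner from another script rather than the command
# line (illustrative only; the paths below are made up):
#
#     cleaned = fix_fasta_file('/data/reads.fa', out_dir='/tmp')
#     # cleaned == '/tmp/reads_clean.fa'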
#######################################
# Check that the file starts with
# at least a '>'
#######################################
def check_header_pattern(file):
'''
Check if FASTA file begins with a '>'. Returns True if the first line
begins with a '>'. Returns False if the file starts with any other
character.
'''
header_pattern = re.compile('^>.*')
infile = general.open_file(file)
first_line = infile.readline()
infile.close()
if header_pattern.match(first_line):
return(True)
else:
return(False)
def main():
'''
    Parses the command line and cleans the given FASTA file.
    USAGE: python fasta_clean.py [-o OUT_DIR] -f FILE
'''
######################################################################
############ Get commandline arguments ############
######################################################################
parser = argparse.ArgumentParser(
description='DESCRIPTION: Script massaged FASTA a little bit \
Command-line options that may be \
omitted (i.e. are NOT required) are shown \
in square brackets.')
parser.add_argument('-f', '--fasta',
dest='file',
help='This is the the full path (path and filename) of \
the user provided FASTA file.',
required=True)
parser.add_argument('-o', '--out_dir',
dest='out_dir',
help='Output directory for any repaired FASTA created (no trailing slash).',
default=None,
required=False)
args = parser.parse_args()
fix_fasta_file(args.file,args.out_dir)
if __name__ == '__main__':
main()
| mit | 5,302,129,931,158,223,000 | 37.159664 | 127 | 0.499449 | false |
portaloffreedom/robot-baby | Watcher/watcher.py | 2 | 2003 | import time
from watchdog.observers import Observer
from watchdog.tricks import ShellCommandTrick
class Watchdog:
SLEEP_TIME = 5
def __init__(self, __cmd_prefix="", __cmd_suffix="", __path='.'):
"""
        Constructor for the watchdog mechanism. The given command is executed
        as a shell command built from the path of the altered file combined
        with the prefix and suffix given as constructor parameters.
Args:
__cmd_prefix: first part of command that is executed. Eg. 'ls -la'
__cmd_suffix: second part of command that goes after file that
is monitored. May be omitted with ''.
__path: path to directory that is being monitored
"""
self.__cmd = __cmd_prefix + ' ${watch_src_path} ' + __cmd_suffix
self.__path = __path
self.__event_handler = ShellCommandTrick(shell_command=self.__cmd,
patterns='*',
ignore_patterns='',
ignore_directories=True,
wait_for_process=False,
drop_during_process=False)
self.__observer = Observer()
self.__observer.schedule(self.__event_handler, self.__path, recursive=False)
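    # For example (illustrative, mirroring the usage example at the bottom of
    # this file): Watchdog('./robogen-file-viewer', 'conf.txt', '/tmp') runs
    #     ./robogen-file-viewer ${watch_src_path} conf.txt
    # where watchdog substitutes ${watch_src_path} with the path of the file
    # that changed inside /tmp.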
def run(self):
"""
Synchronous run
"""
self.__observer.start()
try:
while True:
time.sleep(self.SLEEP_TIME)
except KeyboardInterrupt:
self.__observer.stop()
self.__observer.join()
def start(self):
"""
Asynchronous run
"""
self.__observer.start()
def join(self):
self.__observer.join()
def stop(self):
self.__observer.stop()
# Usage example
if __name__ == "__main__":
wdog = Watchdog(
'./robogen-file-viewer',
'conf.txt',
'/tmp')
wdog.run()
| apache-2.0 | 2,464,659,066,338,190,300 | 30.296875 | 84 | 0.501747 | false |
luzpaz/QGIS | tests/src/python/test_authmanager_oauth2_ows.py | 15 | 10092 | # -*- coding: utf-8 -*-
"""
Tests for auth manager WMS/WFS using QGIS Server through OAuth2
enabled qgis_wrapped_server.py.
This is an integration test for QGIS Desktop Auth Manager WFS and WMS provider
and QGIS Server WFS/WMS that check if QGIS can use a stored auth manager auth
configuration to access an OAuth2 Resource Owner Grant Flow protected endpoint.
From build dir, run: ctest -R PyQgsAuthManagerOAuth2OWSTest -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import sys
import re
import subprocess
import tempfile
import urllib
import stat
import json
import time
import random
__author__ = 'Alessandro Pasotti'
__date__ = '20/04/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
from shutil import rmtree
from utilities import unitTestDataPath, waitServer
from qgis.core import (
QgsApplication,
QgsAuthMethodConfig,
QgsVectorLayer,
QgsRasterLayer,
)
from qgis.PyQt.QtNetwork import QSslCertificate
from qgis.testing import (
start_app,
unittest,
)
try:
QGIS_SERVER_ENDPOINT_PORT = os.environ['QGIS_SERVER_ENDPOINT_PORT']
except:
QGIS_SERVER_ENDPOINT_PORT = '0' # Auto
QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()
os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH
qgis_app = start_app()
def setup_oauth(username, password, token_uri, refresh_token_uri='', authcfg_id='oauth-2', authcfg_name='OAuth2 test configuration'):
"""Setup oauth configuration to access OAuth API,
return authcfg_id on success, None on failure
"""
cfgjson = {
"accessMethod": 0,
"apiKey": "",
"clientId": "",
"clientSecret": "",
"configType": 1,
"grantFlow": 2,
"password": password,
"persistToken": False,
"redirectPort": '7070',
"redirectUrl": "",
"refreshTokenUrl": refresh_token_uri,
"requestTimeout": '30',
"requestUrl": "",
"scope": "",
"tokenUrl": token_uri,
"username": username,
"version": 1
}
if authcfg_id not in QgsApplication.authManager().availableAuthMethodConfigs():
authConfig = QgsAuthMethodConfig('OAuth2')
authConfig.setId(authcfg_id)
authConfig.setName(authcfg_name)
authConfig.setConfig('oauth2config', json.dumps(cfgjson))
if QgsApplication.authManager().storeAuthenticationConfig(authConfig):
return authcfg_id
else:
authConfig = QgsAuthMethodConfig()
QgsApplication.authManager().loadAuthenticationConfig(authcfg_id, authConfig, True)
authConfig.setName(authcfg_name)
authConfig.setConfig('oauth2config', json.dumps(cfgjson))
if QgsApplication.authManager().updateAuthenticationConfig(authConfig):
return authcfg_id
return None
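# Illustrative call (the endpoint URLs below are placeholders, not part of this
# test suite):
#     authcfg = setup_oauth('username', 'password',
#                           'https://127.0.0.1:8443/token',
#                           'https://127.0.0.1:8443/refresh')
# On success `authcfg` is the stored configuration id ('oauth-2' by default).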
class TestAuthManager(unittest.TestCase):
@classmethod
def setUpAuth(cls):
"""Run before all tests and set up authentication"""
authm = QgsApplication.authManager()
assert (authm.setMasterPassword('masterpassword', True))
cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'chains_subissuer-issuer-root_issuer2-root2.pem')
assert os.path.isfile(cls.sslrootcert_path)
os.chmod(cls.sslrootcert_path, stat.S_IRUSR)
cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path)
assert cls.sslrootcert is not None
authm.storeCertAuthorities(cls.sslrootcert)
authm.rebuildCaCertsCache()
authm.rebuildTrustedCaCertsCache()
cls.server_cert = os.path.join(cls.certsdata_path, '127_0_0_1_ssl_cert.pem')
cls.server_key = os.path.join(cls.certsdata_path, '127_0_0_1_ssl_key.pem')
cls.server_rootcert = cls.sslrootcert_path
os.chmod(cls.server_cert, stat.S_IRUSR)
os.chmod(cls.server_key, stat.S_IRUSR)
os.chmod(cls.server_rootcert, stat.S_IRUSR)
os.environ['QGIS_SERVER_HOST'] = cls.hostname
os.environ['QGIS_SERVER_PORT'] = str(cls.port)
os.environ['QGIS_SERVER_OAUTH2_KEY'] = cls.server_key
os.environ['QGIS_SERVER_OAUTH2_CERTIFICATE'] = cls.server_cert
os.environ['QGIS_SERVER_OAUTH2_USERNAME'] = cls.username
os.environ['QGIS_SERVER_OAUTH2_PASSWORD'] = cls.password
os.environ['QGIS_SERVER_OAUTH2_AUTHORITY'] = cls.server_rootcert
# Set default token expiration to 2 seconds, note that this can be
# also controlled when issuing token requests by adding ttl=<int>
# to the query string
os.environ['QGIS_SERVER_OAUTH2_TOKEN_EXPIRES_IN'] = '2'
@classmethod
def setUpClass(cls):
"""Run before all tests:
Creates an auth configuration"""
cls.port = QGIS_SERVER_ENDPOINT_PORT
# Clean env just to be sure
env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
for ev in env_vars:
try:
del os.environ[ev]
except KeyError:
pass
cls.testdata_path = unitTestDataPath('qgis_server')
cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys')
cls.project_path = os.path.join(cls.testdata_path, "test_project.qgs")
# cls.hostname = 'localhost'
cls.protocol = 'https'
cls.hostname = '127.0.0.1'
cls.username = 'username'
cls.password = 'password'
cls.setUpAuth()
server_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'qgis_wrapped_server.py')
cls.server = subprocess.Popen([sys.executable, server_path],
env=os.environ, stdout=subprocess.PIPE)
line = cls.server.stdout.readline()
cls.port = int(re.findall(b':(\d+)', line)[0])
assert cls.port != 0
# We need a valid port before we setup the oauth configuration
cls.token_uri = '%s://%s:%s/token' % (cls.protocol, cls.hostname, cls.port)
cls.refresh_token_uri = '%s://%s:%s/refresh' % (cls.protocol, cls.hostname, cls.port)
        # Need a random authcfg or the cache will bite us back!
cls.authcfg_id = setup_oauth(cls.username, cls.password, cls.token_uri, cls.refresh_token_uri, str(random.randint(0, 10000000)))
# This is to test wrong credentials
cls.wrong_authcfg_id = setup_oauth('wrong', 'wrong', cls.token_uri, cls.refresh_token_uri, str(random.randint(0, 10000000)))
# Get the authentication configuration instance:
cls.auth_config = QgsApplication.authManager().availableAuthMethodConfigs()[cls.authcfg_id]
assert cls.auth_config.isValid()
# Wait for the server process to start
assert waitServer('%s://%s:%s' % (cls.protocol, cls.hostname, cls.port)), "Server is not responding! %s://%s:%s" % (cls.protocol, cls.hostname, cls.port)
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
cls.server.kill()
rmtree(QGIS_AUTH_DB_DIR_PATH)
del cls.server
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
@classmethod
def _getWFSLayer(cls, type_name, layer_name=None, authcfg=None):
"""
WFS layer factory
"""
if layer_name is None:
layer_name = 'wfs_' + type_name
parms = {
'srsname': 'EPSG:4326',
'typename': type_name,
'url': '%s://%s:%s/?map=%s' % (cls.protocol, cls.hostname, cls.port, cls.project_path),
'version': 'auto',
'table': '',
}
if authcfg is not None:
parms.update({'authcfg': authcfg})
uri = ' '.join([("%s='%s'" % (k, v)) for k, v in list(parms.items())])
wfs_layer = QgsVectorLayer(uri, layer_name, 'WFS')
return wfs_layer
@classmethod
def _getWMSLayer(cls, layers, layer_name=None, authcfg=None):
"""
WMS layer factory
"""
if layer_name is None:
layer_name = 'wms_' + layers.replace(',', '')
parms = {
'crs': 'EPSG:4326',
'url': '%s://%s:%s/?map=%s' % (cls.protocol, cls.hostname, cls.port, cls.project_path),
'format': 'image/png',
# This is needed because of a really weird implementation in QGIS Server, that
            # replaces _ in the real layer name with spaces
'layers': urllib.parse.quote(layers.replace('_', ' ')),
'styles': '',
'version': 'auto',
# 'sql': '',
}
if authcfg is not None:
parms.update({'authcfg': authcfg})
uri = '&'.join([("%s=%s" % (k, v.replace('=', '%3D'))) for k, v in list(parms.items())])
wms_layer = QgsRasterLayer(uri, layer_name, 'wms')
return wms_layer
def testNoAuthAccess(self):
"""
Access the protected layer with no credentials
"""
wms_layer = self._getWMSLayer('testlayer_èé')
self.assertFalse(wms_layer.isValid())
def testInvalidAuthAccess(self):
"""
Access the protected layer with wrong credentials
"""
wms_layer = self._getWMSLayer('testlayer_èé', authcfg=self.wrong_authcfg_id)
self.assertFalse(wms_layer.isValid())
def testValidAuthAccess(self):
"""
Access the protected layer with valid credentials
Note: cannot test invalid access WFS in a separate test because
it would fail the subsequent (valid) calls due to cached connections
"""
wfs_layer = self._getWFSLayer('testlayer_èé', authcfg=self.auth_config.id())
self.assertTrue(wfs_layer.isValid())
wms_layer = self._getWMSLayer('testlayer_èé', authcfg=self.auth_config.id())
self.assertTrue(wms_layer.isValid())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -7,765,616,038,647,268,000 | 36.348148 | 161 | 0.623165 | false |
ozburo/youtube-dl | youtube_dl/extractor/infoq.py | 6 | 5072 | # coding: utf-8
from __future__ import unicode_literals
from ..compat import (
compat_b64decode,
compat_urllib_parse_unquote,
compat_urlparse,
)
from ..utils import (
determine_ext,
update_url_query,
)
from .bokecc import BokeCCBaseIE
class InfoQIE(BokeCCBaseIE):
_VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
'id': 'A-Few-of-My-Favorite-Python-Things',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
},
}, {
'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
'only_matching': True,
}, {
'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery',
'md5': '4918d0cca1497f2244572caf626687ef',
'info_dict': {
'id': 'openstack-continued-delivery',
'title': 'OpenStack持续交付之路',
'ext': 'flv',
'description': 'md5:308d981fb28fa42f49f9568322c683ff',
},
}, {
'url': 'https://www.infoq.com/presentations/Simple-Made-Easy',
'md5': '0e34642d4d9ef44bf86f66f6399672db',
'info_dict': {
'id': 'Simple-Made-Easy',
'title': 'Simple Made Easy',
'ext': 'mp3',
'description': 'md5:3e0e213a8bbd074796ef89ea35ada25b',
},
'params': {
'format': 'bestaudio',
},
}]
def _extract_rtmp_video(self, webpage):
# The server URL is hardcoded
video_url = 'rtmpe://videof.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None)
real_id = compat_urllib_parse_unquote(compat_b64decode(encoded_id).decode('utf-8'))
playpath = 'mp4:' + real_id
return [{
'format_id': 'rtmp_video',
'url': video_url,
'ext': determine_ext(playpath),
'play_path': playpath,
}]
def _extract_cf_auth(self, webpage):
policy = self._search_regex(r'InfoQConstants\.scp\s*=\s*\'([^\']+)\'', webpage, 'policy')
signature = self._search_regex(r'InfoQConstants\.scs\s*=\s*\'([^\']+)\'', webpage, 'signature')
key_pair_id = self._search_regex(r'InfoQConstants\.sck\s*=\s*\'([^\']+)\'', webpage, 'key-pair-id')
return {
'Policy': policy,
'Signature': signature,
'Key-Pair-Id': key_pair_id,
}
def _extract_http_video(self, webpage):
http_video_url = self._search_regex(r'P\.s\s*=\s*\'([^\']+)\'', webpage, 'video URL')
http_video_url = update_url_query(http_video_url, self._extract_cf_auth(webpage))
return [{
'format_id': 'http_video',
'url': http_video_url,
'http_headers': {'Referer': 'https://www.infoq.com/'},
}]
def _extract_http_audio(self, webpage, video_id):
fields = self._form_hidden_inputs('mp3Form', webpage)
http_audio_url = fields.get('filename')
if not http_audio_url:
return []
# base URL is found in the Location header in the response returned by
# GET https://www.infoq.com/mp3download.action?filename=... when logged in.
http_audio_url = compat_urlparse.urljoin('http://ress.infoq.com/downloads/mp3downloads/', http_audio_url)
http_audio_url = update_url_query(http_audio_url, self._extract_cf_auth(webpage))
        # audio file seems to be missing sometimes even if there is a download link
# so probe URL to make sure
if not self._is_valid_url(http_audio_url, video_id):
return []
return [{
'format_id': 'http_audio',
'url': http_audio_url,
'vcodec': 'none',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
video_description = self._html_search_meta('description', webpage, 'description')
if '/cn/' in url:
# for China videos, HTTP video URL exists but always fails with 403
formats = self._extract_bokecc_formats(webpage, video_id)
else:
formats = (
self._extract_rtmp_video(webpage)
+ self._extract_http_video(webpage)
+ self._extract_http_audio(webpage, video_id))
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': video_description,
'formats': formats,
}
| unlicense | -3,598,789,930,153,303,600 | 35.934307 | 169 | 0.561067 | false |
adam-incuna/imperial-painter | test_app/settings.py | 2 | 1745 | import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = bool(os.environ.get('DEBUG', True))
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = (
'painter',
'test_app',
'django_extensions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
SECRET_KEY = 'Django requires this to be set, but this project does not make use of it'
ROOT_URLCONF = 'test_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_app.wsgi.application'
DATABASES = {
'default': dj_database_url.config(
default='postgres://localhost/painter',
),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
LANGUAGE_CODE = 'en-gb'
USE_TZ = False
STATIC_ROOT = os.path.join(BASE_DIR, 'painter/static')
STATIC_URL = os.environ.get('STATIC_URL', '/static/')
TEST_RUNNER = 'painter.tests.runner.TestRunner'
# Imperial Painter settings
IP_DATA_FILES = [
os.path.join(BASE_DIR, 'Test Cards.xlsx'),
os.path.join(BASE_DIR, 'Test Cards.xlsx'),
]
IP_IMPORTER = 'painter.importers.import_cards'
| mit | -2,089,968,417,175,643,400 | 23.577465 | 87 | 0.65043 | false |
dhomeier/astropy | astropy/timeseries/periodograms/lombscargle/implementations/utils.py | 3 | 5013 | from math import factorial
import numpy as np
def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
"""
return 1 << int(N - 1).bit_length()
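# A few sample values (illustrative): bitceil(1) == 1, bitceil(5) == 8,
# bitceil(1024) == 1024, i.e. the smallest power of two >= N.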
def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
using lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array_like
array of abscissas
y : array_like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Example
-------
>>> rng = np.random.RandomState(0)
>>> x = 100 * rng.rand(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
# Now use legendre polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = (x % 1 == 0)
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1,
oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums::
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
Where f_j = freq_factor * (f0 + j * df) for the values j in 1 ... N.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array_like
array of input times
h : array_like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float, optional
The low frequency to use
freq_factor : float, optional
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling freq_factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the trade-off between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarrays
summation arrays for frequencies f = df * np.arange(1, N + 1)
"""
df *= freq_factor
f0 *= freq_factor
if df <= 0:
raise ValueError("df must be positive")
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
if Mfft <= 0:
raise ValueError("Mfft must be positive")
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)[:N]
if t0 != 0:
f = f0 + df * np.arange(N)
fftgrid *= np.exp(2j * np.pi * t0 * f)
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C
| bsd-3-clause | 9,208,805,311,177,000,000 | 30.727848 | 79 | 0.574706 | false |
rchaber/publishbay | boilerplate/external/babel/messages/checkers.py | 19 | 6417 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Various routines that help with validation of translations.
:since: version 0.9
"""
from itertools import izip
from babel.messages.catalog import TranslationError, PYTHON_FORMAT
from babel.util import set
#: list of format chars that are compatible to each other
_string_format_compatibilities = [
set(['i', 'd', 'u']),
set(['x', 'X']),
set(['f', 'F', 'g', 'G'])
]
def num_plurals(catalog, message):
"""Verify the number of plurals in the translation."""
if not message.pluralizable:
if not isinstance(message.string, basestring):
raise TranslationError("Found plural forms for non-pluralizable "
"message")
return
# skip further test if no catalog is provided.
elif catalog is None:
return
msgstrs = message.string
if not isinstance(msgstrs, (list, tuple)):
msgstrs = (msgstrs,)
if len(msgstrs) != catalog.num_plurals:
raise TranslationError("Wrong number of plural forms (expected %d)" %
catalog.num_plurals)
def python_format(catalog, message):
"""Verify the format string placeholders in the translation."""
if 'python-format' not in message.flags:
return
msgids = message.id
if not isinstance(msgids, (list, tuple)):
msgids = (msgids,)
msgstrs = message.string
if not isinstance(msgstrs, (list, tuple)):
msgstrs = (msgstrs,)
for msgid, msgstr in izip(msgids, msgstrs):
if msgstr:
_validate_format(msgid, msgstr)
def _validate_format(format, alternative):
"""Test format string `alternative` against `format`. `format` can be the
msgid of a message and `alternative` one of the `msgstr`\s. The two
arguments are not interchangeable as `alternative` may contain less
placeholders if `format` uses named placeholders.
The behavior of this function is undefined if the string does not use
string formattings.
If the string formatting of `alternative` is compatible to `format` the
function returns `None`, otherwise a `TranslationError` is raised.
Examples for compatible format strings:
>>> _validate_format('Hello %s!', 'Hallo %s!')
>>> _validate_format('Hello %i!', 'Hallo %d!')
Example for an incompatible format strings:
>>> _validate_format('Hello %(name)s!', 'Hallo %s!')
Traceback (most recent call last):
...
TranslationError: the format strings are of different kinds
This function is used by the `python_format` checker.
:param format: The original format string
:param alternative: The alternative format string that should be checked
against format
:return: None on success
:raises TranslationError: on formatting errors
"""
def _parse(string):
result = []
for match in PYTHON_FORMAT.finditer(string):
name, format, typechar = match.groups()
if typechar == '%' and name is None:
continue
result.append((name, str(typechar)))
return result
def _compatible(a, b):
if a == b:
return True
for set in _string_format_compatibilities:
if a in set and b in set:
return True
return False
def _check_positional(results):
positional = None
for name, char in results:
if positional is None:
positional = name is None
else:
if (name is None) != positional:
raise TranslationError('format string mixes positional '
'and named placeholders')
return bool(positional)
a, b = map(_parse, (format, alternative))
# now check if both strings are positional or named
a_positional, b_positional = map(_check_positional, (a, b))
if a_positional and not b_positional and not b:
raise TranslationError('placeholders are incompatible')
elif a_positional != b_positional:
raise TranslationError('the format strings are of different kinds')
# if we are operating on positional strings both must have the
# same number of format chars and those must be compatible
if a_positional:
if len(a) != len(b):
raise TranslationError('positional format placeholders are '
'unbalanced')
for idx, ((_, first), (_, second)) in enumerate(izip(a, b)):
if not _compatible(first, second):
raise TranslationError('incompatible format for placeholder '
'%d: %r and %r are not compatible' %
(idx + 1, first, second))
# otherwise the second string must not have names the first one
# doesn't have and the types of those included must be compatible
else:
type_map = dict(a)
for name, typechar in b:
if name not in type_map:
raise TranslationError('unknown named placeholder %r' % name)
elif not _compatible(typechar, type_map[name]):
raise TranslationError('incompatible format for '
'placeholder %r: '
'%r and %r are not compatible' %
(name, typechar, type_map[name]))
def _find_checkers():
try:
from pkg_resources import working_set
except ImportError:
return [num_plurals, python_format]
checkers = []
for entry_point in working_set.iter_entry_points('babel.checkers'):
checkers.append(entry_point.load())
return checkers
checkers = _find_checkers()
| lgpl-3.0 | -4,901,518,939,735,194,000 | 34.87931 | 78 | 0.59919 | false |
tsuna/tcollector | collectors/0/tcollector.py | 2 | 5473 | #!/usr/bin/python
# This file is part of tcollector.
# Copyright (C) 2013 ProfitBricks GmbH
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
"""
Collects statistics about running processes from /proc into TSDB.
Currently the following is collected:
- Number of running tcollector processes
- CPU and memory statistics from tcollector process and children
"""
import os
import pwd
import resource
import sys
import time
from collectors.lib import utils
COLLECTION_INTERVAL = 15 # seconds
class ProcessTerminatedError(Exception):
pass
class Process(object):
def __init__(self, pid):
self.pid = pid
stat = self.stat()
self.comm = stat["comm"].strip("()")
self.ppid = int(stat["ppid"])
self._cmdline = None
@property
def cmdline(self):
""" Returns /proc/[pid]/cmdline as tuple.
        If the process has already terminated, ProcessTerminatedError is raised.
"""
if self._cmdline is None:
path = "/proc/%s/cmdline" % self.pid
try:
with open(path) as f:
cmdl = f.readline()
if cmdl:
self._cmdline = tuple(cmdl.split('\0'))
except IOError:
raise ProcessTerminatedError()
return self._cmdline
def stat(self):
""" Returns /proc/[pid]/stat as dict.
The dict only contains the values that are currently used, but can
be extended easily.
        If the process has already terminated, ProcessTerminatedError is raised.
"""
path = "/proc/%s/stat" % self.pid
try:
with open(path) as f:
spl = f.readline().split()
except IOError:
raise ProcessTerminatedError()
rv = {"pid": spl[0], "comm": spl[1], "ppid": spl[3],
"utime": spl[13], "stime": spl[14], "cutime": spl[15],
"cstime": spl[16], "vsize": spl[22], "rss": spl[23]}
# supported since Kernel 2.6.24
if len(spl) > 43:
rv.update({"guest_time": spl[42],
"cguest_time": spl[43]})
return rv
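# Illustrative use (assumes a Linux-style /proc filesystem):
#     p = Process(os.getpid())
#     p.comm, p.ppid            # command name and parent pid from /proc/[pid]/stat
#     int(p.stat()["rss"])      # resident set size, in pages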
class ProcessTable(object):
""" List of all running processes.
    Process information is gathered from /proc.
"""
def __init__(self):
self.processes = {}
self.update()
def update(self):
new = {}
pids = [int(i) for i in os.listdir("/proc") if i.isdigit()]
for pid in pids:
# TODO: Optimize: Don't create 2 objects, use a factory function
# or something similar
if pid in self.processes:
new[pid] = self.processes[pid]
else:
try:
p = Process(pid)
new[pid] = p
except ProcessTerminatedError:
continue
self.processes = new
def filter(self, cond):
""" Return processes for that the function cond evaluates to true. """
return filter(cond, self.processes.values())
def collect_tcollect_stats(processes):
# print a msg and do nothing if the parent process isn't tcollector
    # (eg when this collector is executed directly from a shell)
tcol_pid = os.getppid()
tcol_process = Process(tcol_pid)
if not "tcollector.py" in " ".join(tcol_process.cmdline):
sys.stderr.write("Parent Process %s isn't a tcollector instance\n" %
tcol_pid)
return
tcollect_procs = processes.filter(lambda p: p.ppid == tcol_pid)
ts = int(time.time())
print("tcollector.processes %s %s" % (ts, len(tcollect_procs)))
for p in tcollect_procs:
cpu_time = 0
try:
s = p.stat()
except ProcessTerminatedError:
continue
cpu_time += int(s["utime"])
cpu_time += int(s["cutime"])
cpu_time += int(s["stime"])
cpu_time += int(s["cstime"])
cpu_time += int(s["guest_time"])
cpu_time += int(s["cguest_time"])
        # ensure 'tcollector.py' is reported as the name for the main tcollector
        # process; if tcollector.py is executed with "python tcollector.py",
        # comm will contain "python"
if p.pid == tcol_pid:
comm = "tcollector.py"
else:
comm = p.comm
print("tcollector.cputime %s %s name=%s" % (ts, cpu_time, comm))
print("tcollector.mem_bytes %s %s name=%s type=vsize" %
(ts, s["vsize"], comm))
print("tcollector.mem_bytes %s %s name=%s type=rss" %
(ts, int(s["rss"]) * resource.getpagesize(), comm))
def main():
utils.drop_privileges()
while True:
processes = ProcessTable()
processes.update()
collect_tcollect_stats(processes)
time.sleep(COLLECTION_INTERVAL)
if __name__ == "__main__":
main()
| gpl-3.0 | 516,319,852,061,195,650 | 30.454023 | 79 | 0.581034 | false |
trabucayre/gnuradio | gr-blocks/python/blocks/qa_vco.py | 1 | 1819 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
import math
def sig_source_f(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [amp*math.cos(2.*math.pi*freq*x) for x in t]
return y
def sig_source_c(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [math.cos(2.*math.pi*freq*x) + \
1j*math.sin(2.*math.pi*freq*x) for x in t]
return y
class test_vco(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001(self):
src_data = 200*[0,] + 200*[0.5,] + 200*[1,]
expected_result = 200*[1,] + \
sig_source_f(1, 0.125, 1, 200) + \
sig_source_f(1, 0.25, 1, 200)
src = blocks.vector_source_f(src_data)
op = blocks.vco_f(1, math.pi / 2.0, 1)
dst = blocks.vector_sink_f()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5)
def test_002(self):
src_data = 200*[0,] + 200*[0.5,] + 200*[1,]
expected_result = 200*[1,] + \
sig_source_c(1, 0.125, 1, 200) + \
sig_source_c(1, 0.25, 1, 200)
src = blocks.vector_source_f(src_data)
op = blocks.vco_c(1, math.pi / 2.0, 1)
dst = blocks.vector_sink_c()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_vco, "test_vco.xml")
| gpl-3.0 | 527,838,367,891,515,600 | 24.985714 | 76 | 0.556899 | false |
austin-garrard/Blobber | src/resource.py | 1 | 2541 | from random import randint, uniform
class Resource:
def __init__(self, xy, radius, value):
self.xy = xy
self.color = (0,255,0)
self.radius = radius
self.value = value
self.name = 'res'
def change_pos(self, new_x, new_y):
self.xy = (new_x, new_y)
def updatePos(self):
pass
class CenaBoost:
def __init__(self, xy, radius):
self.xy = xy
self.color = (0, 0, 255)
self.radius = radius
self.name = "CENA"
self.songfile = "songs\\CENA.mp3"
def change_pos(self, new_x, new_y):
self.xy = (new_x, new_y)
class ResourceFactory:
def __init__(self, map, maxResources=2000):
self.map = map
self.minValue = .05
self.maxValue = .11
self.maxResources = maxResources
#initially populate the map with resources
def createInitialResources(self):
self.createResource(self.maxResources/2)
#generate a number of resources
def generateResources(self, numResources):
numNeedResources = self.maxResources - numResources;
for i in range(numNeedResources/self.maxResources):
self.map.addResource(self.createResource())
#randomly create a default resource
def createResource(self, N=1):
n = min(N, self.maxResources - len(self.map.resources))
for i in range(n):
while True:
x = uniform(0, self.map.width)
y = uniform(0, self.map.height)
if self.map.validPosition((x,y)):
value = uniform(self.minValue, self.maxValue)
self.map.addResource(Resource((x,y), value, value))
break
#create a default resource in a given area
def createResourceInArea(self, pos, dim, N=1):
n = min(N, self.maxResources - len(self.map.resources))
for i in range(n):
while True:
x = uniform(int(pos[0]), int(pos[0] + dim[0]))
y = uniform(int(pos[1]), int(pos[1] + dim[1]))
if self.map.validPosition((x,y)):
value = uniform(self.minValue, self.maxValue)
self.map.addResource(Resource((x,y), value, value))
break
#create a randomly placed, specifically valued resource
def createResourceWithValue(self, value):
while True:
x = randint(0, self.map.width)
            y = randint(0, self.map.height)
if self.map.validPosition((x,y)):
return Resource((x,y), value, value)
def createResourceInAreaWithValue(self, pos, dim, value):
while True:
x = randint(pos[0], pos[0] + dim[0])
y = randint(pos[1], pos[1] + dim[1])
if self.map.validPosition((x,y)):
return Resource((x,y), value, value) | gpl-2.0 | 1,413,024,260,717,696,500 | 30 | 61 | 0.632822 | false |
jstepien/spit | spit/network.py | 1 | 21454 | import socket, sys, pickle, select, os, zlib, errno
import game
"""A protocol version tuple: major and minor version. The minor version is
increased when the protocol changes without breaking the backward compatibility.
The major version changes when the protocol becomes incompatible with previous
versions. Thus clients and servers with the same major version SHOULD talk with
each other without any problems."""
ProtocolVersion = 0, 0
"This port number seems unreserved yet."
DefaultPort = 23658
"By default the server listens on all available interfaces."
DefaultHost = '0.0.0.0'
"Prints a given message on stderr."
def debug(msg, *vars):
sys.stderr.write(msg % vars)
class Message:
r"""
Represents a message sent between a server and a client.
The following table describes what is expected in the Message.args field
depending on Message.type contents.
/----------------+---------------------------------------------------\
| Type | Args |
+----------------+---------------------------------------------------+
| Error | An error code. |
| Hello | The ProtocolVersion tuple. |
| Bye | - |
| NewDeal | - |
| GameState | A structure defined in Server.send_game_state |
| | documentation. |
| MoveToHand | A pair: source and target piles' numbers. |
| MoveToTable | A pair: source pile and target table numbers. |
| Uncover | The number of the pile to uncover. |
| Stop | The number of the table pile to stop. |
| EndOfRound | A boolean telling whether the recipient has won. |
| EndOfGame | A boolean telling whether the recipient has won. |
| NewCardRequest | - |
| Ack | - |
\----------------+---------------------------------------------------/
"""
"""
Message types used in network communication. See Message for details.
"""
Error, Hello, Bye, NewDeal, GameState, MoveToHand, MoveToTable, Stop, Uncover, \
NewCardRequest, EndOfGame, EndOfRound, Ack = range(13)
def __init__(self, type, args = None):
self.type = type
self.args = args
def __repr__(self):
return "<network.Message(%i, %s)>" % (self.type, repr(self.args))
class PickledTalker:
"""
Provides methods for sending and receiving objects through sockets using
    pickle serialization. Each message is preceded by a 2-byte integer in network
    byte order representing the length of the remaining part of the message.
    The remaining bytes are a zlib-compressed pickled instance of the Message class.
"""
def __init__(self, socket = None):
self.socket = socket
def send(self, message, sock = None):
"""
Sends a given message to self.socket or the given sock.
"""
# Use the fastest compression.
data = zlib.compress(pickle.dumps(message), 1)
# TODO: check if the length of sent data is lesser than 2^16B. It should
# be far lesser, but who knows, shit happens.
enc_len = socket.htons(len(data))
data = chr(enc_len & 0xff) + chr(enc_len >> 8) + data
debug("%i sends message of size %i\n", os.getpid(), len(data))
(self.socket if not sock else sock).sendall(data)
def recv(self, sock = None, timeout = None):
"""
        Reads and deserialises a Message from self.socket or the given socket.
        Returns the message. The timeout is applied to socket.recv.
"""
sock = sock or self.socket
# Store the old timeout value as we've got to reset it later.
if timeout:
old_timeout = sock.gettimeout()
sock.settimeout(timeout)
try:
# Begin by decoding 2 bytes representing the length of the incoming
# message.
enc_len = sock.recv(2)
if not enc_len:
return
# TODO: those ord calls don't seem to be compatible with Python 3.
data = sock.recv(socket.ntohs(
ord(enc_len[0]) | (ord(enc_len[1]) << 8)))
debug("%i recvd message of size %i\n", os.getpid(), len(data))
# Deserialise the received data.
return pickle.loads(zlib.decompress(data)) if data else None
except socket.timeout:
pass
finally:
# Reset the timeout value.
if timeout:
sock.settimeout(old_timeout)
def recv_many(self, sockets, timeout = None):
"""
        Reads and deserialises a Message from one of the sockets given in the
        arguments using select. Returns the message and the socket it came from.
        The timeout is applied to socket.recv and select.select. If a timeout was
        set and no socket is ready to be read, it returns (None, None).
"""
sockets, _, _ = select.select(sockets, [], [], timeout)
if not sockets:
return (None, None)
return (self.recv(sockets[0], timeout), sockets[0])
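# Illustrative sketch (not part of the original module): the framing performed
# by PickledTalker.send written as a standalone function -- a 2 byte length
# prefix derived via htons followed by the zlib-compressed pickle of the message.
def _example_frame(message):
    """Return the bytes PickledTalker.send would put on the wire (hypothetical)."""
    data = zlib.compress(pickle.dumps(message), 1)
    enc_len = socket.htons(len(data))
    return chr(enc_len & 0xff) + chr(enc_len >> 8) + data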
class Server():
"""
Implements the game server which manages clients connections and game
sessions. It's thread-free but it forks when a new game is launched.
The server is built in a style resembling a state machine. The main loop in
the start method infinitely calls the handle method of the current state.
States are implemented as subclasses of the State.Base class.
"""
class State:
"""
This class consists of the Base state class and subclasses which
implement all server's states.
"""
class Base:
"""
The base state type. Classes which derive from it implement the
handle method which is called in the server's main loop. Base
instances have access to the Server instance using self.server.
"""
def __init__(self, server):
self.server = server
def handle(self):
"""
Executes the logic of a server in a given state. The following
invariants have to be kept after this method returns.
                - self.server.socket.gettimeout() is None
- self.server.socket is blocking
"""
pass
class Accepting(Base):
"""
This is the first state of the Server, in which it accepts
connections from clients who'd like to begin a game.
Newly accepted players are sent an Ack message. Clients using an
incompatible protocol are sent a Bye message and their connections
are closed.
            While only one client is connected, it waits for a partner.
            After a second one joins, the state is set to NewGame.
"""
"Timeout for all socket operations. Represented in seconds."
Timeout = 0.25
def handle(self):
"""
Accept new clients (if anyone's waiting), check if anyone has
disconnected and waitpid for child processes (if any).
"""
self.server.socket.settimeout(self.Timeout)
try:
self.accept_new_client()
except socket.timeout:
if any(self.server.clients):
self.check_disconnecting_clients()
self.wait_for_child_processes()
finally:
self.server.socket.settimeout(None)
def accept_new_client(self):
conn, addr = self.server.socket.accept()
debug('Incoming connection from %s\n', addr)
if not self.protocol_compatible(conn):
self.server.talker.send(Message(Message.Bye), conn)
conn.close()
debug('Client %s was incompatible\n', addr)
return
self.server.add_client((conn, addr))
self.server.talker.send(Message(Message.Ack), conn)
if len(self.server.client_sockets()) == 2:
self.server.set_state(Server.State.NewGame)
def protocol_compatible(self, conn):
msg = self.server.talker.recv(conn)
debug('Message: %s\n', repr(msg))
return msg.type == Message.Hello and \
msg.args[0] == ProtocolVersion[0]
def check_disconnecting_clients(self):
msg, sock = self.server.talker.recv_many(
self.server.client_sockets(), self.Timeout)
if not msg:
return
if msg.type == Message.Bye:
# There's only one client, so we can remove him by removing
# everyone.
debug("awaiting player disconnected\n")
sock.close()
self.server.remove_waiting_clients()
else:
# That's not what we've expected. Send him "Bye" and
# disconnect.
debug("awaiting player broke the protocol\n")
self.server.talker.send(Message(Message.Bye), sock)
sock.close()
self.server.remove_waiting_clients()
def wait_for_child_processes(self):
'''
Waits for all child processes to finish, without blocking. We
don't care about the result.
'''
try:
os.waitpid(-1, os.WNOHANG)
except OSError, ex:
# Lack of child processes isn't an error.
if not ex.errno == errno.ECHILD:
raise ex
class NewGame(Base):
"""
In this state the server forks. The child process starts a new game
and switches to the NewDeal state. The parent process removes
clients which are to begin the game and goes back to Accepting.
"""
def handle(self):
try:
pid = os.fork()
except OSError, ex:
# Disconnect both clients and panic.
for sock in self.server.client_sockets():
self.server.talker.send(Message(Message.Bye), sock)
sock.close()
raise ex
if pid == 0:
self.server.game = game.Game()
self.server.set_state(Server.State.NewDeal)
else:
self.server.remove_waiting_clients()
self.server.set_state(Server.State.Accepting)
class NewDeal(Base):
"""
Deals new cards from stockpiles to table, sends a NewDeal message
and goes to the SendGameState.
"""
def handle(self):
self.server.game.new_cards_onto_table()
for sock in self.server.client_sockets():
self.server.talker.send(Message(Message.NewDeal), sock)
self.server.set_state(Server.State.SendGameState)
class SendGameState(Base):
"""
Sends information about the current game state to both clients and
sets server to the AwaitingMoves state.
"""
def handle(self):
self.send_game_state()
self.server.set_state(Server.State.AwaitingMoves)
def send_game_state(self):
"""
Prepares and sends data describing the current state of the
game. The args field of the sent Message is an array consisting
of two hashes with following keys:
- 'table' - a card on the top of one of table piles.
- 'hand' - an array of 5 cards in player's hand.
- 'stockpile' - the length of player's stockpile.
Lack of a card is represented with None.
"""
debug("sending game state\n")
g = self.server.game
player_info = [{
'table' : g.table_top(i),
'hand' : [(pile[-1] if any(pile) else None) for pile in
g.players[i].piles()],
'stockpile' : g.players[i].stockpile_len()
} for i in [0, 1]]
for client in [0, 1]:
msg = Message(Message.GameState,
(player_info[client], player_info[1 - client]))
self.server.talker.send(msg,
self.server.client_sockets()[client])
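# Illustrative sketch (added for clarity, not part of the original source):
# for client 0 the GameState args built above have the following shape; the
# concrete values shown are hypothetical, only the structure matters:
#
#   ({'table': <top card of own table pile, or None>,
#     'hand': [<top card of each of the 5 hand piles, None if a pile is empty>],
#     'stockpile': 13},
#    {'table': ..., 'hand': [...], 'stockpile': 15})   # the opponent's info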
class AwaitingMoves(Base):
"""
In this state the server awaits in-game messages incoming from
clients. When a player sends a move request it's forwarded to the
Game instance available as self.server.game. If a move is legal it
usually causes the server to be set to SendGameState.
"""
def handle(self):
msg, sock = self.server.talker.recv_many(
self.server.client_sockets())
client = self.server.client_sockets().index(sock)
try:
{
Message.MoveToTable : lambda:self.card_to_table(
client, *msg.args),
Message.MoveToHand : lambda:self.card_to_hand(
client, *msg.args),
Message.Uncover : lambda:self.uncover_card(
client, msg.args),
Message.Stop: lambda:self.stop(client, msg.args),
Message.NewCardRequest : lambda:self.new_card_request(
client),
Message.Bye: lambda:self.player_disconnected(client),
}[msg.type]()
except KeyError:
debug("'%s' msg type not covered\n", msg.type)
def notify_about_an_error(self, client, err):
"""
Sends an Error Message to the client with a given ID. Used to
inform players about a reason why their move is impossible.
"""
self.server.talker.send(Message(Message.Error, err),
self.server.client_sockets()[client])
def card_to_table(self, client, src, dst):
"""
Moves a card from the client's hand to one of the table heaps. If
the move causes the current deal to end, the server is set to the
DealEnded state.
"""
err = self.server.game.card_from_hand_to_table(client, src,
dst if client == 0 else not dst)
if err:
self.notify_about_an_error(client, err)
else:
winner = self.server.game.deal_winner_id()
if winner is None:
self.server.set_state(Server.State.SendGameState)
else:
debug("the deal was won by %i\n", winner)
self.server.set_state(Server.State.DealEnded)
def card_to_hand(self, client, src, dst):
"""
Moves a card from one of client's piles to another.
"""
err = self.server.game.card_from_hand_to_hand(client, src, dst)
debug("card_to_hand returned %s\n", err)
if err:
self.notify_about_an_error(client, err)
else:
self.server.set_state(Server.State.SendGameState)
def uncover_card(self, client, pile):
"""
Uncovers a card on top of one of client's piles.
"""
err = self.server.game.uncover_card_in_hand(client, pile)
debug("uncover_card returned %s\n", err)
if err:
self.notify_about_an_error(client, err)
else:
self.server.set_state(Server.State.SendGameState)
def stop(self, client, pile):
"""
Executes a "stop".
"""
err = self.server.game.execute_stop(client, pile)
if err:
self.notify_about_an_error(client, err)
else:
debug("player %s has won the round with a stop\n", client)
self.server.set_state(Server.State.NewDeal)
def new_card_request(self, client):
"""
Accepts a new card request. If the other player hasn't asked
for it yet, or if it's not possible, the client is informed about
it.
"""
err = self.server.game.request_new_cards(client)
debug("new_card_request returned %s\n", err)
if err:
self.notify_about_an_error(client, err)
else:
self.server.set_state(Server.State.SendGameState)
def player_disconnected(self, client):
"""
Informs the other player that he has won because of a walkover.
"""
debug("player %i disconnected, %i wins\n", client, 1 - client)
self.server.talker.send(Message(Message.EndOfGame, True),
self.server.client_sockets()[1 - client])
sys.exit(1)
class DealEnded(Base):
"""
Checks whether the game is over. If it's not, a new deal is begun by
calling Game.start_next_deal and setting the state to NewDeal. If
the game's over, EndOfGame messages are sent and the server process
exits.
"""
def handle(self):
winner = self.server.game.game_winner_id()
if winner is None:
self.server.game.start_next_deal()
self.server.set_state(Server.State.NewDeal)
else:
debug("player %i has won the game\n", winner)
for player in [0, 1]:
self.server.talker.send(
Message(Message.EndOfGame, player == winner),
self.server.client_sockets()[player])
sys.exit()
def __init__(self, ip = DefaultHost, port = DefaultPort):
self.prepare_socket(ip, port)
self.clients = []
self.set_state(Server.State.Accepting)
self.talker = PickledTalker()
def add_client(self, client):
"""
Adds a given client to the clients list. The argument should be a pair
consisting of a connection and an address returned from socket.accept()
"""
self.clients.append(client)
def remove_waiting_clients(self):
self.clients = []
def client_sockets(self):
"""
Returns a list of clients' readable sockets.
"""
return [tuple[0] for tuple in self.clients]
def prepare_socket(self, ip, port):
"Prepares a TCP socket"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
sock.bind((ip, port))
sock.listen(2)
self.socket = sock
def set_state(self, state_type):
"Sets state to an instance of a given type passing self to the ctor."
self.state = state_type(self)
def start(self):
"The main, infinite server's loop."
while True:
self.state.handle()
class Client():
"""
Handles the network communication from the client's side.
"""
def __init__(self, ip = DefaultHost, port = DefaultPort):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
self.socket = sock
self.talker = PickledTalker(sock)
self.talker.send(Message(Message.Hello, ProtocolVersion))
def send_move_to_hand(self, src, dst):
self.talker.send(Message(Message.MoveToHand, (src, dst)))
def send_move_to_table(self, src, dst):
self.talker.send(Message(Message.MoveToTable, (src, dst)))
def send_uncover(self, pile):
self.talker.send(Message(Message.Uncover, pile))
def send_stop(self, pile):
self.talker.send(Message(Message.Stop, pile))
def send_new_card_request(self):
self.talker.send(Message(Message.NewCardRequest))
def disconnect(self):
self.talker.send(Message(Message.Bye))
self.socket.close()
def recv(self):
return self.talker.recv()
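# Illustrative usage sketch (not part of the original module); it only uses
# methods defined on Client above, and the host value is hypothetical:
#
#   client = Client('127.0.0.1', DefaultPort)
#   client.send_new_card_request()
#   reply = client.recv()        # a Message instance sent by the server
#   client.disconnect()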
| gpl-3.0 | -3,029,758,972,598,041,600 | 40.577519 | 84 | 0.522607 | false |
ehashman/oh-mainline | vendor/packages/kombu/kombu/serialization.py | 20 | 14799 | """
kombu.serialization
===================
Serialization utilities.
"""
from __future__ import absolute_import
import codecs
import os
import sys
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError: # pragma: no cover
cpickle = None # noqa
from collections import namedtuple
from contextlib import contextmanager
from .exceptions import (
ContentDisallowed, DecodeError, EncodeError, SerializerNotInstalled
)
from .five import BytesIO, reraise, text_t
from .utils import entrypoints
from .utils.encoding import str_to_bytes, bytes_t
__all__ = ['pickle', 'loads', 'dumps', 'register', 'unregister']
SKIP_DECODE = frozenset(['binary', 'ascii-8bit'])
TRUSTED_CONTENT = frozenset(['application/data', 'application/text'])
if sys.platform.startswith('java'): # pragma: no cover
def _decode(t, coding):
return codecs.getdecoder(coding)(t)[0]
else:
_decode = codecs.decode
pickle = cpickle or pypickle
pickle_load = pickle.load
#: Kombu requires Python 2.5 or later so we use protocol 2 by default.
#: There's a new protocol (3) but this is only supported by Python 3.
pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 2))
codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder'))
@contextmanager
def _reraise_errors(wrapper,
include=(Exception, ), exclude=(SerializerNotInstalled, )):
try:
yield
except exclude:
raise
except include as exc:
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
def pickle_loads(s, load=pickle_load):
# used to support buffer objects
return load(BytesIO(s))
def parenthesize_alias(first, second):
return '%s (%s)' % (first, second) if first else second
class SerializerRegistry(object):
"""The registry keeps track of serialization methods."""
def __init__(self):
self._encoders = {}
self._decoders = {}
self._default_encode = None
self._default_content_type = None
self._default_content_encoding = None
self._disabled_content_types = set()
self.type_to_name = {}
self.name_to_type = {}
def register(self, name, encoder, decoder, content_type,
content_encoding='utf-8'):
if encoder:
self._encoders[name] = codec(
content_type, content_encoding, encoder,
)
if decoder:
self._decoders[content_type] = decoder
self.type_to_name[content_type] = name
self.name_to_type[name] = content_type
def enable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.discard(name)
def disable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.add(name)
def unregister(self, name):
try:
content_type = self.name_to_type[name]
self._decoders.pop(content_type, None)
self._encoders.pop(name, None)
self.type_to_name.pop(content_type, None)
self.name_to_type.pop(name, None)
except KeyError:
raise SerializerNotInstalled(
'No encoder/decoder installed for {0}'.format(name))
def _set_default_serializer(self, name):
"""
Set the default serialization method used by this library.
:param name: The name of the registered serialization method.
For example, `json` (default), `pickle`, `yaml`, `msgpack`,
or any custom methods registered using :meth:`register`.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
try:
(self._default_content_type, self._default_content_encoding,
self._default_encode) = self._encoders[name]
except KeyError:
raise SerializerNotInstalled(
'No encoder installed for {0}'.format(name))
def dumps(self, data, serializer=None):
if serializer == 'raw':
return raw_encode(data)
if serializer and not self._encoders.get(serializer):
raise SerializerNotInstalled(
'No encoder installed for {0}'.format(serializer))
# If a raw string was sent, assume binary encoding
# (it's likely either ASCII or a raw binary file, and a character
# set of 'binary' will encompass both, even if not ideal).
if not serializer and isinstance(data, bytes_t):
# In Python 3+, this would be "bytes"; allow binary data to be
# sent as a message without getting encoder errors
return 'application/data', 'binary', data
# For Unicode objects, force it into a string
if not serializer and isinstance(data, text_t):
with _reraise_errors(EncodeError, exclude=()):
payload = data.encode('utf-8')
return 'text/plain', 'utf-8', payload
if serializer:
content_type, content_encoding, encoder = \
self._encoders[serializer]
else:
encoder = self._default_encode
content_type = self._default_content_type
content_encoding = self._default_content_encoding
with _reraise_errors(EncodeError):
payload = encoder(data)
return content_type, content_encoding, payload
encode = dumps # XXX compat
def loads(self, data, content_type, content_encoding,
accept=None, force=False, _trusted_content=TRUSTED_CONTENT):
content_type = content_type or 'application/data'
if accept is not None:
if content_type not in _trusted_content \
and content_type not in accept:
raise self._for_untrusted_content(content_type, 'untrusted')
else:
if content_type in self._disabled_content_types and not force:
raise self._for_untrusted_content(content_type, 'disabled')
content_encoding = (content_encoding or 'utf-8').lower()
if data:
decode = self._decoders.get(content_type)
if decode:
with _reraise_errors(DecodeError):
return decode(data)
if content_encoding not in SKIP_DECODE and \
not isinstance(data, text_t):
with _reraise_errors(DecodeError):
return _decode(data, content_encoding)
return data
decode = loads # XXX compat
def _for_untrusted_content(self, ctype, why):
return ContentDisallowed(
'Refusing to deserialize {0} content of type {1}'.format(
why,
parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype),
),
)
#: Global registry of serializers/deserializers.
registry = SerializerRegistry()
"""
.. function:: dumps(data, serializer=default_serializer)
Serialize a data structure into a string suitable for sending
as an AMQP message body.
:param data: The message data to send. Can be a list,
dictionary or a string.
:keyword serializer: An optional string representing
the serialization method you want the data marshalled
into. (For example, `json`, `raw`, or `pickle`).
If :const:`None` (default), then json will be used, unless
`data` is a :class:`str` or :class:`unicode` object. In this
latter case, no serialization occurs as it would be
unnecessary.
Note that if `serializer` is specified, then that
serialization method will be used even if a :class:`str`
or :class:`unicode` object is passed in.
:returns: A three-item tuple containing the content type
(e.g., `application/json`), the content encoding (e.g.,
`utf-8`), and a string containing the serialized
data.
:raises SerializerNotInstalled: If the serialization method
requested is not available.
"""
dumps = encode = registry.encode # XXX encode is a compat alias
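# Minimal usage sketch (added for illustration, not part of the original
# module): `dumps` returns a (content_type, content_encoding, payload) triple
# that can be attached to an outgoing message.
#
#   ctype, enc, payload = dumps({'hello': 'world'}, serializer='json')
#   assert (ctype, enc) == ('application/json', 'utf-8')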
"""
.. function:: loads(data, content_type, content_encoding):
Deserialize a data stream as serialized using `dumps`
based on `content_type`.
:param data: The message data to deserialize.
:param content_type: The content-type of the data.
(e.g., `application/json`).
:param content_encoding: The content-encoding of the data.
(e.g., `utf-8`, `binary`, or `us-ascii`).
:returns: The unserialized data.
"""
loads = decode = registry.decode # XXX decode is a compat alias
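# Round-trip sketch (illustrative only): the triple produced by `dumps` above
# can be fed straight back into `loads`; the optional `accept` whitelist
# rejects content types you do not trust.
#
#   obj = loads(payload, ctype, enc, accept=['application/json'])
#   assert obj == {'hello': 'world'}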
"""
.. function:: register(name, encoder, decoder, content_type,
content_encoding='utf-8'):
Register a new encoder/decoder.
:param name: A convenience name for the serialization method.
:param encoder: A method that will be passed a python data structure
and should return a string representing the serialized data.
If :const:`None`, then only a decoder will be registered. Encoding
will not be possible.
:param decoder: A method that will be passed a string representing
serialized data and should return a python data structure.
If :const:`None`, then only an encoder will be registered.
Decoding will not be possible.
:param content_type: The mime-type describing the serialized
structure.
:param content_encoding: The content encoding (character set) that
the `decoder` method will be returning. Will usually be
`utf-8`, `us-ascii`, or `binary`.
"""
register = registry.register
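# Sketch of registering a custom codec (the `myjson` name and content type are
# hypothetical; the standard-library `json` module is used purely for
# illustration):
#
#   import json
#   register('myjson', json.dumps, json.loads,
#            content_type='application/x-myjson',
#            content_encoding='utf-8')
#
# After this, dumps(obj, serializer='myjson') and the matching loads() call
# will use the functions registered above.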
"""
.. function:: unregister(name):
Unregister registered encoder/decoder.
:param name: Registered serialization method name.
"""
unregister = registry.unregister
def raw_encode(data):
"""Special case serializer."""
content_type = 'application/data'
payload = data
if isinstance(payload, text_t):
content_encoding = 'utf-8'
with _reraise_errors(EncodeError, exclude=()):
payload = payload.encode(content_encoding)
else:
content_encoding = 'binary'
return content_type, content_encoding, payload
def register_json():
"""Register a encoder/decoder for JSON serialization."""
from anyjson import loads as json_loads, dumps as json_dumps
def _loads(obj):
if isinstance(obj, bytes_t):
obj = obj.decode()
return json_loads(obj)
registry.register('json', json_dumps, _loads,
content_type='application/json',
content_encoding='utf-8')
def register_yaml():
"""Register a encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types
to be serialized. Useful if you need to send data such as dates"""
try:
import yaml
registry.register('yaml', yaml.safe_dump, yaml.safe_load,
content_type='application/x-yaml',
content_encoding='utf-8')
except ImportError:
def not_available(*args, **kwargs):
"""In case a client receives a yaml message, but yaml
isn't installed."""
raise SerializerNotInstalled(
'No decoder installed for YAML. Install the PyYAML library')
registry.register('yaml', None, not_available, 'application/x-yaml')
if sys.version_info[0] == 3: # pragma: no cover
def unpickle(s):
return pickle_loads(str_to_bytes(s))
else:
unpickle = pickle_loads # noqa
def register_pickle():
"""The fastest serialization method, but restricts
you to python clients."""
def pickle_dumps(obj, dumper=pickle.dumps):
return dumper(obj, protocol=pickle_protocol)
registry.register('pickle', pickle_dumps, unpickle,
content_type='application/x-python-serialize',
content_encoding='binary')
def register_msgpack():
"""See http://msgpack.sourceforge.net/"""
try:
try:
from msgpack import packb as pack, unpackb
unpack = lambda s: unpackb(s, encoding='utf-8')
except ImportError:
# msgpack < 0.2.0 and Python 2.5
from msgpack import packs as pack, unpacks as unpack # noqa
registry.register(
'msgpack', pack, unpack,
content_type='application/x-msgpack',
content_encoding='binary')
except (ImportError, ValueError):
def not_available(*args, **kwargs):
"""In case a client receives a msgpack message, but yaml
isn't installed."""
raise SerializerNotInstalled(
'No decoder installed for msgpack. '
'Please install the msgpack library')
registry.register('msgpack', None, not_available,
'application/x-msgpack')
# Register the base serialization methods.
register_json()
register_pickle()
register_yaml()
register_msgpack()
# Default serializer is 'json'
registry._set_default_serializer('json')
_setupfuns = {
'json': register_json,
'pickle': register_pickle,
'yaml': register_yaml,
'msgpack': register_msgpack,
'application/json': register_json,
'application/x-yaml': register_yaml,
'application/x-python-serialize': register_pickle,
'application/x-msgpack': register_msgpack,
}
def enable_insecure_serializers(choices=['pickle', 'yaml', 'msgpack']):
"""Enable serializers that are considered to be unsafe.
Will enable ``pickle``, ``yaml`` and ``msgpack`` by default,
but you can also specify a list of serializers (by name or content type)
to enable.
"""
for choice in choices:
try:
registry.enable(choice)
except KeyError:
pass
def disable_insecure_serializers(allowed=['json']):
"""Disable untrusted serializers.
Will disable all serializers except ``json``
or you can specify a list of deserializers to allow.
.. note::
Producers will still be able to serialize data
in these formats, but consumers will not accept
incoming data using the untrusted content types.
"""
for name in registry._decoders:
registry.disable(name)
if allowed is not None:
for name in allowed:
registry.enable(name)
# Insecure serializers are disabled by default since v3.0
disable_insecure_serializers()
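# Illustrative call patterns (not in the original module): a consumer that
# also trusts YAML could re-allow it with
#
#   disable_insecure_serializers(allowed=['json', 'yaml'])
#
# while a fully trusted environment might re-enable pickle explicitly:
#
#   enable_insecure_serializers(['pickle'])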
# Load entrypoints from installed extensions
for ep, args in entrypoints('kombu.serializers'): # pragma: no cover
register(ep.name, *args)
def prepare_accept_content(l, name_to_type=registry.name_to_type):
if l is not None:
return set(n if '/' in n else name_to_type[n] for n in l)
return l
| agpl-3.0 | 8,434,037,319,424,792,000 | 31.525275 | 79 | 0.62984 | false |
nmartensen/pandas | pandas/tests/frame/test_alter_axes.py | 4 | 37525 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Index, MultiIndex,
RangeIndex, date_range, IntervalIndex,
to_datetime)
from pandas.core.dtypes.common import (
is_object_dtype,
is_categorical_dtype,
is_interval_dtype)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameAlterAxes(TestData):
def test_set_index(self):
idx = Index(np.arange(len(self.mixed_frame)))
# cache it
_ = self.mixed_frame['foo'] # noqa
self.mixed_frame.index = idx
assert self.mixed_frame['foo'].index is idx
with tm.assert_raises_regex(ValueError, 'Length mismatch'):
self.mixed_frame.index = idx[::2]
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]},
index=[2010, 2011, 2012])
expected = df.loc[2010]
new_index = df.index.astype(np.int32)
df.index = new_index
result = df.loc[2010]
assert_series_equal(result, expected)
def test_set_index2(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
# new object, single-column
result = df.set_index('C')
result_nodrop = df.set_index('C', drop=False)
index = Index(df['C'], name='C')
expected = df.loc[:, ['A', 'B', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
assert result.index.name == index.name
# inplace, single
df2 = df.copy()
df2.set_index('C', inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index('C', drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# create new object, multi-column
result = df.set_index(['A', 'B'])
result_nodrop = df.set_index(['A', 'B'], drop=False)
index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
expected = df.loc[:, ['C', 'D', 'E']]
expected.index = index
expected_nodrop = df.copy()
expected_nodrop.index = index
assert_frame_equal(result, expected)
assert_frame_equal(result_nodrop, expected_nodrop)
assert result.index.names == index.names
# inplace
df2 = df.copy()
df2.set_index(['A', 'B'], inplace=True)
assert_frame_equal(df2, expected)
df3 = df.copy()
df3.set_index(['A', 'B'], drop=False, inplace=True)
assert_frame_equal(df3, expected_nodrop)
# corner case
with tm.assert_raises_regex(ValueError,
'Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# append
result = df.set_index(['A', 'B'], append=True)
xp = df.reset_index().set_index(['index', 'A', 'B'])
xp.index.names = [None, 'A', 'B']
assert_frame_equal(result, xp)
# append to existing multiindex
rdf = df.set_index(['A'], append=True)
rdf = rdf.set_index(['B', 'C'], append=True)
expected = df.set_index(['A', 'B', 'C'], append=True)
assert_frame_equal(rdf, expected)
# Series
result = df.set_index(df.C)
assert result.index.name == 'C'
def test_set_index_nonuniq(self):
df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],
'B': ['one', 'two', 'three', 'one', 'two'],
'C': ['a', 'b', 'c', 'd', 'e'],
'D': np.random.randn(5),
'E': np.random.randn(5)})
with tm.assert_raises_regex(ValueError,
'Index has duplicate keys'):
df.set_index('A', verify_integrity=True, inplace=True)
assert 'A' in df
def test_set_index_bug(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
df2 = df.select(lambda indx: indx >= 1)
rs = df2.set_index('key')
xp = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
assert_frame_equal(rs, xp)
def test_set_index_pass_arrays(self):
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# multiple columns
result = df.set_index(['A', df['B'].values], drop=False)
expected = df.set_index(['A', 'B'], drop=False)
# TODO should set_index check_names ?
assert_frame_equal(result, expected, check_names=False)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
# with Categorical
df = DataFrame({'A': np.random.randn(10),
'B': ci.values})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
assert idf.index.name == 'B'
# from a CategoricalIndex
df = DataFrame({'A': np.random.randn(10),
'B': ci})
idf = df.set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
assert idf.index.name == 'B'
idf = df.set_index('B').reset_index().set_index('B')
str(idf)
tm.assert_index_equal(idf.index, ci, check_names=False)
assert idf.index.name == 'B'
new_df = idf.reset_index()
new_df.index = df.B
tm.assert_index_equal(new_df.index, ci, check_names=False)
assert idf.index.name == 'B'
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
assert isinstance(idf.index, pd.DatetimeIndex)
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
i = (pd.DatetimeIndex(
to_datetime(['2013-1-1 13:00',
'2013-1-2 14:00'], errors="raise"))
.tz_localize('US/Pacific'))
df = DataFrame(np.random.randn(2, 1), columns=['A'])
expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800',
tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800',
tz='US/Pacific')],
dtype="object"))
# convert index to series
result = Series(i)
assert_series_equal(result, expected)
# assign to frame
df['B'] = i
result = df['B']
assert_series_equal(result, expected, check_names=False)
assert result.name == 'B'
# keep the timezone
result = i.to_series(keep_tz=True)
assert_series_equal(result.reset_index(drop=True), expected)
# convert to utc
df['C'] = i.to_series().reset_index(drop=True)
result = df['C']
comp = pd.DatetimeIndex(expected.values).copy()
comp.tz = None
tm.assert_numpy_array_equal(result.values, comp.values)
# list of datetimes with a tz
df['D'] = i.to_pydatetime()
result = df['D']
assert_series_equal(result, expected, check_names=False)
assert result.name == 'D'
# GH 6785
# set the index manually
import pytz
df = DataFrame(
[{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
assert_frame_equal(df, expected)
# GH 3950
# reset_index with single level
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('1/1/2011', periods=5,
freq='D', tz=tz, name='idx')
df = pd.DataFrame(
{'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx': [datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(
lambda d: pd.Timestamp(d, tz=tz))
assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
i = pd.to_datetime(["2014-01-01 10:10:10"],
utc=True).tz_convert('Europe/Rome')
df = DataFrame({'i': i})
assert df.set_index(i).index[0].hour == 11
assert pd.DatetimeIndex(pd.Series(df.i))[0].hour == 11
assert df.set_index(df.i).index[0].hour == 11
def test_set_index_dst(self):
di = pd.date_range('2006-10-29 00:00:00', periods=3,
freq='H', tz='US/Pacific')
df = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=di).reset_index()
# single level
res = df.set_index('index')
exp = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=pd.Index(di, name='index'))
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(['index', 'a'])
exp_index = pd.MultiIndex.from_arrays([di, [0, 1, 2]],
names=['index', 'a'])
exp = pd.DataFrame({'b': [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = pd.IntervalIndex.from_breaks(np.arange(11), name='x')
original = pd.DataFrame({'x': idx, 'y': np.arange(10)})[['x', 'y']]
result = original.set_index('x')
expected = pd.DataFrame({'y': np.arange(10)}, index=idx)
assert_frame_equal(result, expected)
result2 = result.reset_index()
assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
rs = df.set_index(df.columns[0])
xp = df.iloc[:, 1:]
xp.index = df.iloc[:, 0].values
xp.index.names = [df.columns[0]]
assert_frame_equal(rs, xp)
def test_set_index_empty_column(self):
# #1971
df = DataFrame([
dict(a=1, p=0),
dict(a=2, m=10),
dict(a=3, m=11, p=20),
dict(a=4, m=12, p=21)
], columns=('a', 'm', 'p', 'x'))
# it works!
result = df.set_index(['a', 'x'])
repr(result)
def test_set_columns(self):
cols = Index(np.arange(len(self.mixed_frame.columns)))
self.mixed_frame.columns = cols
with tm.assert_raises_regex(ValueError, 'Length mismatch'):
self.mixed_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# 11314
# with tz
index = date_range(datetime(2015, 10, 1),
datetime(2015, 10, 1, 23),
freq='H', tz='US/Eastern')
df = DataFrame(np.random.randn(24, 1), columns=['a'], index=index)
new_index = date_range(datetime(2015, 10, 2),
datetime(2015, 10, 2, 23),
freq='H', tz='US/Eastern')
# TODO: unused?
result = df.set_index(new_index) # noqa
assert new_index.freq == index.freq
# Renaming
def test_rename(self):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = self.frame.rename(columns=mapping)
renamed2 = self.frame.rename(columns=str.lower)
assert_frame_equal(renamed, renamed2)
assert_frame_equal(renamed2.rename(columns=str.upper),
self.frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index, pd.Index(['foo', 'bar']))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, pd.Index(['BAR', 'FOO']))
# have to pass something
pytest.raises(TypeError, self.frame.rename)
# partial columns
renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.columns,
pd.Index(['A', 'B', 'foo', 'bar']))
# other axis
renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.index,
pd.Index(['A', 'B', 'foo', 'bar']))
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index,
pd.Index(['bar', 'foo'], name='name'))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self):
# GH 15704
frame = self.frame.copy()
expected = frame.rename_axis('foo')
result = frame.copy()
no_return = result.rename_axis('foo', inplace=True)
assert no_return is None
assert_frame_equal(result, expected)
expected = frame.rename_axis('bar', axis=1)
result = frame.copy()
no_return = result.rename_axis('bar', axis=1, inplace=True)
assert no_return is None
assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(
tuples_columns, names=['fizz', 'buzz'])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar3')],
names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples([('FIZZ1', 'buzz1'),
('FIZZ2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'BUZZ1'),
('fizz2', 'BUZZ2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar2')],
names=['foo', 'bar'])
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self):
renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
assert (self.frame['C'] == 1.).all()
def test_rename_inplace(self):
self.frame.rename(columns={'C': 'foo'})
assert 'C' in self.frame
assert 'foo' not in self.frame
c_id = id(self.frame['C'])
frame = self.frame.copy()
frame.rename(columns={'C': 'foo'}, inplace=True)
assert 'C' not in frame
assert 'foo' in frame
assert id(frame['foo']) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]})
df = df.rename(columns={0: 'a'})
df = df.rename(columns={1: 'b'})
df = df.set_index(['a', 'b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1], [2]],
index=MultiIndex.from_tuples(
[('foo', 'bah'), ('bar', 'bas')],
names=['a', 'b']),
columns=['2001-01-01'])
assert_frame_equal(df, expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
assert_frame_equal(result, expected)
def test_reset_index(self):
stacked = self.frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(deleveled['first'], deleveled2['level_0'],
check_names=False)
tm.assert_series_equal(deleveled['second'], deleveled2['level_1'],
check_names=False)
# default name assigned
rdf = self.frame.reset_index()
exp = pd.Series(self.frame.index.values, name='index')
tm.assert_series_equal(rdf['index'], exp)
# default name assigned, corner case
df = self.frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
exp = pd.Series(self.frame.index.values, name='level_0')
tm.assert_series_equal(rdf['level_0'], exp)
# but this is ok
self.frame.index.name = 'index'
deleveled = self.frame.reset_index()
tm.assert_series_equal(deleveled['index'],
pd.Series(self.frame.index))
tm.assert_index_equal(deleveled.index,
pd.Index(np.arange(len(deleveled))))
# preserve column names
self.frame.columns.name = 'columns'
resetted = self.frame.reset_index()
assert resetted.columns.name == 'columns'
# only remove certain columns
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index(['A', 'B'])
# TODO should reset_index check_names ?
assert_frame_equal(rs, self.frame, check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index(['index', 'A', 'B'])
assert_frame_equal(rs, self.frame.reset_index(), check_names=False)
rs = frame.reset_index('A')
xp = self.frame.reset_index().set_index(['index', 'B'])
assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = self.frame.copy()
resetted = self.frame.reset_index()
df.reset_index(inplace=True)
assert_frame_equal(df, resetted, check_names=False)
frame = self.frame.reset_index().set_index(['index', 'A', 'B'])
rs = frame.reset_index('A', drop=True)
xp = self.frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_level(self):
df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'C', 'D'])
for levels in ['A', 'B'], [0, 1]:
# With MultiIndex
result = df.set_index(['A', 'B']).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(['A', 'B']).reset_index(level=levels,
drop=True)
tm.assert_frame_equal(result, df[['C', 'D']])
# With single-level Index (GH 16263)
result = df.set_index('A').reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index('A').reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(['A']).reset_index(level=levels[0],
drop=True)
tm.assert_frame_equal(result, df[['B', 'C', 'D']])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ['A', 'B'], ['A']:
with tm.assert_raises_regex(KeyError, 'Level E '):
df.set_index(idx_lev).reset_index(level=['A', 'E'])
with tm.assert_raises_regex(IndexError, 'Too many levels'):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted['time'].dtype == np.float64
resetted = df.reset_index()
assert resetted['time'].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index('a', )
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
assert_frame_equal(rs, df)
df = pd.DataFrame({'A': [np.nan, 'b', 'c'],
'B': [0, 1, 2],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
assert_frame_equal(rs, df)
df = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, 2],
'C': [np.nan, 1.1, 2.2]})
rs = df.set_index(['A', 'B']).reset_index()
assert_frame_equal(rs, df)
df = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': [np.nan, np.nan, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = pd.DataFrame([[1, 2], [3, 4]],
columns=pd.date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = pd.DataFrame([[0, 0], [1, 1]], columns=['A', 'B'],
index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = pd.DataFrame([[0, 0, 0], [1, 1, 1]],
columns=['index', 'A', 'B'],
index=RangeIndex(stop=2))
assert_frame_equal(result, expected)
def test_set_index_names(self):
df = pd.util.testing.makeDataFrame()
df.index.name = 'name'
assert df.set_index(df.index).index.names == ['name']
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'A', 'B'])
df = df.set_index(['A', 'B'])
assert df.set_index(df.index).index.names == ['A', 'B']
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index(
[df.index, df.index]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)
def test_rename_objects(self):
renamed = self.mixed_frame.rename(columns=str.upper)
assert 'FOO' in renamed
assert 'foo' not in renamed
def test_assign_columns(self):
self.frame['hi'] = 'there'
frame = self.frame.copy()
frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
assert_series_equal(self.frame['C'], frame['baz'], check_names=False)
assert_series_equal(self.frame['hi'], frame['foo2'], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': pd.Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
class TestIntervalIndex(object):
def test_setitem(self):
df = DataFrame({'A': range(10)})
s = pd.cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df['B'] = s
df['C'] = np.array(s)
df['D'] = s.values
df['E'] = np.array(s.values)
assert is_categorical_dtype(df['B'])
assert is_interval_dtype(df['B'].cat.categories)
assert is_categorical_dtype(df['D'])
assert is_interval_dtype(df['D'].cat.categories)
assert is_object_dtype(df['C'])
assert is_object_dtype(df['E'])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B), check_names=False)
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df['B'], df['B'], check_names=False)
tm.assert_series_equal(df['B'], df['D'], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df['C'], df['C'], check_names=False)
tm.assert_series_equal(df['C'], df['E'], check_names=False)
def test_set_reset_index(self):
df = DataFrame({'A': range(10)})
s = pd.cut(df.A, 5)
df['B'] = s
df = df.set_index('B')
df = df.reset_index()
def test_set_axis_inplace(self):
# GH14636
df = DataFrame({'A': [1.1, 2.2, 3.3],
'B': [5.0, 6.1, 7.2],
'C': [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012])
expected = {0: df.copy(),
1: df.copy()}
expected[0].index = list('abc')
expected[1].columns = list('abc')
expected['index'] = expected[0]
expected['columns'] = expected[1]
for axis in expected:
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
for inplace, warn in (None, FutureWarning), (True, None):
kwargs = {'inplace': inplace}
result = df.copy()
with tm.assert_produces_warning(warn):
result.set_axis(list('abc'), axis=axis, **kwargs)
tm.assert_frame_equal(result, expected[axis])
# inplace=False
result = df.set_axis(list('abc'), axis=axis, inplace=False)
tm.assert_frame_equal(expected[axis], result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = df.set_axis(list('abc'), inplace=False)
tm.assert_frame_equal(result, expected[0])
# wrong values for the "axis" parameter
for axis in 3, 'foo':
with tm.assert_raises_regex(ValueError, 'No axis named'):
df.set_axis(list('abc'), axis=axis, inplace=False)
def test_set_axis_prior_to_deprecation_signature(self):
df = DataFrame({'A': [1.1, 2.2, 3.3],
'B': [5.0, 6.1, 7.2],
'C': [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012])
expected = {0: df.copy(),
1: df.copy()}
expected[0].index = list('abc')
expected[1].columns = list('abc')
expected['index'] = expected[0]
expected['columns'] = expected[1]
# old signature
for axis in expected:
with tm.assert_produces_warning(FutureWarning):
result = df.set_axis(axis, list('abc'), inplace=False)
tm.assert_frame_equal(result, expected[axis])
| bsd-3-clause | -8,005,331,627,058,506,000 | 37.72549 | 79 | 0.489007 | false |
jbonofre/beam | sdks/python/apache_beam/io/gcp/tests/utils_test.py | 4 | 3827 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittest for GCP testing utils."""
import logging
import unittest
from mock import Mock
from mock import patch
from apache_beam.io.gcp.tests import utils
from apache_beam.testing.test_utils import patch_retry
# Protect against environments where bigquery library is not available.
try:
from google.cloud import bigquery
except ImportError:
bigquery = None
@unittest.skipIf(bigquery is None, 'Bigquery dependencies are not installed.')
class UtilsTest(unittest.TestCase):
def setUp(self):
self._mock_result = Mock()
patch_retry(self, utils)
@patch.object(bigquery, 'Client')
def test_delete_table_succeeds(self, mock_client):
mock_dataset = Mock()
mock_client.return_value.dataset = mock_dataset
mock_dataset.return_value.exists.return_value = True
mock_table = Mock()
mock_dataset.return_value.table = mock_table
mock_table.return_value.exists.side_effect = [True, False]
utils.delete_bq_table('unused_project',
'unused_dataset',
'unused_table')
@patch.object(bigquery, 'Client')
def test_delete_table_fails_dataset_not_exist(self, mock_client):
mock_dataset = Mock()
mock_client.return_value.dataset = mock_dataset
mock_dataset.return_value.exists.return_value = False
with self.assertRaisesRegexp(
Exception, r'^Failed to cleanup. Bigquery dataset unused_dataset '
r'doesn\'t exist'):
utils.delete_bq_table('unused_project',
'unused_dataset',
'unused_table')
@patch.object(bigquery, 'Client')
def test_delete_table_fails_table_not_exist(self, mock_client):
mock_dataset = Mock()
mock_client.return_value.dataset = mock_dataset
mock_dataset.return_value.exists.return_value = True
mock_table = Mock()
mock_dataset.return_value.table = mock_table
mock_table.return_value.exists.return_value = False
with self.assertRaisesRegexp(Exception,
r'^Failed to cleanup. Bigquery table '
'unused_table doesn\'t exist'):
utils.delete_bq_table('unused_project',
'unused_dataset',
'unused_table')
@patch.object(bigquery, 'Client')
def test_delete_table_fails_service_error(self, mock_client):
mock_dataset = Mock()
mock_client.return_value.dataset = mock_dataset
mock_dataset.return_value.exists.return_value = True
mock_table = Mock()
mock_dataset.return_value.table = mock_table
mock_table.return_value.exists.return_value = True
with self.assertRaisesRegexp(Exception,
r'^Failed to cleanup. Bigquery table '
'unused_table still exists'):
utils.delete_bq_table('unused_project',
'unused_dataset',
'unused_table')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 | -7,975,059,935,398,905,000 | 34.766355 | 78 | 0.66266 | false |
rajul/mne-python | examples/visualization/plot_topo_compare_conditions.py | 11 | 2389 | """
=================================================
Compare evoked responses for different conditions
=================================================
In this example, an Epochs object for visual and
auditory responses is created. Both conditions
are then accessed by their respective names to
create a sensor layout plot of the related
evoked responses.
"""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.io import Raw
from mne.viz import plot_evoked_topo
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: MEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
# bad channels in raw.info['bads'] will be automatically excluded
# Set up amplitude-peak rejection values for MEG channels
reject = dict(grad=4000e-13, mag=4e-12)
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Create epochs including different events
event_id = {'audio/left': 1, 'audio/right': 2,
'visual/left': 3, 'visual/right': 4}
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), reject=reject)
# Generate list of evoked objects from conditions names
evokeds = [epochs[name].average() for name in ('left', 'right')]
###############################################################################
# Show topography for two different conditions
colors = 'yellow', 'green'
title = 'MNE sample data - left vs right (A/V combined)'
plot_evoked_topo(evokeds, color=colors, title=title)
conditions = [e.comment for e in evokeds]
for cond, col, pos in zip(conditions, colors, (0.025, 0.07)):
plt.figtext(0.99, pos, cond, color=col, fontsize=12,
horizontalalignment='right')
plt.show()
| bsd-3-clause | 4,747,383,452,194,195,000 | 30.434211 | 79 | 0.625366 | false |
yusufb/file-manager | ui/searchListUI.py | 1 | 2127 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'bookmarkListUI.ui'
#
# Created: Sun May 25 15:06:30 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui, Qt
import sys
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(400, 300)
self.searchList = QtGui.QTableWidget(Form)
self.searchList.setGeometry(QtCore.QRect(0, 0, 401, 261))
self.searchList.setObjectName(_fromUtf8("searchList"))
self.searchList.verticalHeader().setVisible(False)
        self.searchList.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.searchList.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.searchList.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.openButton = QtGui.QPushButton(Form)
self.openButton.setGeometry(QtCore.QRect(10, 270, 75, 23))
self.openButton.setObjectName(_fromUtf8("openButton"))
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Search results", None))
self.openButton.setText(_translate("Form", "Open", None))
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Form()
ui.setupUi(Dialog)
Dialog.setWindowFlags(QtCore.Qt.WindowMinMaxButtonsHint)
Dialog.show()
Dialog.setFixedHeight(Dialog.height())
Dialog.setFixedWidth(Dialog.width())
print "dialog ui is created"
sys.exit(app.exec_())
| gpl-3.0 | 4,899,884,728,804,941,000 | 32.761905 | 80 | 0.693465 | false |
rhurkes/chasegame | venv/lib/python2.7/site-packages/cryptography/hazmat/bindings/openssl/engine.py | 6 | 6243 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/engine.h>
"""
TYPES = """
typedef ... ENGINE;
typedef ... RSA_METHOD;
typedef ... DSA_METHOD;
typedef ... ECDH_METHOD;
typedef ... ECDSA_METHOD;
typedef ... DH_METHOD;
typedef ... RAND_METHOD;
typedef ... STORE_METHOD;
typedef ... *ENGINE_GEN_INT_FUNC_PTR;
typedef ... *ENGINE_CTRL_FUNC_PTR;
typedef ... *ENGINE_LOAD_KEY_PTR;
typedef ... *ENGINE_CIPHERS_PTR;
typedef ... *ENGINE_DIGESTS_PTR;
typedef ... ENGINE_CMD_DEFN;
typedef ... UI_METHOD;
static const unsigned int ENGINE_METHOD_RSA;
static const unsigned int ENGINE_METHOD_DSA;
static const unsigned int ENGINE_METHOD_RAND;
static const unsigned int ENGINE_METHOD_ECDH;
static const unsigned int ENGINE_METHOD_ECDSA;
static const unsigned int ENGINE_METHOD_CIPHERS;
static const unsigned int ENGINE_METHOD_DIGESTS;
static const unsigned int ENGINE_METHOD_STORE;
static const unsigned int ENGINE_METHOD_ALL;
static const unsigned int ENGINE_METHOD_NONE;
"""
FUNCTIONS = """
ENGINE *ENGINE_get_first(void);
ENGINE *ENGINE_get_last(void);
ENGINE *ENGINE_get_next(ENGINE *);
ENGINE *ENGINE_get_prev(ENGINE *);
int ENGINE_add(ENGINE *);
int ENGINE_remove(ENGINE *);
ENGINE *ENGINE_by_id(const char *);
int ENGINE_init(ENGINE *);
int ENGINE_finish(ENGINE *);
void ENGINE_load_openssl(void);
void ENGINE_load_dynamic(void);
void ENGINE_load_cryptodev(void);
void ENGINE_load_builtin_engines(void);
void ENGINE_cleanup(void);
ENGINE *ENGINE_get_default_RSA(void);
ENGINE *ENGINE_get_default_DSA(void);
ENGINE *ENGINE_get_default_ECDH(void);
ENGINE *ENGINE_get_default_ECDSA(void);
ENGINE *ENGINE_get_default_DH(void);
ENGINE *ENGINE_get_default_RAND(void);
ENGINE *ENGINE_get_cipher_engine(int);
ENGINE *ENGINE_get_digest_engine(int);
int ENGINE_set_default_RSA(ENGINE *);
int ENGINE_set_default_DSA(ENGINE *);
int ENGINE_set_default_ECDH(ENGINE *);
int ENGINE_set_default_ECDSA(ENGINE *);
int ENGINE_set_default_DH(ENGINE *);
int ENGINE_set_default_RAND(ENGINE *);
int ENGINE_set_default_ciphers(ENGINE *);
int ENGINE_set_default_digests(ENGINE *);
int ENGINE_set_default_string(ENGINE *, const char *);
int ENGINE_set_default(ENGINE *, unsigned int);
unsigned int ENGINE_get_table_flags(void);
void ENGINE_set_table_flags(unsigned int);
int ENGINE_register_RSA(ENGINE *);
void ENGINE_unregister_RSA(ENGINE *);
void ENGINE_register_all_RSA(void);
int ENGINE_register_DSA(ENGINE *);
void ENGINE_unregister_DSA(ENGINE *);
void ENGINE_register_all_DSA(void);
int ENGINE_register_ECDH(ENGINE *);
void ENGINE_unregister_ECDH(ENGINE *);
void ENGINE_register_all_ECDH(void);
int ENGINE_register_ECDSA(ENGINE *);
void ENGINE_unregister_ECDSA(ENGINE *);
void ENGINE_register_all_ECDSA(void);
int ENGINE_register_DH(ENGINE *);
void ENGINE_unregister_DH(ENGINE *);
void ENGINE_register_all_DH(void);
int ENGINE_register_RAND(ENGINE *);
void ENGINE_unregister_RAND(ENGINE *);
void ENGINE_register_all_RAND(void);
int ENGINE_register_STORE(ENGINE *);
void ENGINE_unregister_STORE(ENGINE *);
void ENGINE_register_all_STORE(void);
int ENGINE_register_ciphers(ENGINE *);
void ENGINE_unregister_ciphers(ENGINE *);
void ENGINE_register_all_ciphers(void);
int ENGINE_register_digests(ENGINE *);
void ENGINE_unregister_digests(ENGINE *);
void ENGINE_register_all_digests(void);
int ENGINE_register_complete(ENGINE *);
int ENGINE_register_all_complete(void);
int ENGINE_ctrl(ENGINE *, int, long, void *, void (*)(void));
int ENGINE_cmd_is_executable(ENGINE *, int);
int ENGINE_ctrl_cmd(ENGINE *, const char *, long, void *, void (*)(void), int);
int ENGINE_ctrl_cmd_string(ENGINE *, const char *, const char *, int);
ENGINE *ENGINE_new(void);
int ENGINE_free(ENGINE *);
int ENGINE_up_ref(ENGINE *);
int ENGINE_set_id(ENGINE *, const char *);
int ENGINE_set_name(ENGINE *, const char *);
int ENGINE_set_RSA(ENGINE *, const RSA_METHOD *);
int ENGINE_set_DSA(ENGINE *, const DSA_METHOD *);
int ENGINE_set_ECDH(ENGINE *, const ECDH_METHOD *);
int ENGINE_set_ECDSA(ENGINE *, const ECDSA_METHOD *);
int ENGINE_set_DH(ENGINE *, const DH_METHOD *);
int ENGINE_set_RAND(ENGINE *, const RAND_METHOD *);
int ENGINE_set_STORE(ENGINE *, const STORE_METHOD *);
int ENGINE_set_destroy_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_init_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_finish_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_ctrl_function(ENGINE *, ENGINE_CTRL_FUNC_PTR);
int ENGINE_set_load_privkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_load_pubkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_ciphers(ENGINE *, ENGINE_CIPHERS_PTR);
int ENGINE_set_digests(ENGINE *, ENGINE_DIGESTS_PTR);
int ENGINE_set_flags(ENGINE *, int);
int ENGINE_set_cmd_defns(ENGINE *, const ENGINE_CMD_DEFN *);
const char *ENGINE_get_id(const ENGINE *);
const char *ENGINE_get_name(const ENGINE *);
const RSA_METHOD *ENGINE_get_RSA(const ENGINE *);
const DSA_METHOD *ENGINE_get_DSA(const ENGINE *);
const ECDH_METHOD *ENGINE_get_ECDH(const ENGINE *);
const ECDSA_METHOD *ENGINE_get_ECDSA(const ENGINE *);
const DH_METHOD *ENGINE_get_DH(const ENGINE *);
const RAND_METHOD *ENGINE_get_RAND(const ENGINE *);
const STORE_METHOD *ENGINE_get_STORE(const ENGINE *);
const EVP_CIPHER *ENGINE_get_cipher(ENGINE *, int);
const EVP_MD *ENGINE_get_digest(ENGINE *, int);
int ENGINE_get_flags(const ENGINE *);
const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *);
EVP_PKEY *ENGINE_load_private_key(ENGINE *, const char *, UI_METHOD *, void *);
EVP_PKEY *ENGINE_load_public_key(ENGINE *, const char *, UI_METHOD *, void *);
void ENGINE_add_conf_module(void);
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
| mit | 3,982,424,861,392,290,000 | 36.836364 | 79 | 0.740509 | false |
datastax/python-driver | cassandra/datastax/graph/types.py | 1 | 5798 | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['Element', 'Vertex', 'Edge', 'VertexProperty', 'Path', 'T']
class Element(object):
element_type = None
_attrs = ('id', 'label', 'type', 'properties')
def __init__(self, id, label, type, properties):
if type != self.element_type:
            raise TypeError("Attempted to create %s from %s element" % (type, self.element_type))
self.id = id
self.label = label
self.type = type
self.properties = self._extract_properties(properties)
@staticmethod
def _extract_properties(properties):
return dict(properties)
def __eq__(self, other):
return all(getattr(self, attr) == getattr(other, attr) for attr in self._attrs)
def __str__(self):
return str(dict((k, getattr(self, k)) for k in self._attrs))
class Vertex(Element):
"""
Represents a Vertex element from a graph query.
Vertex ``properties`` are extracted into a ``dict`` of property names to list of :class:`~VertexProperty` (list
because they are always encoded that way, and sometimes have multiple cardinality; VertexProperty because sometimes
the properties themselves have property maps).
"""
element_type = 'vertex'
@staticmethod
def _extract_properties(properties):
# vertex properties are always encoded as a list, regardless of Cardinality
return dict((k, [VertexProperty(k, p['value'], p.get('properties')) for p in v]) for k, v in properties.items())
def __repr__(self):
properties = dict((name, [{'label': prop.label, 'value': prop.value, 'properties': prop.properties} for prop in prop_list])
for name, prop_list in self.properties.items())
return "%s(%r, %r, %r, %r)" % (self.__class__.__name__,
self.id, self.label,
self.type, properties)
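# Illustrative note (not part of the original module; names and values below are
# placeholders): after extraction, a Vertex's ``properties`` attribute has the shape
#     {'name': [VertexProperty('name', 'alice', {})], ...}
# i.e. every property name maps to a *list* of VertexProperty objects, even when
# the cardinality is single.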
class VertexProperty(object):
"""
Vertex properties have a top-level value and an optional ``dict`` of properties.
"""
label = None
"""
label of the property
"""
value = None
"""
Value of the property
"""
properties = None
"""
dict of properties attached to the property
"""
def __init__(self, label, value, properties=None):
self.label = label
self.value = value
self.properties = properties or {}
def __eq__(self, other):
return isinstance(other, VertexProperty) and self.label == other.label and self.value == other.value and self.properties == other.properties
def __repr__(self):
return "%s(%r, %r, %r)" % (self.__class__.__name__, self.label, self.value, self.properties)
class Edge(Element):
"""
Represents an Edge element from a graph query.
Attributes match initializer parameters.
"""
element_type = 'edge'
_attrs = Element._attrs + ('inV', 'inVLabel', 'outV', 'outVLabel')
def __init__(self, id, label, type, properties,
inV, inVLabel, outV, outVLabel):
super(Edge, self).__init__(id, label, type, properties)
self.inV = inV
self.inVLabel = inVLabel
self.outV = outV
self.outVLabel = outVLabel
def __repr__(self):
return "%s(%r, %r, %r, %r, %r, %r, %r, %r)" %\
(self.__class__.__name__,
self.id, self.label,
self.type, self.properties,
self.inV, self.inVLabel,
self.outV, self.outVLabel)
class Path(object):
"""
Represents a graph path.
Labels list is taken verbatim from the results.
Objects are either :class:`~.Result` or :class:`~.Vertex`/:class:`~.Edge` for recognized types
"""
labels = None
"""
List of labels in the path
"""
objects = None
"""
List of objects in the path
"""
def __init__(self, labels, objects):
# TODO fix next major
# The Path class should not do any deserialization by itself. To fix in the next major.
from cassandra.datastax.graph.query import _graph_object_sequence
self.labels = labels
self.objects = list(_graph_object_sequence(objects))
def __eq__(self, other):
return self.labels == other.labels and self.objects == other.objects
def __str__(self):
return str({'labels': self.labels, 'objects': self.objects})
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.labels, [o.value for o in self.objects])
class T(object):
"""
Represents a collection of tokens for more concise Traversal definitions.
"""
name = None
val = None
# class attributes
id = None
"""
"""
key = None
"""
"""
label = None
"""
"""
value = None
"""
"""
def __init__(self, name, val):
self.name = name
self.val = val
def __str__(self):
return self.name
def __repr__(self):
return "T.%s" % (self.name, )
T.id = T("id", 1)
T.id_ = T("id_", 2)
T.key = T("key", 3)
T.label = T("label", 4)
T.value = T("value", 5)
T.name_to_value = {
'id': T.id,
'id_': T.id_,
'key': T.key,
'label': T.label,
'value': T.value
}
| apache-2.0 | -7,927,502,985,882,278,000 | 26.609524 | 148 | 0.588306 | false |
abhikeshav/ydk-py | core/ydk/providers/codec_provider.py | 1 | 2490 | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
""" providers.py
Service Providers module. Current implementation supports the NetconfServiceProvider which
uses ncclient (a Netconf client library) to provide CRUD services.
"""
from ydk.errors import YPYServiceProviderError
from .provider import ServiceProvider
from ._encoder import XmlEncoder
from ._decoder import XmlDecoder
import logging
class CodecServiceProvider(ServiceProvider):
""" Codec ServiceProvider to encode to and decode from desired payload format
Initialization parameters of CodecServiceProvider
kwargs:
- type : desired type of codec (xml, json etc)
"""
def __init__(self, **kwargs):
if(len(kwargs) == 0):
raise YPYServiceProviderError('Codec type is required')
codec_type = ''
for key, val in kwargs.iteritems():
if key == 'type':
codec_type = val
if codec_type == 'xml':
self.encoder = XmlEncoder()
self.decoder = XmlDecoder()
else:
raise YPYServiceProviderError('Codec type "{0}" not yet supported'.format(codec_type))
self.logger = logging.getLogger(__name__)
def _encode(self, entity):
""" Encodes the entity into the desired encoding format """
self.logger.info('Encoding object: \n{0}'.format(entity))
payload = self.encoder.encode(entity)
self.logger.info('Result of encoding: \n{0}'.format(payload))
return payload
def _decode(self, payload):
""" Decodes the payload from the desired decoding format """
self.logger.info('Decoding payload: {0}'.format(payload))
entity = self.decoder.decode(payload)
self.logger.info('Result of decoding: {0}'.format(entity))
return entity
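# Illustrative usage sketch (not part of the original module; ``entity`` below is a
# placeholder for any generated YDK model instance):
#
#     provider = CodecServiceProvider(type='xml')
#     payload = provider._encode(entity)     # entity -> XML payload string
#     entity2 = provider._decode(payload)    # XML payload string -> entity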
| apache-2.0 | -2,383,996,256,958,830,000 | 35.086957 | 98 | 0.639357 | false |
acorbe/torrent-migration-helper | torrent_migration_helper.py | 1 | 6268 | #!/usr/bin/env python
"""torrent_migration_helper
Given a folder containing a set A of .torrent files,
a folder containing a set B of downloaded files,
[and optionally a string STR expected to appear in the torrent tracker address,]
the subset of .torrent files in A whose
torrent has been downloaded into B
[and whose tracker address contains STR]
is copied into a target folder.
This addresses a common need: when a folder of downloaded torrents that should keep seeding is migrated to another seeding server, the matching .torrent files have to be selected and added to the destination torrent client. This package automates that selection.
"""
import os
import pandas as pd
import torrentparse as tp
"""torrent parse by mohanraj-r (https://github.com/mohanraj-r/torrentparse) must be installed."""
import shutil
__author__ = "Alessandro Corbetta"
__copyright__ = "Copyright 2014, Alessandro Corbetta"
__license__ = "GPL"
__version__ = ".1"
__maintainer__ = "Alessandro Corbetta"
__email__ = "corbisoft dot codes at gmail dot com"
DEFAULT_PATH = './torrentfile_source/'
DEFAULT_F_EXT = '.torrent'
def list_dir(path = DEFAULT_PATH
, print_list = False
, fending = '.torrent'
, just_files = True ):
"""lists the content of a given dir (path).
One can specify the ending (fending = '.torrent') of the considered files/dirs (defaulted to .torrent).
One can specify whether to include just files and skip directories (just_files = True). """
print "acquiring file list..."
if just_files:
flist = [ (f,os.path.join(path,f)) for f in os.listdir(path) if os.path.isfile(os.path.join(path,f)) and f.endswith(fending) ]
else:
flist = [ (f,os.path.join(path,f)) for f in os.listdir(path) if f.endswith(fending) ]
if print_list:
for f,fp in flist:
print fp
print "done!"
return flist
def build_df_from_flist(path
, flist = None
, default_ext = DEFAULT_F_EXT ):
"""Builds a pandas DataFrame whose records are .torrent information.
In particular:
fname (full name of the file)
fpath (full path of the file)
TorrentParser (TorrentParser object)
TrackerUrl
"""
if flist is None:
flist = list_dir(path)
flist_df = pd.DataFrame(flist, columns = ['fname','fpath'])
flist_df['fname_plain'] = flist_df.fname.apply( lambda f : f[:-len(default_ext)])
def parseTorrent(r):
"""calls TorrentParser constructor on each torrent file"""
try:
ret = tp.TorrentParser(r['fpath'])
except:
ret = None
return ret
print "parsing torrents..."
flist_df['TorrentParser'] = flist_df.apply( parseTorrent, axis = 1 )
print "extracting trackers..."
def get_tracker(r):
if r is not None:
try:
return r['TorrentParser'].get_tracker_url()
except:
return ''
else:
return ''
flist_df['TrackerUrl'] = flist_df.apply( get_tracker , axis = 1 )
print "done!"
return flist_df
def parse_tracker_list(flist_df
, substr
, output_filtered_torrent_list = False ):
"""filters the pd.DataFrame containing the .torrent files information and retains
the ones whose tracker address contains substr.
returns the filtered pd.DataFrame.
"""
flist_flt = flist_df[flist_df.TrackerUrl.apply(lambda st : substr in st)]
if output_filtered_torrent_list:
sorted_flist = flist_flt.fname
sorted_flist.order().to_csv('output.csv')
return flist_flt
def gather_downloaded_files(path):
"""retrieves the full list of files and dirs in path"""
flist = list_dir(path = path
, fending = ''
, just_files = False)
flist_names_set = set([f for f,fp in flist])
return {'fnames' : flist , 'fnames_set' : flist_names_set}
def filter_torrent_in_given_dl_site(flist_df,dl_site_content):
"""determines which .torrents (rows in flist_df np.DataFrame) are actually in the
download site (df_site_content)"""
print "filtering for dl site..."
matching = flist_df.fname_plain.apply(lambda fn : fn in dl_site_content['fnames_set'])
print "done"
return matching
def main(tor_fl_source
, tor_fl_dest
, tor_dl_source
, tracker_address_content = None ):
"""core function. The following steps are performed
1. a database DB out of .torrent files in tor_fl_source is built
2. the database DB is filtered based on which trackers contain in their
address the string tracker_address_content
3. the download site (tor_dl_source) is scanned and a set with the file names is built
4. the elements of DB which have a correspondence in the download site are identified
5. Indentified .torrents are copied in folder tor_fl_dest
"""
#builds database of torrent files
df = build_df_from_flist(tor_fl_source)
#possibly restricts torrent database considering just trackers having a particular string in their address
if tracker_address_content is not None:
df_fl = parse_tracker_list(df,tracker_address_content)
else:
df_fl = df
#builds the database of the download target directory
df_dl = gather_downloaded_files(tor_dl_source)
#extracts the intersection between the torrent files list and the files actually downloaded (based on the file name)
matching1 = filter_torrent_in_given_dl_site(df_fl,df_dl)
#obtains the list of .torrent files from matching1
df_fl1 = df_fl[matching1]
#copies selected .torrent files in destination path
df_fl1.fpath.apply(lambda pt : shutil.copy(pt,tor_fl_dest))
if __name__ == '__main__':
tor_dl_source = '/downloaded/torrent/location/'
tor_fl_source = '/torrent/file/source/'
tor_fl_dest = '/torrent/file/destination/'
tracker_address_content = 'tracker_address_content'
main(tor_fl_source, tor_fl_dest, tor_dl_source , tracker_address_content)
| gpl-2.0 | 2,012,223,801,593,562,400 | 33.251366 | 278 | 0.6388 | false |
alexryndin/ambari | ambari-common/src/main/python/ambari_jinja2/ambari_jinja2/__init__.py | 5 | 2437 | # -*- coding: utf-8 -*-
"""
ambari_jinja2
~~~~~~
Jinja2 is a template engine written in pure Python. It provides a
Django inspired non-XML syntax but supports inline expressions and
an optional sandboxed environment.
Nutshell
--------
Here a small example of a Jinja2 template::
{% extends 'base.html' %}
{% block title %}Memberlist{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
try:
__version__ = __import__('pkg_resources') \
.get_distribution('Jinja2').version
except:
__version__ = 'unknown'
# high level interface
from ambari_jinja2.environment import Environment, Template
# loaders
from ambari_jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
ModuleLoader
# bytecode caches
from ambari_jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
MemcachedBytecodeCache
# undefined types
from ambari_jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
# exceptions
from ambari_jinja2.exceptions import TemplateError, UndefinedError, \
TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
TemplateAssertionError
# decorators and public utilities
from ambari_jinja2.filters import environmentfilter, contextfilter, \
evalcontextfilter
from ambari_jinja2.utils import Markup, escape, clear_caches, \
environmentfunction, evalcontextfunction, contextfunction, \
is_undefined
__all__ = [
'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
'evalcontextfilter', 'evalcontextfunction'
]
| apache-2.0 | -8,816,913,606,122,467,000 | 32.383562 | 80 | 0.691424 | false |
mammique/django | django/contrib/admin/actions.py | 5 | 3184 | """
Built-in, globally-available admin actions.
"""
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
"""
Default action which deletes the selected objects.
    This action first displays a confirmation page which shows all the
    deletable objects, or, if the user lacks permission for one of the related
    children (foreign keys), a "permission denied" message.
    Next, it deletes all selected objects and redirects back to the change list.
"""
opts = modeladmin.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not modeladmin.has_delete_permission(request):
raise PermissionDenied
using = router.db_for_write(modeladmin.model)
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
deletable_objects, perms_needed, protected = get_deleted_objects(
queryset, opts, request.user, modeladmin.admin_site, using)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post'):
if perms_needed:
raise PermissionDenied
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_text(obj)
modeladmin.log_deletion(request, obj, obj_display)
queryset.delete()
modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
"count": n, "items": model_ngettext(modeladmin.opts, n)
})
# Return None to display the change list page again.
return None
if len(queryset) == 1:
objects_name = force_text(opts.verbose_name)
else:
objects_name = force_text(opts.verbose_name_plural)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": objects_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"objects_name": objects_name,
"deletable_objects": [deletable_objects],
'queryset': queryset,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
}
# Display the confirmation page
return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
"admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.model_name),
"admin/%s/delete_selected_confirmation.html" % app_label,
"admin/delete_selected_confirmation.html"
], context, current_app=modeladmin.admin_site.name)
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
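# Illustrative sketch (not part of Django itself): ``delete_selected`` is registered
# as a site-wide admin action by default; a project that wants to remove the
# bulk-delete option can disable it, e.g.
#
#     from django.contrib import admin
#     admin.site.disable_action('delete_selected')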
| bsd-3-clause | -5,815,250,412,941,063,000 | 37.361446 | 95 | 0.673681 | false |
tcarobruce/us-mayors | mayors.py | 1 | 5192 | #!/usr/bin/env python
"""
mayors.py - scrape information about US Mayors from usmayors.org
"""
import argparse
import csv
import json
from datetime import datetime
from os.path import splitext
import requests
from lxml import html
STATES = {
"AK": "Alaska",
"AL": "Alabama",
"AR": "Arkansas",
"AS": "American Samoa",
"AZ": "Arizona",
"CA": "California",
"CO": "Colorado",
"CT": "Connecticut",
"DC": "District of Columbia",
"DE": "Delaware",
"FL": "Florida",
"GA": "Georgia",
"GU": "Guam",
"HI": "Hawaii",
"IA": "Iowa",
"ID": "Idaho",
"IL": "Illinois",
"IN": "Indiana",
"KS": "Kansas",
"KY": "Kentucky",
"LA": "Louisiana",
"MA": "Massachusetts",
"MD": "Maryland",
"ME": "Maine",
"MI": "Michigan",
"MN": "Minnesota",
"MO": "Missouri",
"MP": "N. Mariana Islands",
"MS": "Mississippi",
"MT": "Montana",
"NC": "North Carolina",
"ND": "North Dakota",
"NE": "Nebraska",
"NH": "New Hampshire",
"NJ": "New Jersey",
"NM": "New Mexico",
"NV": "Nevada",
"NY": "New York",
"OH": "Ohio",
"OK": "Oklahoma",
"OR": "Oregon",
"PA": "Pennsylvania",
"PR": "Puerto Rico",
"RI": "Rhode Island",
"SC": "South Carolina",
"SD": "South Dakota",
"TN": "Tennessee",
"TX": "Texas",
"UT": "Utah",
"VA": "Virginia",
"VI": "Virgin Islands",
"VT": "Vermont",
"WA": "Washington",
"WI": "Wisconsin",
"WV": "West Virginia",
"WY": "Wyoming",
}
BASE_URL = "http://usmayors.org/"
SEARCH_URL = "https://www.usmayors.org/mayors/meet-the-mayors/"
CSV_FIELDS = '''
name email phone bio_url img_url city state population
city_site_url next_election'''.split()
def get_mayors_for_state(state):
state_name = STATES[state]
payload = {'submit': 'Search', 'searchTerm': state_name}
headers = {"User-Agent": "mayors-scraper/0.0.1"}
response = requests.post(SEARCH_URL, data=payload, headers=headers)
response.raise_for_status()
root = html.fromstring(response.content.decode('latin1'))
for node in root.cssselect('div.post-content ul'):
try:
result = _get_mayor_from_table(node)
if result and result["state"] == state:
yield result
except Exception:
print("ERROR doing {}".format(state))
import traceback
traceback.print_exc()
continue
def _get_mayor_from_table(node):
# Text example:
# 1 Ethan Berkowitz
# 2 Anchorage, AK
# 3 Population: 291,538
# 4 Web Site
# 5 Next Election Date: 04/06/2021
# 6 Bio
# 7 Phone:
# 8 907-343-7100
# 9 Email:
# 10 [email protected]
bold = node.cssselect("b")[0]
if bold is None or not bold.text or not bold.text.strip():
# empty name, vacant or unknown
return None
mayor_data = {}
text = (s.strip() for s in node.itertext() if s.strip())
links = (a.attrib["href"] for a in node.cssselect("a"))
mayor_data["img_url"] = node.cssselect("img")[0].attrib["src"]
mayor_data["name"] = next(text)
city_state = next(text)
mayor_data["city"], mayor_data["state"] = city_state.split(", ")
mayor_data["population"] = next(text).replace("Population: ", "").replace(",", "")
mayor_data["city_site_url"] = next(links)
next(text) # skip "Web Site" text
next_election = next(text).replace("Next Election Date: ", "")
if next_election:
try:
parsed_next_election = datetime.strptime(next_election, "%m/%d/%Y")
mayor_data["next_election"] = parsed_next_election.strftime("%Y-%m-%d")
except ValueError:
pass
mayor_data["bio_url"] = next(links)
mayor_data["phone"] = next(links).replace("tel:", "")
mayor_data["email"] = next(links).replace("mailto:", "")
return mayor_data
def get_mayors(states=STATES):
for state in states:
for mayor in get_mayors_for_state(state):
yield mayor
def write_to_csv(mayors, out):
w = csv.DictWriter(out, CSV_FIELDS)
w.writeheader()
for mayor in mayors:
w.writerow(mayor)
def write_to_json(mayors, out):
json.dump(list(mayors), out, indent=4)
def parse_arguments():
parser = argparse.ArgumentParser(
description="Scrape US mayoral data from usmayors.org")
parser.add_argument('out', type=argparse.FileType('w', encoding="UTF-8"),
default='-')
parser.add_argument('--format', choices=['csv', 'json'])
parser.add_argument('--state', nargs='*', default=STATES.keys())
args = parser.parse_args()
# guess format from file extension
if args.format is None:
fn = args.out.name
if fn != '<stdout>':
_, ext = splitext(fn)
args.format = ext[1:]
else:
args.format = 'csv'
args.writer = {
'csv': write_to_csv,
'json': write_to_json,
}[args.format] # may KeyError if format is unrecognized
return args
if __name__ == '__main__':
args = parse_arguments()
mayors = get_mayors(states=args.state)
args.writer(mayors, args.out)
| mit | 1,625,337,328,617,739,800 | 24.96 | 86 | 0.569723 | false |
cchurch/ansible | lib/ansible/cli/arguments/option_helpers.py | 13 | 16420 | # Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import operator
import argparse
import os
import os.path
import sys
import time
import yaml
import ansible
from ansible import constants as C
from ansible.module_utils._text import to_native
from ansible.release import __version__
from ansible.utils.path import unfrackpath
#
# Special purpose OptionParsers
#
class SortingHelpFormatter(argparse.HelpFormatter):
def add_arguments(self, actions):
actions = sorted(actions, key=operator.attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
class AnsibleVersion(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ansible_version = to_native(version(getattr(parser, 'prog')))
print(ansible_version)
parser.exit()
class PrependListAction(argparse.Action):
"""A near clone of ``argparse._AppendAction``, but designed to prepend list values
instead of appending.
"""
def __init__(self, option_strings, dest, nargs=None, const=None, default=None, type=None,
choices=None, required=False, help=None, metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != argparse.OPTIONAL:
raise ValueError('nargs must be %r to supply const' % argparse.OPTIONAL)
super(PrependListAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar
)
def __call__(self, parser, namespace, values, option_string=None):
items = copy.copy(ensure_value(namespace, self.dest, []))
items[0:0] = values
setattr(namespace, self.dest, items)
def ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
#
# Callbacks to validate and normalize Options
#
def unfrack_path(pathsep=False):
"""Turn an Option's data into a single path in Ansible locations"""
def inner(value):
if pathsep:
return [unfrackpath(x) for x in value.split(os.pathsep) if x]
if value == '-':
return value
return unfrackpath(value)
return inner
def _git_repo_info(repo_path):
""" returns a string containing git branch, commit id and commit date """
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
# There is a possibility the .git file to have an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
with open(os.path.join(repo_path, "HEAD")) as f:
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
with open(branch_path) as f:
commit = f.readline()[:10]
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = _git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
with open(submodules) as f:
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = _git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
return result
def version(prog=None):
""" return ansible version """
if prog:
result = " ".join((prog, __version__))
else:
result = __version__
gitinfo = _gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result = result + "\n configured module search path = %s" % cpath
result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
result = result + "\n executable location = %s" % sys.argv[0]
result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
return result
#
# Functions to add pre-canned options to an OptionParser
#
def create_base_parser(prog, usage="", desc=None, epilog=None):
"""
Create an options parser for all ansible scripts
"""
# base opts
parser = argparse.ArgumentParser(
prog=prog,
formatter_class=SortingHelpFormatter,
epilog=epilog,
description=desc,
conflict_handler='resolve',
)
version_help = "show program's version number, config file location, configured module search path," \
" module location, executable location and exit"
parser.add_argument('--version', action=AnsibleVersion, nargs=0, help=version_help)
add_verbosity_options(parser)
return parser
def add_verbosity_options(parser):
"""Add options for verbosity"""
parser.add_argument('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
def add_async_options(parser):
"""Add options for commands which can launch async tasks"""
parser.add_argument('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type=int, dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_argument('-B', '--background', dest='seconds', type=int, default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
def add_basedir_options(parser):
"""Add options for commands which can set a playbook basedir"""
parser.add_argument('--playbook-dir', default=None, dest='basedir', action='store',
help="Since this tool does not use playbooks, use this as a substitute playbook directory."
"This sets the relative path for many features including roles/ group_vars/ etc.")
def add_check_options(parser):
"""Add options for commands which can run with diagnostic information of tasks"""
parser.add_argument("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_argument('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
parser.add_argument("-D", "--diff", default=C.DIFF_ALWAYS, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those"
" files; works great with --check")
def add_connect_options(parser):
"""Add options for commands which need to connection to other hosts"""
connect_group = parser.add_argument_group("Connection Options", "control as whom and how to connect to hosts")
connect_group.add_argument('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_group.add_argument('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', type=unfrack_path())
connect_group.add_argument('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_argument('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_argument('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type=int, dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
connect_group.add_argument('--ssh-common-args', default='', dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_argument('--sftp-extra-args', default='', dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_argument('--scp-extra-args', default='', dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_argument('--ssh-extra-args', default='', dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_argument_group(connect_group)
def add_fork_options(parser):
"""Add options for commands that can fork worker processes"""
parser.add_argument('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type=int,
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
def add_inventory_options(parser):
"""Add options for commands that utilize inventory"""
parser.add_argument('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path or comma separated host list. --inventory-file is deprecated")
parser.add_argument('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_argument('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
def add_meta_options(parser):
"""Add options for commands which can launch meta tasks from the command line"""
parser.add_argument('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_argument('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache for every host in inventory")
def add_module_options(parser):
"""Add options for commands that load modules"""
module_path = C.config.get_configuration_definition('DEFAULT_MODULE_PATH').get('default', '')
parser.add_argument('-M', '--module-path', dest='module_path', default=None,
help="prepend colon-separated path(s) to module library (default=%s)" % module_path,
type=unfrack_path(pathsep=True), action=PrependListAction)
def add_output_options(parser):
"""Add options for commands which can change their output"""
parser.add_argument('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_argument('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
def add_runas_options(parser):
"""
Add options for commands which can run tasks as another user
Note that this includes the options from add_runas_prompt_options(). Only one of these
functions should be used.
"""
runas_group = parser.add_argument_group("Privilege Escalation Options", "control how and which user you become as on target hosts")
# consolidated privilege escalation (become)
runas_group.add_argument("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_argument('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD,
help="privilege escalation method to use (default=%(default)s), use "
"`ansible-doc -t become -l` to list valid choices.")
runas_group.add_argument('--become-user', default=None, dest='become_user', type=str,
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
add_runas_prompt_options(parser, runas_group=runas_group)
def add_runas_prompt_options(parser, runas_group=None):
"""
Add options for commands which need to prompt for privilege escalation credentials
Note that add_runas_options() includes these options already. Only one of the two functions
should be used.
"""
if runas_group is None:
runas_group = parser.add_argument_group("Privilege Escalation Options",
"control how and which user you become as on target hosts")
runas_group.add_argument('-K', '--ask-become-pass', dest='become_ask_pass', action='store_true',
default=C.DEFAULT_BECOME_ASK_PASS,
help='ask for privilege escalation password')
parser.add_argument_group(runas_group)
def add_runtask_options(parser):
"""Add options for commands that run a task"""
parser.add_argument('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
def add_subset_options(parser):
"""Add options for commands which can run a subset of tasks"""
parser.add_argument('-t', '--tags', dest='tags', default=C.TAGS_RUN, action='append',
help="only run plays and tasks tagged with these values")
parser.add_argument('--skip-tags', dest='skip_tags', default=C.TAGS_SKIP, action='append',
help="only run plays and tasks whose tags do not match these values")
def add_vault_options(parser):
"""Add options for loading vault files"""
parser.add_argument('--vault-id', default=[], dest='vault_ids', action='append', type=str,
help='the vault identity to use')
base_group = parser.add_mutually_exclusive_group()
base_group.add_argument('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
base_group.add_argument('--vault-password-file', default=[], dest='vault_password_files',
help="vault password file", type=unfrack_path(), action='append')
| gpl-3.0 | 4,984,088,760,433,884,000 | 44.611111 | 144 | 0.616626 | false |
DavidAndreev/indico | indico/util/string_test.py | 1 | 7885 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from itertools import count
import pytest
from enum import Enum
from indico.util.string import (seems_html, to_unicode, make_unique_token, slugify, text_to_repr, format_repr, snakify,
camelize, camelize_keys, snakify_keys, crc32, normalize_phone_number, render_markdown,
sanitize_email)
def test_seems_html():
assert seems_html('<b>test')
assert seems_html('a <b> c')
assert not seems_html('test')
assert not seems_html('a < b > c')
@pytest.mark.parametrize(('input', 'output'), (
(b'foo', u'foo'), # ascii
(u'foo', u'foo'), # unicode
(b'm\xc3\xb6p', u'm\xf6p'), # utf8
(b'm\xf6p', u'm\xf6p'), # latin1
(b'm\xc3\xb6p m\xf6p', # mixed...
u'm\xc3\xb6p m\xf6p'), # ...decoded as latin1
))
def test_to_unicode(input, output):
assert to_unicode(input) == output
def test_make_unique_token(monkeypatch):
monkeypatch.setattr('indico.util.string.uuid4', lambda _counter=count(): str(next(_counter)))
tokens = {'1', '3'}
def _get_token():
token = make_unique_token(lambda t: t not in tokens)
tokens.add(token)
return token
assert _get_token() == '0'
assert _get_token() == '2'
assert _get_token() == '4'
assert _get_token() == '5'
@pytest.mark.parametrize(('input', 'output'), (
(b'this is a test', 'this-is-a-test'),
(u'this is \xe4 test', 'this-is-ae-test'),
(u'12345!xxx ', '12345xxx'),
))
def test_slugify(input, output):
assert slugify(input) == output
def test_slugify_maxlen():
assert slugify('foo bar', maxlen=5) == 'foo-b'
def test_slugify_args():
assert slugify('foo', 123, 'bar') == 'foo-123-bar'
assert slugify(u'm\xf6p'.encode('utf-8'), 123, u'b\xe4r') == 'moep-123-baer'
@pytest.mark.parametrize(('input', 'lower', 'output'), (
('Test', True, 'test'),
('Test', False, 'Test'),
(u'm\xd6p', False, 'mOep'),
(u'm\xd6p', True, 'moep')
))
def test_slugify_lower(input, lower, output):
assert slugify(input, lower=lower) == output
@pytest.mark.parametrize(('input', 'html', 'max_length', 'output'), (
('Hello\n \tWorld', False, None, 'Hello World'),
('Hello<b>World</b>', False, None, 'Hello<b>World</b>'),
('Hello<b>World</b>', True, None, 'HelloWorld'),
('Hi <b>a</b> <br>', True, None, 'Hi a'),
('x' * 60, False, None, 'x' * 60),
('x' * 60, False, 50, 'x' * 50 + '...'),
('x' * 50, False, 50, 'x' * 50)
))
def test_text_to_repr(input, html, max_length, output):
assert text_to_repr(input, html=html, max_length=max_length) == output
@pytest.mark.parametrize(('args', 'kwargs', 'output'), (
((), {}, '<Foo()>'),
(('id', 'hello', 'dct'), {}, "<Foo(1, world, {'a': 'b'})>"),
(('id',), {}, '<Foo(1)>'),
(('id', 'enum'), {}, '<Foo(1, foo)>'),
(('id',), {'flag1': True, 'flag0': False}, '<Foo(1)>'),
(('id',), {'flag1': False, 'flag0': False}, '<Foo(1, flag1=True)>'),
(('id',), {'flag1': False, 'flag0': True}, '<Foo(1, flag0=False, flag1=True)>'),
(('id',), {'flag1': False, 'flag0': True, '_text': u'moo'}, '<Foo(1, flag0=False, flag1=True): "moo">')
))
def test_format_repr(args, kwargs, output):
class Foo(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class MyEnum(Enum):
foo = 'bar'
obj = Foo(id=1, enum=MyEnum.foo, hello='world', dct={'a': 'b'}, flag1=True, flag0=False)
assert format_repr(obj, *args, **kwargs) == output
@pytest.mark.parametrize(('input', 'output'), (
('', ''),
('FooBar', 'foo_bar'),
('fooBar', 'foo_bar'),
('fooBAR', 'foo_bar'),
('bar', 'bar'),
('Bar', 'bar'),
('aaBbCc', 'aa_bb_cc'),
))
def test_snakify(input, output):
assert snakify(input) == output
@pytest.mark.parametrize(('input', 'output'), (
('_', '_'),
('_foo_bar', '_fooBar'),
('foo', 'foo'),
('fooBar', 'fooBar'),
('foo_bar', 'fooBar'),
('aa_bb_cC', 'aaBbCc'),
))
def test_camelize(input, output):
assert camelize(input) == output
def test_camelize_keys():
d = {'fooBar': 'foo', 'bar_foo': 123, 'moo_bar': {'hello_world': 'test'},
'nested': [{'is_dict': True}, 'foo', ({'a_b': 'c'},)]}
orig = d.copy()
d2 = camelize_keys(d)
assert d == orig # original dict not modified
assert d2 == {'fooBar': 'foo', 'barFoo': 123, 'mooBar': {'helloWorld': 'test'},
'nested': [{'isDict': True}, 'foo', ({'aB': 'c'},)]}
def test_snakify_keys():
d = {'sn_case': 2, 'shouldBeSnakeCase': 3, 'snake': 4, 'snake-case': 5, 'inner': {'innerDict': 2}}
orig = d.copy()
d2 = snakify_keys(d)
assert d == orig
assert d2 == {'sn_case': 2, 'should_be_snake_case': 3, 'snake': 4, 'snake-case': 5, 'inner': {'inner_dict': 2}}
def test_crc32():
assert crc32(u'm\xf6p') == 2575016153
assert crc32(u'm\xf6p'.encode('utf-8')) == 2575016153
assert crc32(b'') == 0
assert crc32(b'hello world\0\1\2\3\4') == 140159631
@pytest.mark.parametrize(('input', 'output'), (
('', ''),
('+41785324567', '+41785324567'),
('++454545455', '+454545455'),
('123-456-789', '123456789'),
('0123456x0+', '0123456x0'),
('+48 785 326 691', '+48785326691'),
('0048785326691', '0048785326691'),
('123-456-xxxx', '123456xxxx')
))
def test_normalize_phone_number(input, output):
assert normalize_phone_number(input) == output
@pytest.mark.parametrize(('input', 'output'), (
('', ''),
('foo', 'foo'),
('[email protected]', '[email protected]'),
('<[email protected]>', '[email protected]'),
('foobar <[email protected]> asdf', '[email protected]'),
('foobar <[email protected]> <[email protected]>', '[email protected]')
))
def test_sanitize_email(input, output):
assert sanitize_email(input) == output
@pytest.mark.parametrize(('input', 'output'), (
('*coconut*', '<p><em>coconut</em></p>'),
('**swallow**', '<p><strong>swallow</strong></p>'),
('<span>Blabla **strong text**</span>', '<p><span>Blabla <strong>strong text</strong></span></p>'),
('[Python](http://www.google.com/search?q=holy+grail&ln=fr)',
'<p><a href="http://www.google.com/search?q=holy+grail&ln=fr">Python</a></p>'),
("<script>alert('I'm evil!')</script>", "<script>alert('I'm evil!')</script>"),
("Name|Colour\n---|---\nLancelot|Blue",
'<table>\n<thead>\n<tr>\n<th>Name</th>\n<th>Colour</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>Lancelot</td>\n'
'<td>Blue</td>\n</tr>\n</tbody>\n</table>'),
("**$2 * 2 * 2 > 7$**", "<p><strong>$2 * 2 * 2 > 7$</strong></p>"),
("Escaping works just fine! $ *a* $", "<p>Escaping works just fine! $ *a* $</p>"),
('', '<p><img alt="Just a cat" '
'src="http://myserver.example.com/cat.png"></p>'),
("<https://indico.github.io>", '<p><a href="https://indico.github.io">https://indico.github.io</a></p>')
))
def test_markdown(input, output):
assert render_markdown(input, extensions=('tables',)) == output
| gpl-3.0 | 6,633,426,769,576,582,000 | 35.169725 | 119 | 0.561953 | false |
RatulSaha/leetcode | 001-050/011-contains-most-water.py | 1 | 1392 | """
STATEMENT
Given n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai).
n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0).
Find two lines, which together with x-axis forms a container, such that the container contains the most water.
CLARIFICATIONS
- I am assuming there is no redundant duplicate in the list? Sure.
- Can the list be empty? No.
- I am assuming the list is not sorted? Sure.
- Should I return the lines, or the 'area' of water? The area is fine.
EXAMPLES
[2, 11, 13, 9] -> 18 (9*2, for lines [11,9])
COMMENTS
- We can try with the widest container and move the lines in if that gives a container with more area.
- Both the minimum height among the lines and the width matter, so we can keep the current area.
- O(n) time complexity and constant space complexity.
- Unless the list is sorted, I don't see any way to improve the complexity.
"""
def maxArea(height):
"""
:type height: List[int]
:rtype: int
"""
left, right = 0, len(height)-1
area_so_far = 0
while left < right:
h = min(height[left], height[right])
area_so_far = max(area_so_far, (right-left)*h)
while (height[left] <= h and left < right):
left += 1
while (height[right] <= h and left < right):
right -= 1
return area_so_far
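# Illustrative check (example taken from the EXAMPLES section above):
if __name__ == "__main__":
    print(maxArea([2, 11, 13, 9]))  # expected: 18, from the lines of height 11 and 9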
| mit | 5,954,015,889,920,177,000 | 36.621622 | 110 | 0.660201 | false |
Nidylei/azure-linux-automation | remote-scripts/RemoteCopy.py | 8 | 1464 | #!/usr/bin/python
import argparse
import sys
from azuremodules import *
import paramiko
import azuremodules
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--serverip', help='specifies server VIP of server name', required=True)
parser.add_argument('-m', '--mode', help='switch : specify "upload" or "download" (case sensitive)', choices=['upload', 'download'] )
parser.add_argument('-u', '--username', help='Remote host username', required=True)
parser.add_argument('-p', '--password', help='Remote host password', required=True)
parser.add_argument('-P', '--port', help='Remote host SSH port', required=True, type=int)
parser.add_argument('-l', '--localLocation', help='use with Download switch')
parser.add_argument('-r', '--remoteLocation', help='use with upload switch')
parser.add_argument('-f', '--files', help='mention the complete path of files you want to download or upload. Separate multiple files with (,) comma!')
args = parser.parse_args()
#SetVnetGlobalParameters()
hostIP = args.serverip
hostPassword = args.password
hostUsername = args.username
hostPort = int(args.port)
filesNames = args.files
localLocation = args.localLocation
remoteLocation = args.remoteLocation
copyMode = args.mode
if copyMode == 'upload':
RemoteUpload(hostIP, hostPassword, hostUsername, hostPort, filesNames, remoteLocation)
if copyMode == 'download':
RemoteDownload(hostIP, hostPassword, hostUsername, hostPort, filesNames, localLocation)
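# Illustrative invocation (host, credentials and paths below are placeholders):
#
#     python RemoteCopy.py -c 192.0.2.10 -u azureuser -p '<password>' -P 22 \
#         -m upload -f /tmp/a.txt,/tmp/b.txt -r /home/azureuser/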
| apache-2.0 | 2,106,124,264,224,992,300 | 36.538462 | 151 | 0.743852 | false |
nirbheek/cerbero | test/test_cerbero_ide_pkgconfig.py | 27 | 3111 | #!/usr/bin/env python
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import os
from cerbero.ide.pkgconfig import PkgConfig
class TestPkgConfig(unittest.TestCase):
def setUp(self):
pc_path = os.path.join(os.path.dirname(__file__), 'pkgconfig')
os.environ['PKG_CONFIG_LIBDIR'] = pc_path
os.environ['PKG_CONFIG_PATH'] = pc_path
self.pkgconfig = PkgConfig('gstreamer-0.10')
self.pkgconfig2 = PkgConfig('gstreamer-0.10', False)
def testListAll(self):
expected = ['gobject-2.0', 'gmodule-2.0', 'libxml-2.0', 'gthread-2.0',
'glib-2.0', 'gmodule-no-export-2.0', 'gstreamer-0.10']
self.assertEquals(sorted(PkgConfig.list_all()), sorted(expected))
def testIncludeDirs(self):
expected = ['/usr/include/gstreamer-0.10', '/usr/include/glib-2.0',
'/usr/lib/glib-2.0/include',
'/usr/include/libxml2']
self.assertEquals(self.pkgconfig.include_dirs(), expected)
expected = ['/usr/include/gstreamer-0.10']
self.assertEquals(self.pkgconfig2.include_dirs(), expected)
def testCFlags(self):
expected = ['-pthread']
self.assertEquals(self.pkgconfig.cflags(), expected)
expected = []
self.assertEquals(self.pkgconfig2.cflags(), expected)
def testLibrariesDir(self):
expected = []
self.assertEquals(self.pkgconfig.libraries_dirs(), expected)
expected = []
self.assertEquals(self.pkgconfig2.libraries_dirs(), expected)
def testLibraries(self):
expected = ['gstreamer-0.10', 'gobject-2.0', 'gmodule-2.0', 'xml2',
'gthread-2.0', 'rt', 'glib-2.0']
self.assertEquals(self.pkgconfig.libraries(), expected)
expected = ['gstreamer-0.10']
self.assertEquals(self.pkgconfig2.libraries(), expected)
def testRequires(self):
expected = ['glib-2.0', 'gobject-2.0', 'gmodule-no-export-2.0',
'gthread-2.0', 'libxml-2.0']
self.assertEquals(self.pkgconfig.requires(), expected)
self.assertEquals(self.pkgconfig2.requires(), expected)
def testPrefix(self):
self.assertEquals(self.pkgconfig.prefix(), '/usr')
self.assertEquals(self.pkgconfig2.prefix(), '/usr')
| lgpl-2.1 | -6,374,893,418,650,670,000 | 40.48 | 78 | 0.661202 | false |
byterom/android_external_chromium_org | tools/cr/cr/targets/target.py | 44 | 4463 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to hold the Target plugin."""
import operator
import re
import cr
import cr.base.context
DEFAULT = cr.Config.From(
CR_DEFAULT_TARGET='chrome',
)
class Target(cr.base.context.Context, cr.AutoExport):
"""Base class for implementing cr targets.
A target is something that can be built and run.
"""
# The default base priority
PRIORITY = 0
# The default pattern used to try to detect whether a target is a test and
# should use the test runner.
TEST_PATTERN = re.compile('tests?$')
# The special "test type" that means it's not a test.
NOT_A_TEST = 'no'
# The default choice for the type of test when it can't be determined.
NORMAL_TEST = 'gtest'
# TODO(iancottrell): support the other test types
TEST_TYPES = [NOT_A_TEST, NORMAL_TEST]
def __init__(self, target_name):
super(Target, self).__init__(target_name)
test_type = None
if self.TEST_PATTERN.search(target_name):
test_type = self.NORMAL_TEST
config = cr.Config('DEFAULTS').From(
CR_TARGET=target_name,
CR_TARGET_NAME='{CR_TARGET}',
CR_BUILD_TARGET=cr.Config.Optional(
'{CR_TARGET}{CR_TARGET_SUFFIX}', '{CR_TARGET}'),
CR_RUN_ARGUMENTS='',
CR_TEST_TYPE=test_type,
)
self._data = cr.context.data
self.AddChildren(config, cr.context)
if hasattr(self, 'CONFIG'):
self.AddChild(self.CONFIG)
if not self.valid:
self.Set(CR_TARGET_SUFFIX='')
self.test_type = self.Find('CR_TEST_TYPE')
self.target_name = self.Find('CR_TARGET_NAME')
@property
def build_target(self):
return self.Get('CR_BUILD_TARGET')
@property
def valid(self):
return cr.Builder.IsTarget(self.build_target)
@property
def is_test(self):
return self.test_type and self.test_type != self.NOT_A_TEST
@classmethod
def AddArguments(cls, command, parser, allow_multiple=False):
nargs = '?'
help_string = 'The target to {0}'
if allow_multiple:
nargs = '*'
help_string = 'The target(s) to {0}'
parser.add_argument(
'_targets', metavar='target',
help=help_string.format(command.name),
nargs=nargs
)
@classmethod
def AllTargets(cls):
yield cls
for child in cls.__subclasses__():
for t in child.AllTargets():
yield t
@classmethod
def CreateTarget(cls, target_name):
"""Attempts to build a target by name.
This searches the set of installed targets in priority order to see if any
of them are willing to handle the supplied name.
If a target cannot be found, the program will be aborted.
Args:
target_name: The name of the target we are searching for.
Returns:
The target that matched.
"""
target_clses = sorted(
cls.AllTargets(),
key=operator.attrgetter('PRIORITY'),
reverse=True
)
for handler in target_clses:
target = handler.Build(target_name)
if target:
if not target.valid:
print 'Invalid target {0} as {1}'.format(
target_name, target.build_target)
guesses = cr.Builder.GuessTargets(target_name)
if guesses:
print 'Did you mean {0}?'.format(
', '.join(guesses[:-1]) + ' or ' + guesses[-1]
if len(guesses) > 1 else guesses[0])
exit(1)
return target
print 'Unknown target {0}'.format(target_name)
exit(1)
@classmethod
def GetTargets(cls):
target_names = getattr(cr.context.args, '_targets', None)
if not target_names:
target_names = [cr.context.Get('CR_DEFAULT_TARGET')]
elif hasattr(target_names, 'swapcase'):
# deal with the single target case
target_names = [target_names]
return [cls.CreateTarget(target_name)
for target_name in target_names]
@classmethod
def Build(cls, target_name):
return cls(target_name)
class NamedTarget(Target):
"""A base class for explicit named targets.
Only matches a target if the name is an exact match.
  Up its priority to come ahead of general purpose rule matches.
"""
NAME = None
PRIORITY = Target.PRIORITY + 1
@classmethod
def Build(cls, target_name):
try:
if target_name == cls.NAME:
return cls(target_name)
except AttributeError:
pass
return None
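# Editor's note: the snippet below is an invented illustration of how an
# exact-name target is meant to be declared on top of NamedTarget. It is left
# commented out on purpose so that it does not register an extra subclass with
# AllTargets(); the name 'example_app' does not refer to any real target.
#
# class ExampleAppTarget(NamedTarget):
#   """Matches only the literal target name 'example_app'."""
#   NAME = 'example_app'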
| bsd-3-clause | 4,266,460,641,322,779,600 | 27.980519 | 78 | 0.641273 | false |
GoogleCloudPlatform/training-data-analyst | self-paced-labs/tensorflow-2.x/census/trainer/model.py | 4 | 2904 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a Keras model and input function for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def input_fn(features, labels, shuffle, num_epochs, batch_size):
"""Generates an input function to be used for model training.
Args:
features: numpy array of features used for training or inference
labels: numpy array of labels for each example
shuffle: boolean for whether to shuffle the data or not (set True for
training, False for evaluation)
num_epochs: number of epochs to provide the data for
batch_size: batch size for training
Returns:
A tf.data.Dataset that can provide data to the Keras model for training or
evaluation
"""
if labels is None:
inputs = features
else:
inputs = (features, labels)
dataset = tf.data.Dataset.from_tensor_slices(inputs)
if shuffle:
dataset = dataset.shuffle(buffer_size=len(features))
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
return dataset
def create_keras_model(input_dim, learning_rate):
"""Creates Keras Model for Binary Classification.
The single output node + Sigmoid activation makes this a Logistic
Regression.
Args:
input_dim: How many features the input has
learning_rate: Learning rate for training
Returns:
The compiled Keras model (still needs to be trained)
"""
Dense = tf.keras.layers.Dense
model = tf.keras.Sequential(
[
Dense(100, activation=tf.nn.relu, kernel_initializer='uniform',
input_shape=(input_dim,)),
Dense(75, activation=tf.nn.relu),
Dense(50, activation=tf.nn.relu),
Dense(25, activation=tf.nn.relu),
Dense(1, activation=tf.nn.sigmoid)
])
# Custom Optimizer:
# https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
optimizer = tf.keras.optimizers.RMSprop(lr=learning_rate)
# Compile Keras model
model.compile(
loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
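# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original trainer): a minimal end-to-end use
# of input_fn() and create_keras_model() on synthetic data. The array shapes,
# batch size and learning rate below are invented for illustration, and the
# helper is not called anywhere in this module.
def _train_on_synthetic_data():
  import numpy as np
  num_examples, input_dim = 1000, 20
  features = np.random.rand(num_examples, input_dim).astype('float32')
  labels = np.random.randint(0, 2, size=(num_examples,)).astype('float32')
  model = create_keras_model(input_dim=input_dim, learning_rate=0.001)
  train_ds = input_fn(features, labels, shuffle=True, num_epochs=5,
                      batch_size=32)
  model.fit(train_ds, epochs=5, steps_per_epoch=num_examples // 32)
  return model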
| apache-2.0 | 2,626,484,259,290,972,700 | 33.164706 | 80 | 0.689738 | false |
anderojas1/moodle | moodle/moodle/settings.py | 1 | 3079 | """
Django settings for moodle project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from unipath import Path
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PATH_PROJECT = Path(__file__).ancestor(2)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@6@*fsmwwotsdtsoz^19nw@7_0hehf1x5@(p1vbncz%5uxr4(_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django.contrib.sites',
'app.campus',
'app.curso',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'moodle.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [PATH_PROJECT.child('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'moodle.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'desarrollo2',
'USER': 'anderojas',
'PASSWORD': 'univalle',
'HOST': 'localhost',
'PORT': '5432',
}
}
#MEDIA_URL = PATH_PROJECT.child('media')
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'es-co'
TIME_ZONE = 'America/Bogota'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
'templates/campus/static/',
)
| gpl-2.0 | 2,914,428,871,197,563,400 | 25.316239 | 71 | 0.68204 | false |
safalNeupane/TestIt | testit.py | 1 | 1963 | #!/usr/bin/env python
import time
import os
import sys
import argparse
import subprocess
class TestIt:
"""Execute test files if they are modified"""
def __init__(self, testFiles):
self.files = testFiles
self.old_time = {}
self.cur_time = {}
def _getModified(self):
"""Returns recently modified files"""
modified = []
for f in self.files:
if (self.old_time[f] == self.cur_time[f]):
continue
else:
modified.append(f)
return modified
def _calcTime(self):
"""Calculates the modified time of a file"""
for f in self.files:
self.cur_time[f] = os.path.getmtime(f)
def test(self):
"""Compares the modified times and execute the files"""
self._calcTime()
self.old_time = self.cur_time.copy()
while True:
try:
time.sleep(1)
self._calcTime()
modified = self._getModified()
if not modified:
continue
else:
print("======================================")
print("[*] Running : ")
for m in modified:
print("\t----- " + m)
subprocess.Popen("./"+ m)
self.old_time = self.cur_time.copy()
except KeyboardInterrupt:
print(" Exiting TestIT .........")
break
except Exception as e:
                print("[-] Oops! Something happened : " + str(e))
continue
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Automate unittest.')
parser.add_argument('testFile', type=str, nargs='+', help='A python file with unittest TestCases')
args = parser.parse_args()
testFiles = args.testFile
print("[*] Searching Test Files .......")
    # iterate over a copy: removing a missing file from the list being looped
    # over would otherwise skip the entry that follows it
    for test in list(testFiles):
if os.path.isfile(test):
print("\t ----- [+] " + test + " Found!")
else:
print("\t ----- [-] " + test + " Not Found!")
testFiles.remove(test)
if not testFiles:
print("[-] No Test Files Found !")
sys.exit(1)
print("\n[*] Running Following Tests : ")
for test in testFiles:
print("\t -------- " + test)
print("\n")
testit = TestIt(testFiles)
testit.test()
| gpl-3.0 | -6,072,755,537,296,116,000 | 22.369048 | 99 | 0.603158 | false |
dhenrygithub/QGIS | python/plugins/processing/algs/gdal/ogrsql.py | 6 | 3204 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ogrsql.py
---------------------
Date : November 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'November 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools.vector import ogrConnectionString
DIALECTS = [None, 'ogrsql', 'sqlite']
class OgrSql(GdalAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
SQL = 'SQL'
DIALECT = 'DIALECT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Execute SQL')
self.group, self.i18n_group = self.trAlgorithm('[OGR] Miscellaneous')
self.addParameter(ParameterVector(self.INPUT, self.tr('Input layer'),
[ParameterVector.VECTOR_TYPE_ANY], False))
self.addParameter(ParameterString(self.SQL, self.tr('SQL'), ''))
self.addParameter(ParameterSelection(
self.DIALECT,
self.tr('Dialect'),
DIALECTS)
)
self.addOutput(OutputVector(self.OUTPUT, self.tr('SQL result')))
def getConsoleCommands(self):
sql = self.getParameterValue(self.SQL)
if sql == '':
raise GeoAlgorithmExecutionException(
self.tr('Empty SQL. Please enter valid SQL expression and try again.'))
arguments = []
arguments.append('-sql')
arguments.append(sql)
dialectIdx = self.getParameterValue(self.DIALECT)
dialect = DIALECTS[dialectIdx]
if dialect:
arguments.append("-dialect")
arguments.append(dialect)
output = self.getOutputFromName(self.OUTPUT)
outFile = output.value
arguments.append(outFile)
layer = self.getParameterValue(self.INPUT)
conn = ogrConnectionString(layer)[1:-1]
arguments.append(conn)
return ['ogr2ogr', GdalUtils.escapeAndJoin(arguments)]
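        # Editor's note (illustrative only): for an input layer /data/roads.shp,
        # the SQL "SELECT * FROM roads" and the 'sqlite' dialect, this returns
        # roughly
        #   ['ogr2ogr', '-sql "SELECT * FROM roads" -dialect sqlite <output> /data/roads.shp']
        # i.e. the binary name plus one escaped argument string; the paths here
        # are invented.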
| gpl-2.0 | 5,991,469,534,832,261,000 | 34.208791 | 89 | 0.567104 | false |
semicoyoung/leetcode | solutions/028.Implement_strStr()/AC_rolling_hash_n.py | 7 | 1213 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_rolling_hash_n.py
# Create Date: 2015-03-06 15:02:07
# Usage: AC_rolling_hash_n.py
# Descripton:
class Solution:
# @param haystack, a string
# @param needle, a string
# @return an integer
def strStr(self, haystack, needle):
hlen, nlen = len(haystack), len(needle)
if nlen == 0:
return 0
if nlen > hlen or hlen == 0:
return -1
rolling = lambda x, y: x * 29 + y
get_hash = lambda ch: ord(ch) - ord('a')
nhash = reduce(rolling, map(get_hash, needle))
hhash = reduce(rolling, map(get_hash, haystack[:nlen]))
if nhash == hhash:
return 0
high_base = 29 ** (nlen - 1)
for i in range(nlen, hlen):
hhash -= get_hash(haystack[i - nlen]) * high_base # remove first in hash code
hhash = rolling(hhash, get_hash(haystack[i])) # add new
if nhash == hhash:
return i - nlen + 1
return -1
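# Editor's note on the hashing scheme above (explanatory only): each window of
# the haystack is encoded as a base-29 number whose digits are ord(ch) - ord('a').
# Sliding the window subtracts the leading digit times 29**(nlen - 1) and then
# appends the new digit via rolling(). For windows of length 2, for example,
# hash("bc") = 1*29 + 2 = 31, and sliding to "cd" gives (31 - 1*29)*29 + 3 = 61,
# the same as the direct hash 2*29 + 3. Since every digit is below 29 for
# lowercase input, equal hashes imply equal windows, so no extra string
# comparison is needed.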
# debug
s = Solution()
print s.strStr('abcd', 'cd')
print s.strStr('', 'a')
print s.strStr('a', '')
print s.strStr('', '')
| gpl-2.0 | -4,085,151,994,210,057,000 | 26.568182 | 89 | 0.535037 | false |
agileblaze/OpenStackTwoFactorAuthentication | openstack_dashboard/dashboards/project/networks/ports/views.py | 16 | 5037 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.ports \
import forms as project_forms
from openstack_dashboard.dashboards.project.networks.ports \
import tables as project_tables
from openstack_dashboard.dashboards.project.networks.ports \
import tabs as project_tabs
STATE_DICT = dict(project_tables.DISPLAY_CHOICES)
STATUS_DICT = dict(project_tables.STATUS_DISPLAY_CHOICES)
class DetailView(tabs.TabView):
tab_group_class = project_tabs.PortDetailTabs
template_name = 'project/networks/ports/detail.html'
page_title = _("Port Details")
@memoized.memoized_method
def get_data(self):
port_id = self.kwargs['port_id']
try:
port = api.neutron.port_get(self.request, port_id)
port.admin_state_label = STATE_DICT.get(port.admin_state,
port.admin_state)
port.status_label = STATUS_DICT.get(port.status,
port.status)
except Exception:
port = []
redirect = self.get_redirect_url()
msg = _('Unable to retrieve port details.')
exceptions.handle(self.request, msg, redirect=redirect)
if (api.neutron.is_extension_supported(self.request, 'mac-learning')
and not hasattr(port, 'mac_state')):
port.mac_state = api.neutron.OFF_STATE
return port
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
port = self.get_data()
table = project_tables.PortsTable(self.request,
network_id=port.network_id)
context["port"] = port
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(port)
return context
def get_tabs(self, request, *args, **kwargs):
port = self.get_data()
return self.tab_group_class(request, port=port, **kwargs)
@staticmethod
def get_redirect_url():
return reverse('horizon:project:networks:index')
class UpdateView(forms.ModalFormView):
form_class = project_forms.UpdatePort
form_id = "update_port_form"
modal_header = _("Edit Port")
template_name = 'project/networks/ports/update.html'
context_object_name = 'port'
submit_label = _("Save Changes")
submit_url = "horizon:project:networks:editport"
success_url = 'horizon:project:networks:detail'
page_title = _("Update Port")
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['network_id'],))
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
port_id = self.kwargs['port_id']
try:
return api.neutron.port_get(self.request, port_id)
except Exception:
redirect = self.get_success_url()
msg = _('Unable to retrieve port details')
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
port = self._get_object()
context['port_id'] = port['id']
context['network_id'] = port['network_id']
args = (self.kwargs['network_id'], self.kwargs['port_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
context['cancel_url'] = reverse(self.success_url,
args=(self.kwargs['network_id'],))
return context
def get_initial(self):
port = self._get_object()
initial = {'port_id': port['id'],
'network_id': port['network_id'],
'tenant_id': port['tenant_id'],
'name': port['name'],
'admin_state': port['admin_state_up']}
if port['binding__vnic_type']:
initial['binding__vnic_type'] = port['binding__vnic_type']
try:
initial['mac_state'] = port['mac_learning_enabled']
except Exception:
# MAC Learning is not set
pass
return initial
| apache-2.0 | 8,972,426,100,918,073,000 | 37.159091 | 78 | 0.618424 | false |
jamesmcm/luigi | luigi/execution_summary.py | 4 | 20262 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides the function :py:func:`summary` that is used for printing
an `execution summary
<https://github.com/spotify/luigi/blob/master/examples/execution_summary_example.py>`_
at the end of luigi invocations.
"""
import textwrap
import collections
import functools
import enum
import luigi
class execution_summary(luigi.Config):
summary_length = luigi.IntParameter(default=5)
class LuigiStatusCode(enum.Enum):
"""
All possible status codes for the attribute ``status`` in :class:`~luigi.execution_summary.LuigiRunResult` when
    the argument ``detailed_summary=True`` is passed to *luigi.run() / luigi.build*.
Here are the codes and what they mean:
============================= ==========================================================
Status Code Name Meaning
============================= ==========================================================
SUCCESS There were no failed tasks or missing dependencies
SUCCESS_WITH_RETRY There were failed tasks but they all succeeded in a retry
FAILED There were failed tasks
FAILED_AND_SCHEDULING_FAILED There were failed tasks and tasks whose scheduling failed
SCHEDULING_FAILED There were tasks whose scheduling failed
NOT_RUN There were tasks that were not granted run permission by the scheduler
MISSING_EXT There were missing external dependencies
============================= ==========================================================
"""
SUCCESS = (":)", "there were no failed tasks or missing dependencies")
SUCCESS_WITH_RETRY = (":)", "there were failed tasks but they all succeeded in a retry")
FAILED = (":(", "there were failed tasks")
FAILED_AND_SCHEDULING_FAILED = (":(", "there were failed tasks and tasks whose scheduling failed")
SCHEDULING_FAILED = (":(", "there were tasks whose scheduling failed")
NOT_RUN = (":|", "there were tasks that were not granted run permission by the scheduler")
MISSING_EXT = (":|", "there were missing external dependencies")
class LuigiRunResult:
"""
The result of a call to build/run when passing the detailed_summary=True argument.
Attributes:
- one_line_summary (str): One line summary of the progress.
- summary_text (str): Detailed summary of the progress.
- status (LuigiStatusCode): Luigi Status Code. See :class:`~luigi.execution_summary.LuigiStatusCode` for what these codes mean.
- worker (luigi.worker.worker): Worker object. See :class:`~luigi.worker.worker`.
- scheduling_succeeded (bool): Boolean which is *True* if all the tasks were scheduled without errors.
"""
def __init__(self, worker, worker_add_run_status=True):
self.worker = worker
summary_dict = _summary_dict(worker)
self.summary_text = _summary_wrap(_summary_format(summary_dict, worker))
self.status = _tasks_status(summary_dict)
self.one_line_summary = _create_one_line_summary(self.status)
self.scheduling_succeeded = worker_add_run_status
def __str__(self):
return "LuigiRunResult with status {0}".format(self.status)
def __repr__(self):
return "LuigiRunResult(status={0!r},worker={1!r},scheduling_succeeded={2!r})".format(self.status, self.worker, self.scheduling_succeeded)
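# Editor's sketch (not used by any code path in this module): how client code
# typically consumes LuigiRunResult. The no-op task below is invented purely to
# keep the example self-contained.
def _example_detailed_summary_usage():  # pragma: no cover - documentation aid
    class _AlreadyComplete(luigi.Task):
        def complete(self):
            return True
    result = luigi.build([_AlreadyComplete()], local_scheduler=True,
                         detailed_summary=True)
    print(result.one_line_summary)
    print(result.summary_text)
    return result.status in (LuigiStatusCode.SUCCESS,
                             LuigiStatusCode.SUCCESS_WITH_RETRY)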
def _partition_tasks(worker):
"""
Takes a worker and sorts out tasks based on their status.
Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
"""
task_history = worker._add_task_history
pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'}
set_tasks = {}
set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks}
set_tasks["already_done"] = {task for (task, status, ext) in task_history
if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]}
set_tasks["ever_failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'}
set_tasks["failed"] = set_tasks["ever_failed"] - set_tasks["completed"]
set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'}
set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history
if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and not ext}
set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history
if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and ext}
set_tasks["run_by_other_worker"] = set()
set_tasks["upstream_failure"] = set()
set_tasks["upstream_missing_dependency"] = set()
set_tasks["upstream_run_by_other_worker"] = set()
set_tasks["upstream_scheduling_error"] = set()
set_tasks["not_run"] = set()
return set_tasks
def _root_task(worker):
"""
Return the first task scheduled by the worker, corresponding to the root task
"""
return worker._add_task_history[0][0]
def _populate_unknown_statuses(set_tasks):
"""
    Add the "upstream_*" and "not_run" statuses by mutating set_tasks.
"""
visited = set()
for task in set_tasks["still_pending_not_ext"]:
_depth_first_search(set_tasks, task, visited)
def _depth_first_search(set_tasks, current_task, visited):
"""
This dfs checks why tasks are still pending.
"""
visited.add(current_task)
if current_task in set_tasks["still_pending_not_ext"]:
upstream_failure = False
upstream_missing_dependency = False
upstream_run_by_other_worker = False
upstream_scheduling_error = False
for task in current_task._requires():
if task not in visited:
_depth_first_search(set_tasks, task, visited)
if task in set_tasks["ever_failed"] or task in set_tasks["upstream_failure"]:
set_tasks["upstream_failure"].add(current_task)
upstream_failure = True
if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]:
set_tasks["upstream_missing_dependency"].add(current_task)
upstream_missing_dependency = True
if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]:
set_tasks["upstream_run_by_other_worker"].add(current_task)
upstream_run_by_other_worker = True
if task in set_tasks["scheduling_error"]:
set_tasks["upstream_scheduling_error"].add(current_task)
upstream_scheduling_error = True
if not upstream_failure and not upstream_missing_dependency and \
not upstream_run_by_other_worker and not upstream_scheduling_error and \
current_task not in set_tasks["run_by_other_worker"]:
set_tasks["not_run"].add(current_task)
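# Editor's note: the traversal above tags each still-pending, non-external task
# with the reason it is blocked (a failed dependency, a missing external
# dependency, a dependency run by another worker, or a dependency whose
# scheduling failed); a pending task with none of those reasons, and not run by
# another worker itself, ends up in the "not_run" set.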
def _get_str(task_dict, extra_indent):
"""
This returns a string for each status
"""
summary_length = execution_summary().summary_length
lines = []
task_names = sorted(task_dict.keys())
for task_family in task_names:
tasks = task_dict[task_family]
tasks = sorted(tasks, key=lambda x: str(x))
prefix_size = 8 if extra_indent else 4
prefix = ' ' * prefix_size
line = None
if summary_length > 0 and len(lines) >= summary_length:
line = prefix + "..."
lines.append(line)
break
if len(tasks[0].get_params()) == 0:
line = prefix + '- {0} {1}()'.format(len(tasks), str(task_family))
elif _get_len_of_params(tasks[0]) > 60 or len(str(tasks[0])) > 200 or \
(len(tasks) == 2 and len(tasks[0].get_params()) > 1 and (_get_len_of_params(tasks[0]) > 40 or len(str(tasks[0])) > 100)):
"""
This is to make sure that there is no really long task in the output
"""
line = prefix + '- {0} {1}(...)'.format(len(tasks), task_family)
elif len((tasks[0].get_params())) == 1:
attributes = {getattr(task, tasks[0].get_params()[0][0]) for task in tasks}
param_class = tasks[0].get_params()[0][1]
first, last = _ranging_attributes(attributes, param_class)
if first is not None and last is not None and len(attributes) > 3:
param_str = '{0}...{1}'.format(param_class.serialize(first), param_class.serialize(last))
else:
param_str = '{0}'.format(_get_str_one_parameter(tasks))
line = prefix + '- {0} {1}({2}={3})'.format(len(tasks), task_family, tasks[0].get_params()[0][0], param_str)
else:
ranging = False
params = _get_set_of_params(tasks)
unique_param_keys = list(_get_unique_param_keys(params))
if len(unique_param_keys) == 1:
unique_param, = unique_param_keys
attributes = params[unique_param]
param_class = unique_param[1]
first, last = _ranging_attributes(attributes, param_class)
if first is not None and last is not None and len(attributes) > 2:
ranging = True
line = prefix + '- {0} {1}({2}'.format(len(tasks), task_family, _get_str_ranging_multiple_parameters(first, last, tasks, unique_param))
if not ranging:
if len(tasks) == 1:
line = prefix + '- {0} {1}'.format(len(tasks), tasks[0])
if len(tasks) == 2:
line = prefix + '- {0} {1} and {2}'.format(len(tasks), tasks[0], tasks[1])
if len(tasks) > 2:
line = prefix + '- {0} {1} ...'.format(len(tasks), tasks[0])
lines.append(line)
return '\n'.join(lines)
def _get_len_of_params(task):
return sum(len(param[0]) for param in task.get_params())
def _get_str_ranging_multiple_parameters(first, last, tasks, unique_param):
row = ''
str_unique_param = '{0}...{1}'.format(unique_param[1].serialize(first), unique_param[1].serialize(last))
for param in tasks[0].get_params():
row += '{0}='.format(param[0])
if param[0] == unique_param[0]:
row += '{0}'.format(str_unique_param)
else:
row += '{0}'.format(param[1].serialize(getattr(tasks[0], param[0])))
if param != tasks[0].get_params()[-1]:
row += ", "
row += ')'
return row
def _get_set_of_params(tasks):
params = {}
for param in tasks[0].get_params():
params[param] = {getattr(task, param[0]) for task in tasks}
return params
def _get_unique_param_keys(params):
for param_key, param_values in params.items():
if len(param_values) > 1:
yield param_key
def _ranging_attributes(attributes, param_class):
"""
Checks if there is a continuous range
"""
next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
in_first = attributes.difference(next_attributes)
in_second = next_attributes.difference(attributes)
if len(in_first) == 1 and len(in_second) == 1:
for x in attributes:
if {param_class.next_in_enumeration(x)} == in_second:
return next(iter(in_first)), x
return None, None
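# Editor's note: for instance, with an IntParameter the attribute set {1, 2, 3, 4}
# is recognised as the continuous range (1, 4), whereas {1, 3, 4} has a gap and
# yields (None, None).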
def _get_str_one_parameter(tasks):
row = ''
count = 0
for task in tasks:
if (len(row) >= 30 and count > 2 and count != len(tasks) - 1) or len(row) > 200:
row += '...'
break
param = task.get_params()[0]
row += '{0}'.format(param[1].serialize(getattr(task, param[0])))
if count < len(tasks) - 1:
row += ','
count += 1
return row
def _serialize_first_param(task):
return task.get_params()[0][1].serialize(getattr(task, task.get_params()[0][0]))
def _get_number_of_tasks_for(status, group_tasks):
if status == "still_pending":
return (_get_number_of_tasks(group_tasks["still_pending_ext"]) +
_get_number_of_tasks(group_tasks["still_pending_not_ext"]))
return _get_number_of_tasks(group_tasks[status])
def _get_number_of_tasks(task_dict):
return sum(len(tasks) for tasks in task_dict.values())
def _get_comments(group_tasks):
"""
Get the human readable comments and quantities for the task types.
"""
comments = {}
for status, human in _COMMENTS:
num_tasks = _get_number_of_tasks_for(status, group_tasks)
if num_tasks:
space = " " if status in _PENDING_SUB_STATUSES else ""
comments[status] = '{space}* {num_tasks} {human}:\n'.format(
space=space,
num_tasks=num_tasks,
human=human)
return comments
# Ordered in the sense that they'll be printed in this order
_ORDERED_STATUSES = (
"already_done",
"completed",
"ever_failed",
"failed",
"scheduling_error",
"still_pending",
"still_pending_ext",
"run_by_other_worker",
"upstream_failure",
"upstream_missing_dependency",
"upstream_run_by_other_worker",
"upstream_scheduling_error",
"not_run",
)
_PENDING_SUB_STATUSES = set(_ORDERED_STATUSES[_ORDERED_STATUSES.index("still_pending_ext"):])
_COMMENTS = {
("already_done", 'complete ones were encountered'),
("completed", 'ran successfully'),
("failed", 'failed'),
("scheduling_error", 'failed scheduling'),
("still_pending", 'were left pending, among these'),
("still_pending_ext", 'were missing external dependencies'),
("run_by_other_worker", 'were being run by another worker'),
("upstream_failure", 'had failed dependencies'),
("upstream_missing_dependency", 'had missing dependencies'),
("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'),
("upstream_scheduling_error", 'had dependencies whose scheduling failed'),
("not_run", 'was not granted run permission by the scheduler'),
}
def _get_run_by_other_worker(worker):
"""
    This returns the set of tasks that are being run by other workers
"""
task_sets = _get_external_workers(worker).values()
return functools.reduce(lambda a, b: a | b, task_sets, set())
def _get_external_workers(worker):
"""
This returns a dict with a set of tasks for all of the other workers
"""
worker_that_blocked_task = collections.defaultdict(set)
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_worker_id == worker._id or not other_task:
continue
worker_that_blocked_task[other_worker_id].add(other_task)
return worker_that_blocked_task
def _group_tasks_by_name_and_status(task_dict):
"""
    Takes an iterable of tasks (one of the status sets built above) and
    returns a dictionary mapping each task family name to the list of tasks
    belonging to that family
"""
group_status = {}
for task in task_dict:
if task.task_family not in group_status:
group_status[task.task_family] = []
group_status[task.task_family].append(task)
return group_status
def _summary_dict(worker):
set_tasks = _partition_tasks(worker)
set_tasks["run_by_other_worker"] = _get_run_by_other_worker(worker)
_populate_unknown_statuses(set_tasks)
return set_tasks
def _summary_format(set_tasks, worker):
group_tasks = {}
for status, task_dict in set_tasks.items():
group_tasks[status] = _group_tasks_by_name_and_status(task_dict)
comments = _get_comments(group_tasks)
num_all_tasks = sum([len(set_tasks["already_done"]),
len(set_tasks["completed"]), len(set_tasks["failed"]),
len(set_tasks["scheduling_error"]),
len(set_tasks["still_pending_ext"]),
len(set_tasks["still_pending_not_ext"])])
str_output = ''
str_output += 'Scheduled {0} tasks of which:\n'.format(num_all_tasks)
for status in _ORDERED_STATUSES:
if status not in comments:
continue
str_output += '{0}'.format(comments[status])
if status != 'still_pending':
str_output += '{0}\n'.format(_get_str(group_tasks[status], status in _PENDING_SUB_STATUSES))
ext_workers = _get_external_workers(worker)
group_tasks_ext_workers = {}
for ext_worker, task_dict in ext_workers.items():
group_tasks_ext_workers[ext_worker] = _group_tasks_by_name_and_status(task_dict)
if len(ext_workers) > 0:
str_output += "\nThe other workers were:\n"
count = 0
for ext_worker, task_dict in ext_workers.items():
if count > 3 and count < len(ext_workers) - 1:
str_output += " and {0} other workers".format(len(ext_workers) - count)
break
str_output += " - {0} ran {1} tasks\n".format(ext_worker, len(task_dict))
count += 1
str_output += '\n'
if num_all_tasks == sum([len(set_tasks["already_done"]),
len(set_tasks["scheduling_error"]),
len(set_tasks["still_pending_ext"]),
len(set_tasks["still_pending_not_ext"])]):
if len(ext_workers) == 0:
str_output += '\n'
str_output += 'Did not run any tasks'
one_line_summary = _create_one_line_summary(_tasks_status(set_tasks))
str_output += "\n{0}".format(one_line_summary)
if num_all_tasks == 0:
str_output = 'Did not schedule any tasks'
return str_output
def _create_one_line_summary(status_code):
"""
Given a status_code of type LuigiStatusCode which has a tuple value, returns a one line summary
"""
return "This progress looks {0} because {1}".format(*status_code.value)
def _tasks_status(set_tasks):
"""
Given a grouped set of tasks, returns a LuigiStatusCode
"""
if set_tasks["ever_failed"]:
if not set_tasks["failed"]:
return LuigiStatusCode.SUCCESS_WITH_RETRY
else:
if set_tasks["scheduling_error"]:
return LuigiStatusCode.FAILED_AND_SCHEDULING_FAILED
return LuigiStatusCode.FAILED
elif set_tasks["scheduling_error"]:
return LuigiStatusCode.SCHEDULING_FAILED
elif set_tasks["not_run"]:
return LuigiStatusCode.NOT_RUN
elif set_tasks["still_pending_ext"]:
return LuigiStatusCode.MISSING_EXT
else:
return LuigiStatusCode.SUCCESS
def _summary_wrap(str_output):
return textwrap.dedent("""
===== Luigi Execution Summary =====
{str_output}
===== Luigi Execution Summary =====
""").format(str_output=str_output)
def summary(worker):
"""
Given a worker, return a human readable summary of what the worker have
done.
"""
return _summary_wrap(_summary_format(_summary_dict(worker), worker))
# 5
| apache-2.0 | 5,799,251,319,585,199,000 | 40.605749 | 155 | 0.606752 | false |
wunderlins/learning | python/zodb/lib/osx/ZConfig/tests/test_datatypes.py | 2 | 15327 | ##############################################################################
#
# Copyright (c) 2002, 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests of standard ZConfig datatypes."""
import os
import sys
import shutil
import socket
import datetime
import tempfile
import unittest
import ZConfig.datatypes
try:
here = __file__
except NameError:
here = sys.argv[0]
here = os.path.abspath(here)
try:
unicode
except NameError:
have_unicode = False
else:
have_unicode = True
class DatatypeTestCase(unittest.TestCase):
types = ZConfig.datatypes.Registry()
def test_datatype_basickey(self):
convert = self.types.get("basic-key")
eq = self.assertEqual
raises = self.assertRaises
eq(convert("abc"), "abc")
eq(convert("ABC_DEF.123"), "abc_def.123")
eq(convert("Abc-Def-456"), "abc-def-456")
eq(convert("Abc.Def"), "abc.def")
raises(ValueError, convert, "_abc")
raises(ValueError, convert, "-abc")
raises(ValueError, convert, "123")
raises(ValueError, convert, "")
def test_datatype_boolean(self):
convert = self.types.get("boolean")
check = self.assertTrue
raises = self.assertRaises
check(convert("on"))
check(convert("true"))
check(convert("yes"))
check(not convert("off"))
check(not convert("false"))
check(not convert("no"))
raises(ValueError, convert, '0')
raises(ValueError, convert, '1')
raises(ValueError, convert, '')
raises(ValueError, convert, 'junk')
def test_datatype_float(self):
convert = self.types.get("float")
eq = self.assertEqual
raises = self.assertRaises
eq(convert("1"), 1.0)
self.assertTrue(type(convert(1)) is type(1.0))
eq(convert("1.1"), 1.1)
eq(convert("50.50"), 50.50)
eq(convert("-50.50"), -50.50)
eq(convert(0), 0.0)
eq(convert("0"), 0.0)
eq(convert("-0"), 0.0)
eq(convert("0.0"), 0.0)
raises(ValueError, convert, "junk")
raises(ValueError, convert, "0x234.1.9")
raises(ValueError, convert, "0.9-")
# These are not portable representations; make sure they are
# disallowed everywhere for consistency.
raises(ValueError, convert, "inf")
raises(ValueError, convert, "-inf")
raises(ValueError, convert, "nan")
if have_unicode:
raises(ValueError, convert, unicode("inf"))
raises(ValueError, convert, unicode("-inf"))
raises(ValueError, convert, unicode("nan"))
def test_datatype_identifier(self):
convert = self.types.get("identifier")
raises = self.assertRaises
self.check_names(convert)
self.check_never_namelike(convert)
raises(ValueError, convert, ".abc")
def check_names(self, convert):
eq = self.assert_ascii_equal
eq(convert, "AbcDef")
eq(convert, "a________")
eq(convert, "abc_def")
eq(convert, "int123")
eq(convert, "_abc")
eq(convert, "_123")
eq(convert, "__dict__")
def assert_ascii_equal(self, convert, value):
v = convert(value)
self.assertEqual(v, value)
self.assertTrue(isinstance(v, str))
if have_unicode:
unicode_value = unicode(value)
v = convert(unicode_value)
self.assertEqual(v, value)
self.assertTrue(isinstance(v, str))
def check_never_namelike(self, convert):
raises = self.assertRaises
raises(ValueError, convert, "2345")
raises(ValueError, convert, "23.45")
raises(ValueError, convert, ".45")
raises(ValueError, convert, "23.")
raises(ValueError, convert, "abc.")
raises(ValueError, convert, "-abc")
raises(ValueError, convert, "-123")
raises(ValueError, convert, "abc-")
raises(ValueError, convert, "123-")
raises(ValueError, convert, "-")
raises(ValueError, convert, ".")
raises(ValueError, convert, "&%$*()")
raises(ValueError, convert, "")
def test_datatype_dotted_name(self):
convert = self.types.get("dotted-name")
raises = self.assertRaises
self.check_names(convert)
self.check_dotted_names(convert)
self.check_never_namelike(convert)
raises(ValueError, convert, "abc.")
raises(ValueError, convert, ".abc.")
raises(ValueError, convert, "abc.def.")
raises(ValueError, convert, ".abc.def.")
raises(ValueError, convert, ".abc.def")
def test_datatype_dotted_suffix(self):
convert = self.types.get("dotted-suffix")
eq = self.assert_ascii_equal
raises = self.assertRaises
self.check_names(convert)
self.check_dotted_names(convert)
self.check_never_namelike(convert)
eq(convert, ".a")
eq(convert, ".a.b")
eq(convert, ".a.b.c.d.e.f.g.h.i.j.k.l.m.n.o")
raises(ValueError, convert, "abc.")
raises(ValueError, convert, ".abc.")
raises(ValueError, convert, "abc.def.")
raises(ValueError, convert, ".abc.def.")
def check_dotted_names(self, convert):
eq = self.assert_ascii_equal
eq(convert, "abc.def")
eq(convert, "abc.def.ghi")
eq(convert, "a.d.g.g.g.g.g.g.g")
def test_datatype_inet_address(self):
convert = self.types.get("inet-address")
eq = self.assertEqual
defhost = ZConfig.datatypes.DEFAULT_HOST
eq(convert("Host.Example.Com:80"), ("host.example.com", 80))
eq(convert("Host.Example.Com:0"), ("host.example.com", 0))
eq(convert(":80"), (defhost, 80))
eq(convert("80"), (defhost, 80))
eq(convert("[::1]:80"), ("::1", 80))
eq(convert("host.EXAMPLE.com"), ("host.example.com", None))
eq(convert("2001::ABCD"), ("2001::abcd", None))
self.assertRaises(ValueError, convert, "40 # foo")
def test_datatype_inet_binding_address(self):
convert = self.types.get("inet-binding-address")
eq = self.assertEqual
defhost = ""
eq(convert("Host.Example.Com:80"), ("host.example.com", 80))
eq(convert(":80"), (defhost, 80))
eq(convert("80"), (defhost, 80))
eq(convert("host.EXAMPLE.com"), ("host.example.com", None))
self.assertRaises(ValueError, convert, "40 # foo")
def test_datatype_inet_connection_address(self):
convert = self.types.get("inet-connection-address")
eq = self.assertEqual
defhost = "127.0.0.1"
eq(convert("Host.Example.Com:80"), ("host.example.com", 80))
eq(convert(":80"), (defhost, 80))
eq(convert("80"), (defhost, 80))
eq(convert("host.EXAMPLE.com"), ("host.example.com", None))
self.assertRaises(ValueError, convert, "40 # foo")
def test_datatype_integer(self):
convert = self.types.get("integer")
eq = self.assertEqual
raises = self.assertRaises
eq(convert('-100'), -100)
eq(convert('-1'), -1)
eq(convert('-0'), 0)
eq(convert('0'), 0)
eq(convert('1'), 1)
eq(convert('100'), 100)
eq(convert('65535'), 65535)
eq(convert('65536'), 65536)
raises(ValueError, convert, 'abc')
raises(ValueError, convert, '-0xabc')
raises(ValueError, convert, '')
raises(ValueError, convert, '123 456')
raises(ValueError, convert, '123-')
def test_datatype_locale(self):
convert = self.types.get("locale")
# Python supports "C" even when the _locale module is not available
self.assertEqual(convert("C"), "C")
self.assertRaises(ValueError, convert, "locale-does-not-exist")
def test_datatype_port(self):
convert = self.types.get("port-number")
eq = self.assertEqual
raises = self.assertRaises
raises(ValueError, convert, '-1')
eq(convert('0'), 0)
eq(convert('1'), 1)
eq(convert('80'), 80)
eq(convert('1023'), 1023)
eq(convert('1024'), 1024)
eq(convert('60000'), 60000)
eq(convert('65535'), 0xffff)
raises(ValueError, convert, '65536')
def test_datatype_socket_address(self):
convert = self.types.get("socket-address")
eq = self.assertEqual
AF_INET = socket.AF_INET
AF_INET6 = socket.AF_INET6
defhost = ZConfig.datatypes.DEFAULT_HOST
def check(value, family, address, self=self, convert=convert):
a = convert(value)
self.assertEqual(a.family, family)
self.assertEqual(a.address, address)
check("Host.Example.Com:80", AF_INET, ("host.example.com", 80))
check(":80", AF_INET, (defhost, 80))
check("80", AF_INET, (defhost, 80))
check("host.EXAMPLE.com", AF_INET, ("host.example.com",None))
check("::1", AF_INET6,("::1", None))
check("[::]:80", AF_INET6,("::", 80))
a1 = convert("/tmp/var/@345.4")
a2 = convert("/tmp/var/@345.4:80")
self.assertEqual(a1.address, "/tmp/var/@345.4")
self.assertEqual(a2.address, "/tmp/var/@345.4:80")
if hasattr(socket, "AF_UNIX"):
self.assertEqual(a1.family, socket.AF_UNIX)
self.assertEqual(a2.family, socket.AF_UNIX)
else:
self.assertTrue(a1.family is None)
self.assertTrue(a2.family is None)
def test_ipaddr_or_hostname(self):
convert = self.types.get('ipaddr-or-hostname')
eq = self.assertEqual
raises = self.assertRaises
eq(convert('hostname'), 'hostname')
eq(convert('hostname.com'), 'hostname.com')
eq(convert('www.hostname.com'), 'www.hostname.com')
eq(convert('HOSTNAME'), 'hostname')
eq(convert('HOSTNAME.COM'), 'hostname.com')
eq(convert('WWW.HOSTNAME.COM'), 'www.hostname.com')
eq(convert('127.0.0.1'), '127.0.0.1')
eq(convert('::1'), '::1')
eq(convert('2001:DB8:1234:4567:89AB:cdef:0:1'), '2001:db8:1234:4567:89ab:cdef:0:1')
eq(convert('2001:DB8:1234:4567::10.11.12.13'), '2001:db8:1234:4567::10.11.12.13')
raises(ValueError, convert, '1hostnamewithleadingnumeric')
raises(ValueError, convert, '255.255')
raises(ValueError, convert, '12345678')
raises(ValueError, convert, '999.999.999.999')
raises(ValueError, convert, 'a!badhostname')
raises(ValueError, convert, '2001:DB8:0123:4567:89AB:cdef:0:1:2')
raises(ValueError, convert, '2001:DB8:0123:4567::10.11.12.13.14')
def test_existing_directory(self):
convert = self.types.get('existing-directory')
eq = self.assertEqual
raises = self.assertRaises
eq(convert('.'), '.')
eq(convert(os.path.dirname(here)), os.path.dirname(here))
raises(ValueError, convert, tempfile.mktemp())
def test_existing_file(self):
convert = self.types.get('existing-file')
eq = self.assertEqual
raises = self.assertRaises
eq(convert('.'), '.')
eq(convert(here), here)
raises(ValueError, convert, tempfile.mktemp())
def test_existing_path(self):
convert = self.types.get('existing-path')
eq = self.assertEqual
raises = self.assertRaises
eq(convert('.'), '.')
eq(convert(here), here)
eq(convert(os.path.dirname(here)), os.path.dirname(here))
raises(ValueError, convert, tempfile.mktemp())
def test_existing_dirpath(self):
convert = self.types.get('existing-dirpath')
eq = self.assertEqual
raises = self.assertRaises
eq(convert('.'), '.')
eq(convert(here), here)
raises(ValueError, convert, '/a/hopefully/nonexistent/path')
raises(ValueError, convert, here + '/bogus')
def test_byte_size(self):
eq = self.assertEqual
raises = self.assertRaises
convert = self.types.get('byte-size')
eq(convert('128'), 128)
eq(convert('128KB'), 128*1024)
eq(convert('128MB'), 128*1024*1024)
eq(convert('128GB'), 128*1024*1024*1024)
raises(ValueError, convert, '128TB')
eq(convert('128'), 128)
eq(convert('128kb'), 128*1024)
eq(convert('128mb'), 128*1024*1024)
eq(convert('128gb'), 128*1024*1024*1024)
raises(ValueError, convert, '128tb')
def test_time_interval(self):
eq = self.assertEqual
raises = self.assertRaises
convert = self.types.get('time-interval')
eq(convert('120'), 120)
eq(convert('120S'), 120)
eq(convert('120M'), 120*60)
eq(convert('120H'), 120*60*60)
eq(convert('120D'), 120*60*60*24)
raises(ValueError, convert, '120W')
eq(convert('120'), 120)
eq(convert('120s'), 120)
eq(convert('120m'), 120*60)
eq(convert('120h'), 120*60*60)
eq(convert('120d'), 120*60*60*24)
raises(ValueError, convert, '120w')
def test_timedelta(self):
eq = self.assertEqual
raises = self.assertRaises
convert = self.types.get('timedelta')
eq(convert('4w'), datetime.timedelta(weeks=4))
eq(convert('2d'), datetime.timedelta(days=2))
eq(convert('7h'), datetime.timedelta(hours=7))
eq(convert('12m'), datetime.timedelta(minutes=12))
eq(convert('14s'), datetime.timedelta(seconds=14))
eq(convert('4w 2d 7h 12m 14s'),
datetime.timedelta(2, 14, minutes=12, hours=7, weeks=4))
class RegistryTestCase(unittest.TestCase):
def test_registry_does_not_mask_toplevel_imports(self):
old_sys_path = sys.path[:]
tmpdir = tempfile.mkdtemp(prefix="test_datatypes_")
fn = os.path.join(tmpdir, "datatypes.py")
f = open(fn, "w")
f.write(TEST_DATATYPE_SOURCE)
f.close()
registry = ZConfig.datatypes.Registry()
# we really want the temp area to override everything else:
sys.path.insert(0, tmpdir)
try:
datatype = registry.get("datatypes.my_sample_datatype")
finally:
shutil.rmtree(tmpdir)
sys.path[:] = old_sys_path
self.assertEqual(datatype, 42)
TEST_DATATYPE_SOURCE = """
# sample datatypes file
my_sample_datatype = 42
"""
def test_suite():
suite = unittest.makeSuite(DatatypeTestCase)
suite.addTest(unittest.makeSuite(RegistryTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| gpl-2.0 | -4,808,217,243,733,428,000 | 35.579952 | 91 | 0.578522 | false |
bcl/anaconda | pyanaconda/ui/categories/customization.py | 17 | 1393 | # Customization category classes
#
# Copyright (C) 2011, 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <[email protected]>
# Martin Sivak <[email protected]>
#
from pyanaconda.i18n import N_
from pyanaconda.ui.categories import SpokeCategory
__all__ = ["CustomizationCategory"]
class CustomizationCategory(SpokeCategory):
displayOnHubGUI = "SummaryHub"
displayOnHubTUI = "SummaryHub"
sortOrder = 100
title = N_("CUSTOMIZATION")
| gpl-2.0 | -2,610,193,471,564,894,000 | 42.53125 | 77 | 0.752333 | false |
nowls/gnuradio | grc/gui/Colors.py | 17 | 1969 | """
Copyright 2008,2013 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
try:
import pygtk
pygtk.require('2.0')
import gtk
_COLORMAP = gtk.gdk.colormap_get_system() #create all of the colors
def get_color(color_code): return _COLORMAP.alloc_color(color_code, True, True)
HIGHLIGHT_COLOR = get_color('#00FFFF')
BORDER_COLOR = get_color('#444444')
# missing blocks stuff
MISSING_BLOCK_BACKGROUND_COLOR = get_color('#FFF2F2')
MISSING_BLOCK_BORDER_COLOR = get_color('red')
#param entry boxes
PARAM_ENTRY_TEXT_COLOR = get_color('black')
ENTRYENUM_CUSTOM_COLOR = get_color('#EEEEEE')
#flow graph color constants
FLOWGRAPH_BACKGROUND_COLOR = get_color('#FFFFFF')
COMMENT_BACKGROUND_COLOR = get_color('#F3F3F3')
FLOWGRAPH_EDGE_COLOR = COMMENT_BACKGROUND_COLOR
#block color constants
BLOCK_ENABLED_COLOR = get_color('#F1ECFF')
BLOCK_DISABLED_COLOR = get_color('#CCCCCC')
BLOCK_BYPASSED_COLOR = get_color('#F4FF81')
#connection color constants
CONNECTION_ENABLED_COLOR = get_color('black')
CONNECTION_DISABLED_COLOR = get_color('#BBBBBB')
CONNECTION_ERROR_COLOR = get_color('red')
except:
print 'Unable to import Colors'
DEFAULT_DOMAIN_COLOR_CODE = '#777777'
| gpl-3.0 | 4,934,265,944,466,381,000 | 38.38 | 83 | 0.728288 | false |
MDXDave/ModernWebif | plugin/controllers/models/control.py | 1 | 5595 | # -*- coding: utf-8 -*-
##############################################################################
# 2011 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
from Components.config import config
from enigma import eServiceReference, eActionMap, eServiceCenter
from urllib import unquote
from services import getProtection
from Screens.InfoBar import InfoBar, MoviePlayer
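# Editor's note: zapInServiceList() below walks every bouquet under the
# configured TV and radio roots to find the one containing the requested
# service, moves the InfoBar service list selection onto it (entering the
# bouquet path when found) and zaps via the service list, so that later
# channel up/down follows the new position. zapService() uses it for live
# services and opens the MoviePlayer instead when the reference points at a
# recorded file.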
def zapInServiceList(service):
InfoBar_Instance = InfoBar.instance
servicelist = InfoBar_Instance.servicelist
if config.usage.multibouquet.value:
rootstrings = ('1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.tv" ORDER BY bouquet', '1:7:1:0:0:0:0:0:0:0:FROM BOUQUET "bouquets.radio" ORDER BY bouquet')
else:
rootstrings = ('1:7:1:0:0:0:0:0:0:0:(type == 1) || (type == 17) || (type == 22) || (type == 25) || (type == 134) || (type == 195) FROM BOUQUET "userbouquet.favourites.tv" ORDER BY bouquet', '1:7:2:0:0:0:0:0:0:0:(type == 2) || (type == 10) FROM BOUQUET "userbouquet.favourites.radio" ORDER BY bouquet')
bouquet_found = False
for bouquet_rootstr in rootstrings:
servicelist.bouquet_root = eServiceReference(bouquet_rootstr)
if bouquet_rootstr.find('radio') != -1:
servicelist.setModeRadio()
else:
servicelist.setModeTv()
bouquets = servicelist.getBouquetList()
for bouquet in bouquets:
reflist = [ ]
reflist = eServiceCenter.getInstance().list(bouquet[1])
if reflist:
while True:
new_service = reflist.getNext()
if not new_service.valid(): #check if end of list
break
if new_service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker):
continue
if new_service == service:
bouquet_found = True
break
if bouquet_found:
break
if bouquet_found:
break
if bouquet_found:
bouquet = bouquet[1]
if servicelist.getRoot() != bouquet:
servicelist.clearPath()
if servicelist.bouquet_root != bouquet:
servicelist.enterPath(servicelist.bouquet_root)
servicelist.enterPath(bouquet)
else:
servicelist.clearPath()
servicelist.enterPath(service)
servicelist.setCurrentSelection(service) #select the service in servicelist
servicelist.zap()
def zapService(session, id, title = ""):
# Must NOT unquote id here, breaks zap to streams
service = eServiceReference(id)
if len(title) > 0:
service.setName(title)
else:
title = id
# TODO: check standby
isRecording = service.getPath()
isRecording = isRecording and isRecording.startswith("/")
if not isRecording:
if config.ParentalControl.servicepinactive.value and config.ModernWebif.parentalenabled.value:
if getProtection(service.toString()) != "0":
return {
"result": False,
"message": "Service '%s' is blocked by parental Control" % title
}
# use mediaplayer for recording
if isRecording:
if isinstance(session.current_dialog, InfoBar):
session.open(MoviePlayer, service)
else:
session.nav.playService(service)
else:
if isinstance(session.current_dialog, MoviePlayer):
session.current_dialog.lastservice = service
session.current_dialog.close()
zapInServiceList(service)
return {
"result": True,
"message": "Active service is now '%s'" % title
}
def remoteControl(key, type = "", rcu = ""):
# TODO: do something better here
if rcu == "standard":
remotetype = "dreambox remote control (native)"
elif rcu == "advanced":
remotetype = "dreambox advanced remote control (native)"
elif rcu == "keyboard":
remotetype = "dreambox ir keyboard"
else:
if config.misc.rcused.value == 0:
remotetype = "dreambox advanced remote control (native)"
else:
remotetype = "dreambox remote control (native)"
try:
from Tools.HardwareInfo import HardwareInfo
if HardwareInfo().get_device_model() in ("xp1000", "formuler1", "formuler3", "et9000", "et9200", "hd1100", "hd1200"):
remotetype = "dreambox advanced remote control (native)"
except:
print "[ModernWebif] wrong hw detection"
amap = eActionMap.getInstance()
if type == "long":
amap.keyPressed(remotetype, key, 0)
amap.keyPressed(remotetype, key, 3)
elif type == "ascii":
amap.keyPressed(remotetype, key, 4)
else:
amap.keyPressed(remotetype, key, 0)
amap.keyPressed(remotetype, key, 1)
return {
"result": True,
"message": "RC command '%s' has been issued" % str(key)
}
def setPowerState(session, state):
from Screens.Standby import Standby, TryQuitMainloop, inStandby
state = int(state)
if state == 0: # Toggle StandBy
if inStandby == None:
session.open(Standby)
else:
inStandby.Power()
elif state == 1: # DeepStandBy
session.open(TryQuitMainloop, state)
elif state == 2: # Reboot
session.open(TryQuitMainloop, state)
elif state == 3: # Restart Enigma
session.open(TryQuitMainloop, state)
elif state == 4: # Wakeup
if inStandby != None:
inStandby.Power()
elif state == 5: # Standby
if inStandby == None:
session.open(Standby)
elif state == 6:
print "HAHA"
return {
"result": True,
"instandby": inStandby != None
}
def getStandbyState(session):
from Screens.Standby import inStandby
return {
"result": True,
"instandby": inStandby != None
}
| gpl-2.0 | -6,510,634,847,256,786,000 | 31.719298 | 303 | 0.653798 | false |
camradal/ansible | lib/ansible/modules/database/postgresql/postgresql_lang.py | 25 | 10095 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Jens Depuydt <http://www.jensd.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: postgresql_lang
short_description: Adds, removes or changes procedural languages with a PostgreSQL database.
description:
- Adds, removes or changes procedural languages with a PostgreSQL database.
    - This module allows you to add a language, remove a language or change the trust
relationship with a PostgreSQL database. The module can be used on the machine
where executed or on a remote host.
    - When removing a language from a database, it is possible that dependencies prevent
      the language from being removed. In that case, you can specify cascade to
automatically drop objects that depend on the language (such as functions in the
language). In case the language can't be deleted because it is required by the
database system, you can specify fail_on_drop=no to ignore the error.
    - Be careful when marking a language as trusted since this could be a potential
security breach. Untrusted languages allow only users with the PostgreSQL superuser
privilege to use this language to create new functions.
version_added: "1.7"
options:
lang:
description:
- name of the procedural language to add, remove or change
required: true
default: null
trust:
description:
- make this language trusted for the selected db
required: false
default: no
choices: [ "yes", "no" ]
db:
description:
- name of database where the language will be added, removed or changed
required: false
default: null
force_trust:
description:
- marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
- use with care!
required: false
default: no
choices: [ "yes", "no" ]
fail_on_drop:
description:
- if C(yes), fail when removing a language. Otherwise just log and continue
- in some cases, it is not possible to remove a language (used by the db-system). When dependencies block the removal, consider using C(cascade).
required: false
default: 'yes'
choices: [ "yes", "no" ]
cascade:
description:
- when dropping a language, also delete object that depend on this language.
- only used when C(state=absent).
required: false
default: no
choices: [ "yes", "no" ]
port:
description:
- Database port to connect to.
required: false
default: 5432
login_user:
description:
- User used to authenticate with PostgreSQL
required: false
default: postgres
login_password:
description:
- Password used to authenticate with PostgreSQL (must match C(login_user))
required: false
default: null
login_host:
description:
- Host running PostgreSQL where you want to execute the actions.
required: false
default: localhost
state:
description:
- The state of the language for the selected database
required: false
default: present
choices: [ "present", "absent" ]
notes:
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module. If
the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Jens Depuydt (@jensdepuydt)"
'''
EXAMPLES = '''
# Add language pltclu to database testdb if it doesn't exist:
- postgresql_lang db=testdb lang=pltclu state=present
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted:
# Marks the language as trusted if it exists but isn't trusted yet
# force_trust makes sure that the language will be marked as trusted
- postgresql_lang:
db: testdb
lang: pltclu
state: present
trust: yes
force_trust: yes
# Remove language pltclu from database testdb:
- postgresql_lang:
db: testdb
lang: pltclu
state: absent
# Remove language pltclu from database testdb and remove all dependencies:
- postgresql_lang:
db: testdb
lang: pltclu
state: absent
cascade: yes
# Remove language c from database testdb but ignore errors if something prevents the removal:
- postgresql_lang:
db: testdb
    lang: c
state: absent
fail_on_drop: no
'''
try:
import psycopg2
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
def lang_exists(cursor, lang):
"""Checks if language exists for db"""
query = "SELECT lanname FROM pg_language WHERE lanname='%s'" % lang
cursor.execute(query)
return cursor.rowcount > 0
def lang_istrusted(cursor, lang):
"""Checks if language is trusted for db"""
query = "SELECT lanpltrusted FROM pg_language WHERE lanname='%s'" % lang
cursor.execute(query)
return cursor.fetchone()[0]
def lang_altertrust(cursor, lang, trust):
"""Changes if language is trusted for db"""
query = "UPDATE pg_language SET lanpltrusted = %s WHERE lanname=%s"
cursor.execute(query, (trust, lang))
return True
def lang_add(cursor, lang, trust):
"""Adds language for db"""
if trust:
query = 'CREATE TRUSTED LANGUAGE "%s"' % lang
else:
query = 'CREATE LANGUAGE "%s"' % lang
cursor.execute(query)
return True
def lang_drop(cursor, lang, cascade):
"""Drops language for db"""
cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
try:
if cascade:
cursor.execute("DROP LANGUAGE \"%s\" CASCADE" % lang)
else:
cursor.execute("DROP LANGUAGE \"%s\"" % lang)
except:
cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return False
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
return True
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default="", no_log=True),
login_host=dict(default=""),
db=dict(required=True),
port=dict(default='5432'),
lang=dict(required=True),
state=dict(default="present", choices=["absent", "present"]),
trust=dict(type='bool', default='no'),
force_trust=dict(type='bool', default='no'),
cascade=dict(type='bool', default='no'),
fail_on_drop=dict(type='bool', default='yes'),
),
supports_check_mode = True
)
db = module.params["db"]
port = module.params["port"]
lang = module.params["lang"]
state = module.params["state"]
trust = module.params["trust"]
force_trust = module.params["force_trust"]
cascade = module.params["cascade"]
fail_on_drop = module.params["fail_on_drop"]
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port",
"db":"database"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.items()
if k in params_map and v != "" )
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor()
except Exception:
e = get_exception()
module.fail_json(msg="unable to connect to database: %s" % e)
changed = False
lang_dropped = False
kw = dict(db=db,lang=lang,trust=trust)
if state == "present":
if lang_exists(cursor, lang):
lang_trusted = lang_istrusted(cursor, lang)
if (lang_trusted and not trust) or (not lang_trusted and trust):
if module.check_mode:
changed = True
else:
changed = lang_altertrust(cursor, lang, trust)
else:
if module.check_mode:
changed = True
else:
changed = lang_add(cursor, lang, trust)
if force_trust:
changed = lang_altertrust(cursor, lang, trust)
else:
if lang_exists(cursor, lang):
if module.check_mode:
changed = True
kw['lang_dropped'] = True
else:
changed = lang_drop(cursor, lang, cascade)
if fail_on_drop and not changed:
msg = "unable to drop language, use cascade to delete dependencies or fail_on_drop=no to ignore"
module.fail_json(msg=msg)
kw['lang_dropped'] = changed
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
kw['changed'] = changed
module.exit_json(**kw)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
| gpl-3.0 | -399,532,289,757,774,500 | 33.336735 | 159 | 0.647647 | false |
voidcc/POXPOF | pox/lib/recoco/recoco.py | 2 | 25749 | # Copyright 2011-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import deque
from Queue import PriorityQueue
from Queue import Queue
import time
import threading
from threading import Thread
import select
import traceback
import os
import socket
import pox.lib.util
import random
from pox.lib.epoll_select import EpollSelect
CYCLE_MAXIMUM = 2
# A ReturnFunction can return this to skip a scheduled slice at the last
# moment.
ABORT = object()
defaultScheduler = None
nextTaskID = 0
def generateTaskID ():
global nextTaskID
nextTaskID += 1
return nextTaskID
class BaseTask (object):
id = None
#running = False
priority = 1
@classmethod
def new (cls, *args, **kw):
"""
Creates a task and starts it on the default scheduler with the
default priority.
"""
o = cls(*args, **kw)
o.start(fast=True)
return o
def __init__ (self, *args, **kw):
#NOTE: keep in sync with Task.__init__ !
# (better yet, refactor)
self.id = generateTaskID()
self.gen = self.run(*args, **kw)
self.rv = None
self.rf = None # ReturnFunc
def start (self, scheduler = None, priority = None, fast = False):
"""
Schedules this task.
See Scheduler.schedule() and Scheduler.fast_schedule() for the meaning
of the 'fast' argument.
"""
if scheduler is None:
scheduler = defaultScheduler
#print ("defaultScheduler", defaultScheduler) #print information
if priority != None:
self.priority = priority
if fast:
scheduler.fast_schedule(self)
#print("Task start, fast schedule") # print information
else:
scheduler.schedule(self)
#print("Task start, not fast schedule") # print information
def execute (self):
if self.rf is not None:
v = self.rf(self)
self.rf = None
self.rv = None
if v == ABORT:
return False
else:
v = self.rv
self.rv = None
return self.gen.send(v)
def run (self):
print("Dummy task")
yield 0
class Task (BaseTask):
"""
Provides an interface close to threading.Thread
"""
def __init__ (self, group=None, target=None, name=None, args=(), kwargs={}):
#NOTE: keep in sync with BaseTask.__init__ !
# (better yet, refactor)
assert(group == None) # Not supported
self.id = generateTaskID()
self.rv = None
self.name = name
if name == None:
self.name = str(self.id)
self.target = target
self.args = args
self.kwargs = kwargs
self.gen = self.run(*args, **kwargs)
BaseTask.__init__(self)
def run (self):
g = self.target(*self.args, **self.kwargs)
g.next()
while True:
g.send((yield))
def __str__ (self):
return "<" + self.__class__.__name__ + "/tid" + str(self.name) + ">"
class Scheduler (object):
""" Scheduler for Tasks """
def __init__ (self, isDefaultScheduler = None, startInThread = True,
daemon = False, useEpoll=False):
self._ready = deque()
self._hasQuit = False
self._selectHub = SelectHub(self, useEpoll=useEpoll)
self._thread = None
self._event = threading.Event()
self._lock = threading.Lock()
self._callLaterTask = None
self._allDone = False
global defaultScheduler
if isDefaultScheduler or (isDefaultScheduler is None and
defaultScheduler is None):
defaultScheduler = self
if startInThread:
self.runThreaded(daemon)
def __del__ (self):
self._hasQuit = True
super(Scheduler, self).__del__()
def callLater (self, func, *args, **kw):
"""
Calls func with the given arguments at some later point, within this
scheduler. This is a good way for another thread to call something in
a co-op-thread-safe manner.
"""
with self._lock:
if self._callLaterTask is None:
self._callLaterTask = CallLaterTask()
self._callLaterTask.start()
self._callLaterTask.callLater(func, *args, **kw)
def runThreaded (self, daemon = False):
self._thread = Thread(target = self.run) # make self.run as a thread
self._thread.daemon = daemon
self._thread.start() # excute this thread
def synchronized (self):
return Synchronizer(self)
def schedule (self, task, first = False):
"""
Schedule the given task to run later.
If first is True, the task will be the next to run.
Unlike fast_schedule(), this method will not schedule a task to run
multiple times. The one exception is if a Task actually schedules
itself. The easiest way to avoid this is simply not to do it.
See fast_schedule() and ScheduleTask for more info.
"""
if threading.current_thread() is self._thread:
# We're know we're good.
#TODO: Refactor the following with ScheduleTask
if task in self._ready:
# It might make sense to keep a flag on the task, since checking
# if it's in the ready list is not very efficient.
# Not sure if it makes sense to print out a message here or not.
import logging
logging.getLogger("recoco").info("Task %s scheduled multiple " +
"times", task)
return False
self.fast_schedule(task, first)
return True
st = ScheduleTask(self, task)
#print('Schedule --> schedule()') #cc
st.start(fast=True)
def fast_schedule (self, task, first = False):
"""
Schedule the given task to run later.
If first is True, the task will be the next to run.
This method does not protect you from scheduling the same Task more
than once, which you probably really don't want to do.
If you are scheduling an existing Task (waking it) from another Task,
you should either implement your own logic to ensure that you don't
schedule it multiple times, or you should just use schedule().
If you are scheduling an existing Task (waking it) from any thread
besides the one the scheduler is running on, there's a race condition
which makes it nontrivial to ensure that multiple schedulings never
happen, and you should just use schedule() for such Tasks.
If you are scheduling a new Task that you just created, this method
is always safe.
"""
# Sanity check. Won't catch all cases.
assert task not in self._ready
if first:
self._ready.appendleft(task)
else:
self._ready.append(task)
self._event.set()
def quit (self):
self._hasQuit = True
def run (self):
try:
while self._hasQuit == False:
if len(self._ready) == 0:
self._event.wait(CYCLE_MAXIMUM) # Wait for a while
self._event.clear()
if self._hasQuit: break
r = self.cycle() # run self.cycle
#print('Scheduler --> run()') #print information
finally:
#print("Scheduler done")
self._hasQuit = True
self._selectHub._cycle()
self._allDone = True
def cycle (self):
#if len(self._ready) == 0: return False
# Patented hilarious priority system
#TODO: Replace it with something better
#print('Scheduler --> cycle() instructed by run()') #print information
t = None
try:
while True:
t = self._ready.popleft()
if t.priority >= 1:
break
if len(self._ready) == 0:
break
if t.priority >= random.random():
break
self._ready.append(t)
except IndexError:
return False
#print(len(self._ready), "tasks")
try:
rv = t.execute()
except StopIteration:
return True
except:
try:
print("Task", t, "caused exception and was de-scheduled")
traceback.print_exc()
except:
pass
return True
if isinstance(rv, BlockingOperation):
try:
rv.execute(t, self)
except:
print("Task", t, "caused exception during a blocking operation and " +
"was de-scheduled")
traceback.print_exc()
elif rv is False:
# Just unschedule/sleep
#print "Unschedule", t, rv
pass
elif type(rv) == int or type(rv) == long or type(rv) == float:
# Sleep time
if rv == 0:
#print "sleep 0"
self._ready.append(t)
else:
self._selectHub.registerTimer(t, rv)
elif rv == None:
raise RuntimeError("Must yield a value!")
return True
#TODO: Read() and Write() BlockingOperations that use nonblocking sockets with
# SelectHub and do post-processing of the return value.
class BlockingOperation (object):
"""
A base class for what can be thought of as syscalls for Tasks.
The separation between __init__ and execute may seem sort of artificial, but
it serves an actual purpose, which is that it makes it impossible for a task
to accidentally start to make a syscall (by instantiating a BlockingOperation)
without actually yielding.
"""
def __init__ (self):
""" When the syscall is made by a task, this is executed """
pass
def execute (self, task, scheduler):
""" Scheduler calls this to actually execute the syscall """
pass
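# A Task issues one of these "syscalls" by yielding an instance of a
# BlockingOperation subclass from its run() generator. Illustrative sketch
# (not part of the original file; 'sock' is a placeholder socket):
#
#   class Waiter (BaseTask):
#     def run (self):
#       yield Sleep(5)                                  # sleep five seconds
#       rl, wl, xl = yield Select([sock], [], [], 10)   # wait for readability
#
# The scheduler calls execute() on the yielded object and reschedules the
# task once the operation completes (see Sleep, Select, Recv and Send below).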
class CallBlocking (BlockingOperation):
"""
Syscall that calls an actual blocking operation (like a real .recv()).
In order to keep from blocking, it calls it on another thread.
The return value is (ret_val, exc_info), one of which is always None.
"""
@classmethod
def new (_cls, _func, *_args, **_kw):
return _cls(_func, *_args, **_kw)
def __init__ (self, func, args=(), kw={}):
self.t = None
self.scheduler = None
self.task = None
self.func = func
self.args = args
self.kw = kw
def _proc (self):
try:
self.task.rv = (self.func(*self.args, **self.kw), None)
except:
import sys
self.task.rv = (None, sys.exc_info())
self.scheduler.fast_schedule(self.task)
def execute (self, task, scheduler):
self.task = task
self.scheduler = scheduler
#NOTE: It might be nice to use a pool here
self.t = threading.Thread(target=self._proc)
#pool.add(self._proc)
self.t.daemon = True
self.t.start()
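# Illustrative sketch (not part of the original file; some_blocking_func and
# arg1 are placeholders): from inside a task,
#   rv, exc = yield CallBlocking(some_blocking_func, args=(arg1,))
# where exactly one of rv / exc is None; BlockingTask below is a real caller.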
class Exit (BlockingOperation):
"""
Syscall that kills the scheduler
"""
def __init__ (self):
pass
def execute (self, task, scheduler):
scheduler.quit()
class Sleep (BlockingOperation):
"""
Sleep for specified amount of time (seconds)
  None means unschedule (i.e., sleep until an outside force wakes it)
0 means reschedule for later (no additional time)
"""
def __init__ (self, timeToWake = None, absoluteTime = False):
if absoluteTime == False and timeToWake != None: timeToWake += time.time()
self._t = timeToWake
def execute (self, task, scheduler):
if self._t is None:
# Just unschedule
return
if self._t is 0 or self._t < time.time():
# Just reschedule
scheduler.fast_schedule(task)
return
scheduler._selectHub.registerTimer(task, self._t, True) # A bit ugly
class Select (BlockingOperation):
"""
Should be very similar to Python select.select()
"""
def __init__ (self, *args, **kw):
self._args = args
self._kw = kw
#print('args:',args) #print information
#print('kw:',kw)
def execute (self, task, scheduler):
#print('class Select --> execute')
scheduler._selectHub.registerSelect(task, *self._args, **self._kw)
defaultRecvFlags = 0
try:
defaultRecvFlags = socket.MSG_DONTWAIT
except:
pass
class Recv (BlockingOperation):
def __init__ (self, fd, bufsize = 1024*8, flags = defaultRecvFlags,
timeout = None):
"""
Recv call on fd.
"""
self._fd = fd
self._length = bufsize
self._timeout = timeout
self._flags = flags
def _recvReturnFunc (self, task):
# Select() will have placed file descriptors in rv
if len(task.rv[2]) != 0 or len(task.rv[0]) == 0:
# Socket error
task.rv = None
return None
sock = task.rv[0][0]
task.rv = None
try:
return sock.recv(self._length, self._flags)
except:
traceback.print_exc()
return None #
def execute (self, task, scheduler):
task.rf = self._recvReturnFunc
scheduler._selectHub.registerSelect(task, [self._fd], None, [self._fd],
timeout=self._timeout)
class Send (BlockingOperation):
def __init__ (self, fd, data):
self._fd = fd
self._data = data
self._sent = 0
self._scheduler = None
def _sendReturnFunc (self, task):
# Select() will have placed file descriptors in rv
    if len(task.rv[2]) != 0 or len(task.rv[1]) == 0:
      # Socket error
      task.rv = None
      return self._sent
    sock = task.rv[1][0]
    task.rv = None
try:
      if len(self._data) > 1024:
        data = self._data[:1024]
        self._data = self._data[1024:]
      else:
        # Send the whole remaining payload in one go
        data = self._data
        self._data = b''
      l = sock.send(data, flags = socket.MSG_DONTWAIT)
self._sent += l
if l == len(data) and len(self._data) == 0:
return self._sent
self._data = data[l:] + self._data
except:
pass
# Still have data to send...
self.execute(task, self._scheduler)
return ABORT
def execute (self, task, scheduler):
self._scheduler = scheduler
task.rf = self._sendReturnFunc
scheduler._selectHub.registerSelect(task, None, [self._fd], [self._fd])
#TODO: just merge this in with Scheduler?
class SelectHub (object):
"""
This class is a single select() loop that handles all Select() requests for
a scheduler as well as timed wakes (i.e., Sleep()).
"""
def __init__ (self, scheduler, useEpoll=False):
# We store tuples of (elapse-time, task)
self._sleepers = [] # Sleeping items stored as a heap
self._incoming = Queue() # Threadsafe queue for new items
self._scheduler = scheduler
self._pinger = pox.lib.util.makePinger()
self.epoll = EpollSelect() if useEpoll else None
self._ready = False
self._thread = Thread(target = self._threadProc)
self._thread.daemon = True
self._thread.start()
# Ugly busy wait for initialization
#while self._ready == False:
def _threadProc (self):
tasks = {}
timeouts = []
rets = {}
while self._scheduler._hasQuit == False:
#print("SelectHub cycle")
if len(timeouts) == 0:
timeout = None
else:
timeout = self._sleepers[0][0] - time.time()
if timeout < 0: timeout = 0
#NOTE: Everything you select on eventually boils down to file descriptors,
# which are unique, obviously. It might be possible to leverage this
# to reduce hashing cost (i.e. by picking a really good hashing
# function), though this is complicated by wrappers, etc...
rl = {}
wl = {}
xl = {}
timeout = None
timeoutTask = None
now = time.time()
expired = None
for t,trl,twl,txl,tto in tasks.itervalues():
if tto != None:
if tto <= now:
# Already expired
if expired is None: expired = []
expired.append(t)
if tto-now > 0.1: print("preexpired",tto,now,tto-now)
continue
tt = tto - now
if tt < timeout or timeout is None:
timeout = tt
timeoutTask = t
if trl:
for i in trl: rl[i] = t
if twl:
for i in twl: wl[i] = t
if txl:
for i in txl: xl[i] = t
if expired:
for t in expired:
del tasks[t]
self._return(t, ([],[],[]))
if timeout is None: timeout = CYCLE_MAXIMUM
if self.epoll:
ro, wo, xo = self.epoll.select( rl.keys() + [self._pinger],
wl.keys(),
xl.keys(), timeout )
else:
ro, wo, xo = select.select( rl.keys() + [self._pinger],
wl.keys(),
xl.keys(), timeout )
if len(ro) == 0 and len(wo) == 0 and len(xo) == 0 and timeoutTask != None:
# IO is idle - dispatch timers / release timeouts
del tasks[timeoutTask]
self._return(timeoutTask, ([],[],[]))
else:
# We have IO events
if self._pinger in ro:
self._pinger.pongAll()
while not self._incoming.empty():
stuff = self._incoming.get(True)
task = stuff[0]
assert task not in tasks
tasks[task] = stuff
self._incoming.task_done()
if len(ro) == 1 and len(wo) == 0 and len(xo) == 0:
# Just recycle
continue
ro.remove(self._pinger)
# At least one thread is going to be resumed
for i in ro:
task = rl[i]
if task not in rets: rets[task] = ([],[],[])
rets[task][0].append(i)
for i in wo:
task = wl[i]
if task not in rets: rets[task] = ([],[],[])
rets[task][1].append(i)
for i in xo:
task = xl[i]
if task not in rets: rets[task] = ([],[],[])
rets[task][2].append(i)
for t,v in rets.iteritems():
del tasks[t]
self._return(t, v)
rets.clear()
def registerSelect (self, task, rlist = None, wlist = None, xlist = None,
timeout = None, timeIsAbsolute = False):
if not timeIsAbsolute:
if timeout != None:
timeout += time.time()
self._incoming.put((task, rlist, wlist, xlist, timeout))
self._cycle()
def _cycle (self):
"""
Cycle the wait thread so that new timers or FDs can be picked up
"""
self._pinger.ping()
def registerTimer (self, task, timeToWake, timeIsAbsolute = False):
"""
    Register a task to be woken up timeToWake units in the future.
    It means timeToWake seconds in the future if timeIsAbsolute is False.
"""
return self.registerSelect(task, None, None, None, timeToWake,
timeIsAbsolute)
def _return (self, sleepingTask, returnVal):
#print("reschedule", sleepingTask)
sleepingTask.rv = returnVal
self._scheduler.fast_schedule(sleepingTask)
class ScheduleTask (BaseTask):
"""
If multiple real threads (such as a recoco scheduler thread and any
other thread, or any two other threads) try to schedule ("wake") the
same Task with Scheduler.fast_schedule(), there is a race condition where
the Task may get scheduled multiple times, which is probably quite bad.
Scheduler.schedule() fixes this by creating one of these ScheduleTasks,
and it's this ScheduleTask that actually calls fast_schedule(). This
way, the Task is only ever *really* scheduled from the scheduler thread
and the race condition doesn't exist.
"""
def __init__ (self, scheduler, task):
BaseTask.__init__(self)
self._scheduler = scheduler
self._task = task
def run (self):
#TODO: Refactor the following, since it is copy/pasted from schedule().
if self._task in self._scheduler._ready:
# It might make sense to keep a flag on the task, since checking
# if it's in the ready list is not very efficient.
# Not sure if it makes sense to print out a message here or not.
import logging
logging.getLogger("recoco").info("Task %s scheduled multiple " +
"times", self._task)
else:
self._scheduler.fast_schedule(self._task, True)
yield False
class SyncTask (BaseTask):
def __init__ (self, *args, **kw):
BaseTask.__init__(self)
self.inlock = threading.Lock()
self.outlock = threading.Lock()
self.inlock.acquire()
self.outlock.acquire()
def run (self):
self.inlock.release()
self.outlock.acquire()
class Synchronizer (object):
def __init__ (self, scheduler = None):
if scheduler is None:
scheduler = defaultScheduler
self.scheduler = scheduler
self.syncer = None
self.enter = 0
def __enter__ (self):
self.enter += 1
if self.enter == 1:
self.syncer = SyncTask()
self.syncer.start(self.scheduler) #NOTE: maybe add it to head of list?
self.syncer.inlock.acquire()
return self.syncer
def __exit__ (self, type_, value, traceback):
self.enter -= 1
if self.enter == 0:
self.syncer.outlock.release()
class Timer (Task):
"""
A simple timer.
timeToWake Amount of time to wait before calling callback (seconds)
callback Some callable to be called when the timer expires
absoluteTime A specific time to fire (as from time.time())
recurring Whether to call repeatedly or just once
args, kw Args and keyword args for the callback
scheduler The recoco scheduler to use (None means default scheduler)
started If False, requires you to call .start() to begin timer
selfStoppable If True, the callback can return False to cancel the timer
"""
def __init__ (self, timeToWake, callback, absoluteTime = False,
recurring = False, args = (), kw = {}, scheduler = None,
started = True, selfStoppable = True):
if absoluteTime and recurring:
raise RuntimeError("Can't have a recurring timer for an absolute time!")
Task.__init__(self)
self._self_stoppable = selfStoppable
self._next = timeToWake
self._interval = timeToWake if recurring else 0
if not absoluteTime:
self._next += time.time()
self._cancelled = False
self._recurring = recurring
self._callback = callback
self._args = args
self._kw = kw
if started: self.start(scheduler)
def cancel (self):
self._cancelled = True
def run (self):
while not self._cancelled:
yield Sleep(timeToWake=self._next, absoluteTime=True)
if self._cancelled: break
self._next = time.time() + self._interval
rv = self._callback(*self._args,**self._kw)
if self._self_stoppable and (rv is False): break
if not self._recurring: break
yield False # Quit
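# Illustrative sketch (not part of the original file; my_callback is a
# placeholder): fire a callback every 30 seconds on the default scheduler
# until it is cancelled or returns False:
#   t = Timer(30, my_callback, recurring = True)
#   ...
#   t.cancel()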
class CallLaterTask (BaseTask):
def __init__ (self):
BaseTask.__init__(self)
self._pinger = pox.lib.util.makePinger()
from collections import deque
self._calls = deque()
def callLater (self, func, *args, **kw):
assert callable(func)
self._calls.append((func,args,kw))
self._pinger.ping()
def run (self):
while True:
yield Select([self._pinger], None, None)
self._pinger.pongAll()
try:
while True:
e = self._calls.popleft()
try:
e[0](*e[1], **e[2])
except:
import logging
logging.getLogger("recoco").exception("Exception calling %s", e[0])
except:
pass
class BlockingTask (BaseTask):
@classmethod
def new (_cls, _func, _cb=None, *_args, **_kw):
return _cls(_func, _cb, *_args, **_kw)
def __init__ (self, func, callback=None, args=(), kw={}):
"""
callback takes two parameters: rv and exc. One is always None.
if callback is actually a tuple, the first one is called with
the return value on normal exit, the second is called with
exc_info on an exception.
"""
BaseTask.__init__(self)
self.func = func
self.callback = callback
self.args = args
self.kw = kw
def run (self):
rv,exc = (yield CallBlocking(self.func, args=self.args, kw=self.kw))
if self.callback is None:
pass
elif isinstance(self.callback, tuple):
if exc is not None:
if self.callback[1] is not None:
self.callback[1](exc)
else:
if self.callback[0] is not None:
self.callback[0](rv)
else:
self.callback(rv,exc)
# Sanity tests
if __name__ == "__main__":
class TestTask (BaseTask):
def __init__ (self, *args, **kw):
BaseTask.__init__(self, *args, **kw)
def run (self, a, b, inc = 1, sleep = 0):
n = a
while n <= b:
print(n)
n+=inc
yield Select([],[],[],sleep)
s = Scheduler(daemon=True)
t = TestTask(5,10,sleep=10)
t.start()
t = TestTask(100,110,sleep=20)
t.start()
#TestTask(1000,1010,sleep=1).start()
import code
code.interact(local=locals())
s.quit()
| apache-2.0 | 2,807,824,183,292,650,500 | 29.080607 | 82 | 0.584877 | false |
guillermooo-forks/Vintageous | tests/commands/test__vi_big_e.py | 9 | 1113 | from collections import namedtuple
from Vintageous.tests import ViewTest
from Vintageous.vi.utils import modes
test_data = namedtuple('test_data', 'text startRegion mode expectedRegion msg')
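# Each case gives the buffer text, the initial selection as an (a, b) region,
# the mode under test, the region expected after the motion runs, and a label
# used in failure messages.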
ALL_CASES = (
test_data('01. 4', (1, 1), modes.NORMAL, (2, 2), 'Normal'),
test_data('012 4', (1, 1), modes.INTERNAL_NORMAL, (1, 3), 'Internal Normal'),
test_data('0ab3 5', (1, 3), modes.VISUAL, (1, 4), 'Visual Forward'),
test_data('0b2 a5', (5, 1), modes.VISUAL, (5, 2), 'Visual Reverse no crossover'),
test_data('0ba3 5', (3, 1), modes.VISUAL, (2, 4), 'Visual Reverse crossover'),
)
class Test_vi_big_e(ViewTest):
def runTests(self, data):
for (i, data) in enumerate(data):
self.write(data.text)
self.clear_sel()
self.add_sel(self.R(*data.startRegion))
self.view.run_command('_vi_big_e', {'mode': data.mode, 'count': 1})
self.assert_equal_regions(self.R(*data.expectedRegion), self.first_sel(),
"Failed on index {} {} : Text:\"{}\" Region:{}"
.format(i, data.msg, data.text, data.startRegion))
def testAllCases(self):
self.runTests(ALL_CASES)
| mit | 1,162,513,183,405,325,000 | 38.75 | 91 | 0.639712 | false |
ucloud/uai-sdk | uaitrain/api/get_train_job_predict_start_time.py | 1 | 1951 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from uaitrain.api.base_op import BaseUAITrainAPIOp
class GetUAITrainJobStartPredictApiOp(BaseUAITrainAPIOp):
"""
GetUAITrainJobStartPredictAPI
Identical with UAI Train GetUAITrainJobStartPredict API func
Input:
TrainJobId string(required) Job id of the job
Output:
RetCode int API return code: 0: success, others: error code
Action string Action name
Message string Message: error description
WaitJobCount int waited job count
"""
ACTION_NAME = "GetUAITrainJobStartPredict"
def __init__(self, pub_key, priv_key, job_id, project_id="", region="", zone=""):
super(GetUAITrainJobStartPredictApiOp, self).__init__(self.ACTION_NAME, pub_key, priv_key, project_id, region, zone)
self.cmd_params["TrainJobId"] = job_id
def _check_args(self):
super(GetUAITrainJobStartPredictApiOp, self)._check_args()
if self.cmd_params["TrainJobId"] == "" or type(self.cmd_params["TrainJobId"]) != str:
raise ValueError("TrainJobId should be <str> and should not be nil")
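# Illustrative usage sketch (not part of the original file; the keys and job
# id are placeholders, and the request is issued through whatever call method
# BaseUAITrainAPIOp exposes):
#   op = GetUAITrainJobStartPredictApiOp(pub_key, priv_key, job_id='uaitrain-xxxxx')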
| apache-2.0 | -4,543,918,334,692,889,000 | 45.452381 | 124 | 0.615069 | false |
andrewcmyers/tensorflow | tensorflow/contrib/keras/python/keras/layers/convolutional_recurrent.py | 25 | 24296 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convolutional-recurrent layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.layers.recurrent import Recurrent
from tensorflow.contrib.keras.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
class ConvRecurrent2D(Recurrent):
"""Abstract base class for convolutional recurrent layers.
Do not use in a model -- it's not a functional layer!
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
        If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
Input shape:
5D tensor with shape `(num_samples, timesteps, channels, rows, cols)`.
Output shape:
- if `return_sequences`: 5D tensor with shape
`(num_samples, timesteps, channels, rows, cols)`.
- else, 4D tensor with shape `(num_samples, channels, rows, cols)`.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an `Embedding` layer with the `mask_zero` parameter
set to `True`.
**Note:** for the time being, masking is only supported with Theano.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch.
This assumes a one-to-one mapping between
samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
          a `batch_input_shape=(...)` to the first layer in your model.
This is the expected shape of your inputs *including the batch
size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
return_sequences=False,
go_backwards=False,
stateful=False,
**kwargs):
super(ConvRecurrent2D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
'dilation_rate')
self.return_sequences = return_sequences
self.go_backwards = go_backwards
self.stateful = stateful
self.input_spec = [InputSpec(ndim=5)]
self.state_spec = None
def _compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
rows = input_shape[3]
cols = input_shape[4]
elif self.data_format == 'channels_last':
rows = input_shape[2]
cols = input_shape[3]
rows = conv_utils.conv_output_length(
rows,
self.kernel_size[0],
padding=self.padding,
stride=self.strides[0],
dilation=self.dilation_rate[0])
cols = conv_utils.conv_output_length(
cols,
self.kernel_size[1],
padding=self.padding,
stride=self.strides[1],
dilation=self.dilation_rate[1])
if self.return_sequences:
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], self.filters, rows, cols])
elif self.data_format == 'channels_last':
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], rows, cols, self.filters])
else:
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], self.filters, rows, cols])
elif self.data_format == 'channels_last':
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, self.filters])
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'return_sequences': self.return_sequences,
'go_backwards': self.go_backwards,
'stateful': self.stateful
}
base_config = super(ConvRecurrent2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConvLSTM2D(ConvRecurrent2D):
"""Convolutional LSTM.
It is similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Arguments:
filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the strides of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, time, ..., channels)`
while `channels_first` corresponds to
inputs with shape `(batch, time, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
      used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
go_backwards: Boolean (default False).
        If True, process the input sequence backwards.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Input shape:
- if data_format='channels_first'
5D tensor with shape:
`(samples,time, channels, rows, cols)`
- if data_format='channels_last'
5D tensor with shape:
`(samples,time, rows, cols, channels)`
Output shape:
- if `return_sequences`
- if data_format='channels_first'
5D tensor with shape:
`(samples, time, filters, output_row, output_col)`
- if data_format='channels_last'
5D tensor with shape:
`(samples, time, output_row, output_col, filters)`
- else
- if data_format ='channels_first'
4D tensor with shape:
`(samples, filters, output_row, output_col)`
- if data_format='channels_last'
4D tensor with shape:
`(samples, output_row, output_col, filters)`
where o_row and o_col depend on the shape of the filter and
the padding
Raises:
ValueError: in case of invalid constructor arguments.
References:
- [Convolutional LSTM Network: A Machine Learning Approach for
Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)
The current implementation does not include the feedback loop on the
cells output
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1),
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
return_sequences=False,
go_backwards=False,
stateful=False,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(ConvLSTM2D, self).__init__(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
return_sequences=return_sequences,
go_backwards=go_backwards,
stateful=stateful,
**kwargs)
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_spec = [InputSpec(ndim=4), InputSpec(ndim=4)]
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())
batch_size = input_shape[0] if self.stateful else None
self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:])
if self.stateful:
self.reset_states()
else:
# initial states: 2 all-zero tensor of shape (filters)
self.states = [None, None]
if self.data_format == 'channels_first':
channel_axis = 2
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
state_shape = [None] * 4
state_shape[channel_axis] = input_dim
state_shape = tuple(state_shape)
self.state_spec = [
InputSpec(shape=state_shape),
InputSpec(shape=state_shape)
]
kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
self.kernel_shape = kernel_shape
recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)
self.kernel = self.add_weight(
shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=recurrent_kernel_shape,
initializer=self.recurrent_initializer,
name='recurrent_kernel',
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.filters * 4,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
if self.unit_forget_bias:
bias_value = np.zeros((self.filters * 4,))
bias_value[self.filters:self.filters * 2] = 1.
K.set_value(self.bias, bias_value)
else:
self.bias = None
self.kernel_i = self.kernel[:, :, :, :self.filters]
self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]
self.kernel_f = self.kernel[:, :, :, self.filters:self.filters * 2]
self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.filters:
self.filters * 2]
self.kernel_c = self.kernel[:, :, :, self.filters * 2:self.filters * 3]
self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters * 2:
self.filters * 3]
self.kernel_o = self.kernel[:, :, :, self.filters * 3:]
self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:]
if self.use_bias:
self.bias_i = self.bias[:self.filters]
self.bias_f = self.bias[self.filters:self.filters * 2]
self.bias_c = self.bias[self.filters * 2:self.filters * 3]
self.bias_o = self.bias[self.filters * 3:]
else:
self.bias_i = None
self.bias_f = None
self.bias_c = None
self.bias_o = None
self.built = True
def get_initial_state(self, inputs):
# (samples, timesteps, rows, cols, filters)
initial_state = K.zeros_like(inputs)
# (samples, rows, cols, filters)
initial_state = K.sum(initial_state, axis=1)
shape = list(self.kernel_shape)
shape[-1] = self.filters
initial_state = self.input_conv(
initial_state, K.zeros(tuple(shape)), padding=self.padding)
initial_states = [initial_state for _ in range(2)]
return initial_states
def reset_states(self):
if not self.stateful:
raise RuntimeError('Layer must be stateful.')
input_shape = self.input_spec[0].shape
output_shape = self._compute_output_shape(input_shape)
if not input_shape[0]:
raise ValueError('If a RNN is stateful, a complete '
'input_shape must be provided '
'(including batch size). '
'Got input shape: ' + str(input_shape))
if self.return_sequences:
out_row, out_col, out_filter = output_shape[2:]
else:
out_row, out_col, out_filter = output_shape[1:]
if hasattr(self, 'states'):
K.set_value(self.states[0],
np.zeros((input_shape[0], out_row, out_col, out_filter)))
K.set_value(self.states[1],
np.zeros((input_shape[0], out_row, out_col, out_filter)))
else:
self.states = [
K.zeros((input_shape[0], out_row, out_col, out_filter)),
K.zeros((input_shape[0], out_row, out_col, out_filter))
]
def get_constants(self, inputs, training=None):
constants = []
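    # The dropout masks are generated once per call and shared across the four
    # gate transformations (i, f, c, o); when dropout is not applied the masks
    # are simply constants of 1.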
if self.implementation == 0 and 0 < self.dropout < 1:
ones = K.zeros_like(inputs)
ones = K.sum(ones, axis=1)
ones += 1
def dropped_inputs():
return K.dropout(ones, self.dropout)
dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
if 0 < self.recurrent_dropout < 1:
shape = list(self.kernel_shape)
shape[-1] = self.filters
ones = K.zeros_like(inputs)
ones = K.sum(ones, axis=1)
ones = self.input_conv(ones, K.zeros(shape), padding=self.padding)
ones += 1.
def dropped_inputs(): # pylint: disable=function-redefined
return K.dropout(ones, self.recurrent_dropout)
rec_dp_mask = [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(4)
]
constants.append(rec_dp_mask)
else:
constants.append([K.cast_to_floatx(1.) for _ in range(4)])
return constants
def input_conv(self, x, w, b=None, padding='valid'):
conv_out = K.conv2d(
x,
w,
strides=self.strides,
padding=padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if b is not None:
conv_out = K.bias_add(conv_out, b, data_format=self.data_format)
return conv_out
def reccurent_conv(self, x, w):
conv_out = K.conv2d(
x, w, strides=(1, 1), padding='same', data_format=self.data_format)
return conv_out
def step(self, inputs, states):
assert len(states) == 4
h_tm1 = states[0]
c_tm1 = states[1]
dp_mask = states[2]
rec_dp_mask = states[3]
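    # Standard LSTM gate equations with the input and recurrent transforms
    # realised as convolutions: i, f and o are the input, forget and output
    # gates, c is the updated cell state and h the hidden state that is
    # returned.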
x_i = self.input_conv(
inputs * dp_mask[0], self.kernel_i, self.bias_i, padding=self.padding)
x_f = self.input_conv(
inputs * dp_mask[1], self.kernel_f, self.bias_f, padding=self.padding)
x_c = self.input_conv(
inputs * dp_mask[2], self.kernel_c, self.bias_c, padding=self.padding)
x_o = self.input_conv(
inputs * dp_mask[3], self.kernel_o, self.bias_o, padding=self.padding)
h_i = self.reccurent_conv(h_tm1 * rec_dp_mask[0], self.recurrent_kernel_i)
h_f = self.reccurent_conv(h_tm1 * rec_dp_mask[1], self.recurrent_kernel_f)
h_c = self.reccurent_conv(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c)
h_o = self.reccurent_conv(h_tm1 * rec_dp_mask[3], self.recurrent_kernel_o)
i = self.recurrent_activation(x_i + h_i)
f = self.recurrent_activation(x_f + h_f)
c = f * c_tm1 + i * self.activation(x_c + h_c)
o = self.recurrent_activation(x_o + h_o)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(ConvLSTM2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| apache-2.0 | 8,820,626,537,844,753,000 | 40.040541 | 80 | 0.630762 | false |
hanlind/nova | nova/virt/hyperv/driver.py | 3 | 15289 | # Copyright (c) 2010 Cloud.com, Inc
# Copyright (c) 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A Hyper-V Nova Compute driver.
"""
import functools
import platform
import sys
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _LE
from nova.virt import driver
from nova.virt.hyperv import eventhandler
from nova.virt.hyperv import hostops
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import rdpconsoleops
from nova.virt.hyperv import serialconsoleops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
def convert_exceptions(function, exception_map):
expected_exceptions = tuple(exception_map.keys())
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except expected_exceptions as ex:
raised_exception = exception_map.get(type(ex))
if not raised_exception:
# exception might be a subclass of an expected exception.
for expected in expected_exceptions:
if isinstance(ex, expected):
raised_exception = exception_map[expected]
break
exc_info = sys.exc_info()
# NOTE(claudiub): Python 3 raises the exception object given as
# the second argument in six.reraise.
# The original message will be maintained by passing the original
# exception.
exc = raised_exception(six.text_type(exc_info[1]))
six.reraise(raised_exception, exc, exc_info[2])
return wrapper
def decorate_all_methods(decorator, *args, **kwargs):
def decorate(cls):
for attr in cls.__dict__:
class_member = getattr(cls, attr)
if callable(class_member):
setattr(cls, attr, decorator(class_member, *args, **kwargs))
return cls
return decorate
exception_conversion_map = {
# expected_exception: converted_exception
os_win_exc.OSWinException: exception.NovaException,
os_win_exc.HyperVVMNotFoundException: exception.InstanceNotFound,
}
# NOTE(claudiub): the purpose of the decorator below is to prevent any
# os_win exceptions (subclasses of OSWinException) to leak outside of the
# HyperVDriver.
@decorate_all_methods(convert_exceptions, exception_conversion_map)
class HyperVDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": False,
"supports_migrate_to_same_host": False,
"supports_attach_interface": True,
"supports_device_tagging": True,
}
def __init__(self, virtapi):
# check if the current version of Windows is supported before any
# further driver initialisation.
self._check_minimum_windows_version()
super(HyperVDriver, self).__init__(virtapi)
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps(virtapi)
self._snapshotops = snapshotops.SnapshotOps()
self._livemigrationops = livemigrationops.LiveMigrationOps()
self._migrationops = migrationops.MigrationOps()
self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()
self._serialconsoleops = serialconsoleops.SerialConsoleOps()
self._imagecache = imagecache.ImageCache()
def _check_minimum_windows_version(self):
if not utilsfactory.get_hostutils().check_min_windows_version(6, 2):
# the version is of Windows is older than Windows Server 2012 R2.
# Log an error, letting users know that this version is not
# supported any longer.
LOG.error(_LE('You are running nova-compute on an unsupported '
'version of Windows (older than Windows / Hyper-V '
'Server 2012). The support for this version of '
'Windows has been removed in Mitaka.'))
raise exception.HypervisorTooOld(version='6.2')
@property
def need_legacy_block_device_info(self):
return False
def init_host(self, host):
self._serialconsoleops.start_console_handlers()
event_handler = eventhandler.InstanceEventHandler(
state_change_callback=self.emit_event)
event_handler.start_listener()
def list_instance_uuids(self):
return self._vmops.list_instance_uuids()
def list_instances(self):
return self._vmops.list_instances()
def estimate_instance_overhead(self, instance_info):
return self._vmops.estimate_instance_overhead(instance_info)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
self.unplug_vifs(instance, network_info)
def get_info(self, instance):
return self._vmops.get_info(instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
return self._volumeops.attach_volume(connection_info,
instance.name)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
return self._volumeops.detach_volume(connection_info,
instance.name)
def get_volume_connector(self, instance):
return self._volumeops.get_volume_connector()
def get_available_resource(self, nodename):
return self._hostops.get_available_resource()
def get_available_nodes(self, refresh=False):
return [platform.node()]
def host_power_action(self, action):
return self._hostops.host_power_action(action)
def snapshot(self, context, instance, image_id, update_task_state):
self._snapshotops.snapshot(context, instance, image_id,
update_task_state)
def pause(self, instance):
self._vmops.pause(instance)
def unpause(self, instance):
self._vmops.unpause(instance)
def suspend(self, context, instance):
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
self._vmops.resume(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
self._vmops.power_off(instance, timeout, retry_interval)
def power_on(self, context, instance, network_info,
block_device_info=None):
self._vmops.power_on(instance, block_device_info, network_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self._vmops.resume_state_on_host_boot(context, instance, network_info,
block_device_info)
def live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
self._livemigrationops.live_migration(context, instance, dest,
post_method, recover_method,
block_migration, migrate_data)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
self.destroy(context, instance, network_info, block_device_info,
destroy_disks=destroy_disks)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data):
self._livemigrationops.pre_live_migration(context, instance,
block_device_info,
network_info)
return migrate_data
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
self._livemigrationops.post_live_migration(context, instance,
block_device_info,
migrate_data)
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
self._livemigrationops.post_live_migration_at_destination(
context,
instance,
network_info,
block_migration)
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return self._livemigrationops.check_can_live_migrate_destination(
context, instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
def cleanup_live_migration_destination_check(self, context,
dest_check_data):
self._livemigrationops.cleanup_live_migration_destination_check(
context, dest_check_data)
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
return self._livemigrationops.check_can_live_migrate_source(
context, instance, dest_check_data)
def get_instance_disk_info(self, instance, block_device_info=None):
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
self._vmops.plug_vifs(instance, network_info)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
self._vmops.unplug_vifs(instance, network_info)
def ensure_filtering_rules_for_instance(self, instance, network_info):
LOG.debug("ensure_filtering_rules_for_instance called",
instance=instance)
def unfilter_instance(self, instance, network_info):
LOG.debug("unfilter_instance called", instance=instance)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
return self._migrationops.migrate_disk_and_power_off(context,
instance, dest,
flavor,
network_info,
block_device_info,
timeout,
retry_interval)
def confirm_migration(self, context, migration, instance, network_info):
self._migrationops.confirm_migration(context, migration,
instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
self._migrationops.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
self._migrationops.finish_migration(context, migration, instance,
disk_info, network_info,
image_meta, resize_instance,
block_device_info, power_on)
def get_host_ip_addr(self):
return self._hostops.get_host_ip_addr()
def get_host_uptime(self):
return self._hostops.get_host_uptime()
def get_rdp_console(self, context, instance):
return self._rdpconsoleops.get_rdp_console(instance)
def get_serial_console(self, context, instance):
return self._serialconsoleops.get_serial_console(instance.name)
def get_console_output(self, context, instance):
return self._serialconsoleops.get_console_output(instance.name)
def manage_image_cache(self, context, all_instances):
self._imagecache.update(context, all_instances)
def attach_interface(self, context, instance, image_meta, vif):
return self._vmops.attach_interface(instance, vif)
def detach_interface(self, context, instance, vif):
return self._vmops.detach_interface(instance, vif)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
self._vmops.rescue_instance(context, instance, network_info,
image_meta, rescue_password)
def unrescue(self, instance, network_info):
self._vmops.unrescue_instance(instance)
| apache-2.0 | 9,137,694,425,202,053,000 | 41.706704 | 79 | 0.595461 | false |
Cloudzero/cloudzero-reactor-aws | reactor/features/resources/enrich_plugins/ec2/network_interface.py | 1 | 12944 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-present, CloudZero, Inc. All rights reserved.
# Licensed under the BSD-style license. See LICENSE file in the project root for full license information.
"""
Enrichment plugin for EC2 Network Interface (ENI) resource types.
This resource type is explicitly supported by Amazon.
"""
import re
from reactor.features.accounts.probe import load_metadata_for_single_resource
import reactor.features.resources.resource as resource
import lambda_tools as tools
import reactor.common.czrn as czrn
logger = tools.setup_logging('reactor')
def enrich_resource(account_context, resource_stream_event):
"""
Enriches a resource record with as much information as possible given metadata we've loaded previously from
a connected cloud account. Includes things like properties, tags, and explicit resource relationships.
Args:
account_context (attrdict): Contextual information about the connected account and the Reactor installation
resource_stream_event (dict): An un-enriched resource record
Returns:
dict - An enriched resource record
"""
logger.info(f"Received for enrichment {resource_stream_event['res_id']}")
data = {}
service_type = resource_stream_event['service_type']
resource_type = resource_stream_event['resource_type']
cloud_local_id = resource_stream_event['cloud_local_id']
interface = load_metadata_for_single_resource(account_context, service_type, resource_type, cloud_local_id)
data['properties'] = _get_interface_properties(interface)
data['relationships'] = _get_interface_relationships(interface, resource_stream_event, data['properties'])
data['tags'] = _get_interface_tags(interface)
enriched_resource = resource.create_from_stream_event(resource_stream_event,
resource.DISPOSITION_IDENTIFIED, data)
interface_id = data['properties']['network_interface_id']
enriched_resource['cloud_availability_zone'] = data['properties'].get('availability_zone')
enriched_resource['cloud_vpc_id'] = data['properties'].get('vpc_id')
enriched_resource['resource_state'] = data['properties'].get('status')
enriched_resource['cloud_local_id'] = interface_id
enriched_resource['cloud_global_id'] = None # Instances don't have ARNs
# For future use in CZ-NRF
enriched_resource['resource_properties'] = None
enriched_resource['resource_relationships'] = None
enriched_resource['resource_tags'] = None
enriched_resource['resource_present'] = True
# TODO: this one is obsolete
enriched_resource['resource_name'] = interface_id
# For now, make missing network resources highly visible so we can enhance this plugin
if not all({data['properties']['attachment_service_type'], data['properties']['attachment_resource_type']}):
logger.warning(f"ec2.network-interface: unknown attachment type for {enriched_resource['res_id']}")
logger.info(f"Enriched {resource_stream_event['res_id']}")
return enriched_resource
def _get_interface_properties(interface_data):
all_properties = {}
# Simple string properties, easy to load and store
string_lookup_keys = {
('Attachment', 'AttachTime'), ('Attachment', 'DeleteOnTermination'),
('Attachment', 'DeviceIndex'),
('Attachment', 'Status'), ('Description',), ('InterfaceType',), ('NetworkInterfaceId',), ('OwnerId',),
('RequesterManaged',), ('SourceDestCheck',), ('Status',),
# These are really pseudo-properties - properties belonging to other resources but are
# rolled up by AWS to be defined as if they belong to this resource.
        # Eventually, we want to derive these via resource relationships, but in the absence of that capability
# we'll pretend they are real properties for now so users can see information they need.
# TODO: remove these when we are able to represent them via relationship traversal.
('AvailabilityZone',), ('MacAddress',), ('PrivateDnsName',), ('PrivateIpAddress',), ('VpcId',),
('SubnetId',), ('Association', 'PublicDnsName',), ('Association', 'PublicIp',),
}
all_properties['attachment_id'] = interface_data.get('Attachment', {}).get('AttachmentId')
# Array properties; these need to be loaded from a given key spec based on a loop.
# Generalizing this is challenging - as more examples become available maybe we can try it.
array_properties = {
# These are also pseudo-properties.
# TODO: remove these when we are able to represent them via relationship traversal.
'security_groups': [
{resource.normalize_property_key_names(k): v for k, v in g.items()}
for g in interface_data.get('Groups', [])
]
}
all_properties.update(**resource.extract_string_properties(interface_data, string_lookup_keys))
all_properties.update(**array_properties)
# Special properties to reflect what kind of resource this interface is attached to.
# This matters a lot when defining network resource relationships.
(all_properties['attachment_service_type'],
all_properties['attachment_resource_type'],
all_properties['attachment_resource_local_id']) = _get_attachment_details(interface_data)
logger.debug(f'ec2.network-interface: properties found: {all_properties}')
return all_properties
def _get_interface_relationships(interface_data, resource_stream_event, properties):
relationships = []
region = resource_stream_event['region']
account_id = resource_stream_event['cloud_account_id']
attachment_service_type = properties['attachment_service_type']
attachment_resource_type = properties['attachment_resource_type']
attachment_resource_local_id = properties['attachment_resource_local_id']
interface_id = interface_data.get('NetworkInterfaceId')
public_ip_owner = interface_data.get('Association', {}).get('IpOwnerId')
subnet_id = interface_data.get('SubnetId')
if subnet_id:
subnet_czrn = czrn.create_from_components(service_type='ec2', resource_type='subnet',
cloud_local_id=subnet_id,
region=region, account_id=account_id)
relationships.append(subnet_czrn)
security_groups = interface_data.get('Groups', [])
for security_group in security_groups:
security_group_id = security_group.get('GroupId')
if security_group_id:
security_group_czrn = czrn.create_from_components(service_type='ec2', resource_type='security-group',
cloud_local_id=security_group_id,
region=region, account_id=account_id)
relationships.append(security_group_czrn)
if not all({attachment_service_type, attachment_resource_type, interface_id, attachment_resource_local_id}):
logger.warning(f"Not yet able to process: {interface_id} "
f"- {attachment_service_type} {attachment_resource_type} "
f"{interface_id} {attachment_resource_local_id}")
return relationships
network_local_id_prefix = (f'{attachment_service_type}|{attachment_resource_type}|'
f'{attachment_resource_local_id}|{interface_id}')
mac_address = interface_data.get('MacAddress')
if mac_address:
mac_address_czrn = czrn.create_from_components(service_type='cz-network', resource_type='mac-address',
cloud_local_id=f'{network_local_id_prefix}|{mac_address}',
region=region, account_id=account_id)
relationships.append(mac_address_czrn)
public_ip = interface_data.get('Association', {}).get('PublicIp')
if public_ip:
public_ip_type = 'elastic-ip' if public_ip_owner != 'amazon' else 'public-ipv4'
public_ip_czrn = czrn.create_from_components(service_type='cz-network', resource_type=public_ip_type,
cloud_local_id=f'{network_local_id_prefix}|{public_ip}',
region=region, account_id=account_id)
relationships.append(public_ip_czrn)
public_dns = interface_data.get('Association', {}).get('PublicDnsName')
if public_dns:
public_dns_czrn = czrn.create_from_components(service_type='cz-network', resource_type='public-dns-entry',
cloud_local_id=f'{network_local_id_prefix}|{public_dns}',
region=region, account_id=account_id)
relationships.append(public_dns_czrn)
ipv6_addresses = interface_data.get('Ipv6Addresses', [])
for ipv6_address in [d.get('Ipv6Address') for d in ipv6_addresses if d.get('Ipv6Address')]:
ipv6_address_czrn = czrn.create_from_components(service_type='cz-network', resource_type='ipv6',
cloud_local_id=f'{network_local_id_prefix}|{ipv6_address}',
region=region, account_id=account_id)
relationships.append(ipv6_address_czrn)
private_addresses = interface_data.get('PrivateIpAddresses', [])
for private_address in private_addresses:
private_ip = private_address.get('PrivateIpAddress')
private_dns = private_address.get('PrivateDnsName')
if private_ip:
private_ip_czrn = czrn.create_from_components(service_type='cz-network', resource_type='private-ipv4',
cloud_local_id=f'{network_local_id_prefix}|{private_ip}',
region=region, account_id=account_id)
relationships.append(private_ip_czrn)
if private_dns:
private_dns_czrn = czrn.create_from_components(service_type='cz-network', resource_type='private-dns-entry',
cloud_local_id=f'{network_local_id_prefix}|{private_dns}',
region=region, account_id=account_id)
relationships.append(private_dns_czrn)
logger.debug(f'ec2.network-interface: relationships found: {relationships}')
return relationships
def _get_interface_tags(interface_data):
tags = [{resource.normalize_property_key_names(k): v for k, v in d.items()} for d in
interface_data.get('TagSet', [])]
logger.debug(f'ec2.network-interface: tags found: {tags}')
return tags
def _get_attachment_details(interface_data):
attachment_service_type = None
attachment_resource_type = None
attachment_resource_local_id = None
# different services record ENI attachments differently
requestor = interface_data.get('RequesterId')
owner = interface_data.get('Attachment', {}).get('InstanceOwnerId')
instance_id = interface_data.get('Attachment', {}).get('InstanceId')
description = interface_data.get('Description', "")
if not any({requestor, owner, instance_id, description}):
return None, None, None
if owner == 'aws-lambda' and ':' in requestor and instance_id is None:
attachment_service_type = 'lambda'
attachment_resource_type = 'function'
attachment_resource_local_id = requestor.split(':')[-1]
elif instance_id:
        # TODO: this probably won't handle old-style short instance IDs like i-28925870
        if instance_id.startswith('i-') and re.search(r'^\d{12}$', owner):
attachment_service_type = 'ec2'
attachment_resource_type = 'instance'
attachment_resource_local_id = instance_id
elif description:
if description == "RDSNetworkInterface":
attachment_service_type = 'rds'
attachment_resource_type = 'instance'
attachment_resource_local_id = None # we will need to resolve this from the ENI attachment to get this
elif description.startswith('ELB'):
attachment_service_type = 'elasticloadbalancing'
attachment_resource_type = 'aws-service'
attachment_resource_local_id = None # and this
elif description.startswith('ElastiCache'):
attachment_service_type = 'ec'
attachment_resource_type = 'aws-service'
attachment_resource_local_id = None # and this
else:
logger.warning(f'Unclear attachment details for interface: {interface_data}')
# TODO: Figure out how to handle network interfaces with net resources but no attachment at all
return attachment_service_type, attachment_resource_type, attachment_resource_local_id
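# Illustrative sketch only -- not part of the original plugin. It exercises
# _get_attachment_details() with hand-built payloads; the field values below are
# assumptions modelled on the DescribeNetworkInterfaces response shape.
def _example_attachment_details():
    lambda_eni = {
        'RequesterId': '123456789012:my-function',
        'Attachment': {'InstanceOwnerId': 'aws-lambda'},
        'Description': 'AWS Lambda VPC ENI',
    }
    instance_eni = {
        'Attachment': {'InstanceOwnerId': '123456789012',
                       'InstanceId': 'i-0abc123def4567890'},
        'Description': '',
    }
    # Expected: ('lambda', 'function', 'my-function') and
    # ('ec2', 'instance', 'i-0abc123def4567890')
    return _get_attachment_details(lambda_eni), _get_attachment_details(instance_eni)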
| bsd-3-clause | -3,504,019,114,605,472,000 | 50.569721 | 120 | 0.644391 | false |
elkingtonmcb/cloudbiolinux | cloudbio/cloudbiolinux.py | 10 | 1916 | """CloudBioLinux specific scripts
"""
import os
from fabric.api import *
from fabric.contrib.files import *
from cloudbio.custom import shared
def _freenx_scripts(env):
"""Provide graphical access to clients via FreeNX.
"""
home_dir = env.safe_run_output("echo $HOME")
setup_script = "setupnx.sh"
bin_dir = shared._get_bin_dir(env)
install_file_dir = os.path.join(env.config_dir, os.pardir, "installed_files")
if not env.safe_exists(os.path.join(bin_dir, setup_script)):
env.safe_put(os.path.join(install_file_dir, setup_script),
os.path.join(home_dir, setup_script))
env.safe_run("chmod 0777 %s" % os.path.join(home_dir, setup_script))
env.safe_sudo("mv %s %s" % (os.path.join(home_dir, setup_script), bin_dir))
remote_login = "configure_freenx.sh"
if not env.safe_exists(os.path.join(home_dir, remote_login)):
env.safe_put(os.path.join(install_file_dir, 'bash_login'), os.path.join(home_dir, remote_login))
env.safe_run("chmod 0777 %s" % os.path.join(home_dir, remote_login))
_configure_gnome(env)
def _cleanup_space(env):
"""Cleanup to recover space from builds and packages.
"""
env.logger.info("Cleaning up space from package builds")
with settings(warn_only=True):
env.safe_sudo("rm -rf .cpanm")
env.safe_sudo("rm -f /var/crash/*")
env.safe_run("rm -f ~/*.dot")
env.safe_run("rm -f ~/*.log")
def _configure_gnome(env):
"""Configure NX server to use classic GNOME.
http://askubuntu.com/questions/50503/why-do-i-get-unity-instead-of-classic-when-using-nx
http://notepad2.blogspot.com/2012/04/install-freenx-server-on-ubuntu-1110.html
"""
add = 'COMMAND_START_GNOME="gnome-session --session gnome-fallback"'
fname = "/etc/nxserver/node.conf"
if env.safe_exists("/etc/nxserver/"):
env.safe_append(fname, add, use_sudo=True)
| mit | -2,508,022,323,949,165,000 | 40.652174 | 104 | 0.655532 | false |
andreaso/ansible | lib/ansible/playbook/play.py | 11 | 12480 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.vars import preprocess_vars
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['Play']
class Play(Base, Taggable, Become):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
_name = FieldAttribute(isa='string', default='', always_post_validate=True)
# TODO: generalize connection
_accelerate = FieldAttribute(isa='bool', default=False, always_post_validate=True)
_accelerate_ipv6 = FieldAttribute(isa='bool', default=False, always_post_validate=True)
_accelerate_port = FieldAttribute(isa='int', default=5099, always_post_validate=True)
# Connection
_fact_path = FieldAttribute(isa='string', default=None)
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
_gather_subset = FieldAttribute(isa='barelist', default=None, always_post_validate=True)
_gather_timeout = FieldAttribute(isa='int', default=None, always_post_validate=True)
_hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True)
# Variable Attributes
_vars_files = FieldAttribute(isa='list', default=[], priority=99)
_vars_prompt = FieldAttribute(isa='list', default=[], always_post_validate=True)
_vault_password = FieldAttribute(isa='string', always_post_validate=True)
# Role Attributes
_roles = FieldAttribute(isa='list', default=[], priority=90)
# Block (Task) Lists Attributes
_handlers = FieldAttribute(isa='list', default=[])
_pre_tasks = FieldAttribute(isa='list', default=[])
_post_tasks = FieldAttribute(isa='list', default=[])
_tasks = FieldAttribute(isa='list', default=[])
# Flag/Setting Attributes
_force_handlers = FieldAttribute(isa='bool', always_post_validate=True)
_max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
_serial = FieldAttribute(isa='list', default=[], always_post_validate=True)
_strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
_order = FieldAttribute(isa='string', always_post_validate=True)
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self._included_conditional = None
self._included_path = None
self._removed_hosts = []
self.ROLE_CACHE = {}
def __repr__(self):
return self.get_name()
def get_name(self):
''' return the name of the Play '''
return self._attributes.get('name')
@staticmethod
def load(data, variable_manager=None, loader=None):
if ('name' not in data or data['name'] is None) and 'hosts' in data:
if isinstance(data['hosts'], list):
data['name'] = ','.join(data['hosts'])
else:
data['name'] = data['hosts']
p = Play()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def preprocess_data(self, ds):
'''
Adjusts play datastructure to cleanup old/legacy items
'''
assert isinstance(ds, dict)
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for %s."
" The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
return super(Play, self).preprocess_data(ds)
def _load_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_pre_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_post_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_handlers(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed block was encountered.", obj=self._ds)
def _load_roles(self, attr, ds):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
'''
if ds is None:
ds = []
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds)
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
return roles
def _load_vars_prompt(self, attr, ds):
new_ds = preprocess_vars(ds)
vars_prompts = []
for prompt_data in new_ds:
if 'name' not in prompt_data:
display.deprecated("Using the 'short form' for vars_prompt has been deprecated", version="2.7")
for vname, prompt in prompt_data.items():
vars_prompts.append(dict(
name = vname,
prompt = prompt,
default = None,
private = None,
confirm = None,
encrypt = None,
salt_size = None,
salt = None,
))
else:
vars_prompts.append(prompt_data)
return vars_prompts
def _compile_roles(self):
'''
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
        D2 are merged first, followed by D1, and finally by the tasks from
        the parent role R. This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.compile(play=self))
return block_list
def compile_roles_handlers(self):
'''
        Handles the role handler compilation step, returning a flat list of Handlers.
This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
block_list.extend(r.get_handler_blocks(play=self))
return block_list
def compile(self):
'''
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
'''
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block.load(
data={'meta': 'flush_handlers'},
play=self,
variable_manager=self._variable_manager,
loader=self._loader
)
block_list = []
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def serialize(self):
data = super(Play, self).serialize()
roles = []
for role in self.get_roles():
roles.append(role.serialize())
data['roles'] = roles
data['included_path'] = self._included_path
return data
def deserialize(self, data):
super(Play, self).deserialize(data)
self._included_path = data.get('included_path', None)
if 'roles' in data:
role_data = data.get('roles', [])
roles = []
for role in role_data:
r = Role()
r.deserialize(role)
roles.append(r)
setattr(self, 'roles', roles)
del data['roles']
def copy(self):
new_me = super(Play, self).copy()
new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
new_me._included_conditional = self._included_conditional
new_me._included_path = self._included_path
return new_me
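# Illustrative sketch only -- not part of the original module. It demonstrates the
# documented 'user' -> 'remote_user' rename performed by preprocess_data(); the
# play datastructure is made up, and the example assumes the base class leaves
# unrelated keys untouched.
def _example_user_rename():
    play = Play()
    ds = play.preprocess_data({'name': 'demo play', 'hosts': ['web'], 'user': 'deploy'})
    # 'user' is removed and its value now lives under 'remote_user'.
    return ds['remote_user'], 'user' in ds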
| gpl-3.0 | -7,250,526,678,271,705,000 | 36.253731 | 137 | 0.59976 | false |
akaariai/django | tests/httpwrappers/tests.py | 30 | 26829 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import copy
import json
import os
import pickle
import unittest
from django.core.exceptions import SuspiciousOperation
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (
BadHeaderError, HttpResponse, HttpResponseNotAllowed,
HttpResponseNotModified, HttpResponsePermanentRedirect,
HttpResponseRedirect, JsonResponse, QueryDict, SimpleCookie,
StreamingHttpResponse, parse_cookie,
)
from django.test import TestCase
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_text, smart_str
from django.utils.functional import lazy
lazystr = lazy(force_text, six.text_type)
class QueryDictTests(unittest.TestCase):
def test_create_with_no_args(self):
self.assertEqual(QueryDict(), QueryDict(str('')))
def test_missing_key(self):
q = QueryDict()
self.assertRaises(KeyError, q.__getitem__, 'foo')
def test_immutability(self):
q = QueryDict()
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
def test_immutable_get_with_default(self):
q = QueryDict()
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict()
self.assertEqual(q.getlist('foo'), [])
if six.PY2:
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(list(six.iteritems(q)), [])
self.assertEqual(list(six.iterlists(q)), [])
self.assertEqual(list(six.iterkeys(q)), [])
self.assertEqual(list(six.itervalues(q)), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict(str('foo=bar'))
self.assertEqual(q['foo'], 'bar')
self.assertRaises(KeyError, q.__getitem__, 'bar')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertIn('foo', q)
if six.PY2:
self.assertFalse(q.has_key('bar'))
self.assertNotIn('bar', q)
self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
self.assertEqual(list(six.iterkeys(q)), ['foo'])
self.assertEqual(list(six.itervalues(q)), ['bar'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict(mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict(mutable=True)
q['next'] = '/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict().copy()
self.assertRaises(KeyError, q.__getitem__, "foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
del q['name']
self.assertNotIn('name', q)
def test_basic_mutable_operations(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.get('name', 'default'), 'john')
self.assertEqual(q.getlist('name'), ['john'])
self.assertEqual(q.getlist('foo'), [])
q.setlist('foo', ['bar', 'baz'])
self.assertEqual(q.get('foo', 'default'), 'baz')
self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
q.appendlist('foo', 'another')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
self.assertEqual(q['foo'], 'another')
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertIn('foo', q)
self.assertListEqual(sorted(list(six.iteritems(q))),
[('foo', 'another'), ('name', 'john')])
self.assertListEqual(sorted(list(six.iterlists(q))),
[('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
self.assertListEqual(sorted(list(six.iterkeys(q))),
['foo', 'name'])
self.assertListEqual(sorted(list(six.itervalues(q))),
['another', 'john'])
q.update({'foo': 'hello'})
self.assertEqual(q['foo'], 'hello')
self.assertEqual(q.get('foo', 'not available'), 'hello')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo', 'not there'), 'not there')
self.assertEqual(q.get('foo', 'not there'), 'not there')
self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
self.assertEqual(q['foo'], 'bar')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict(str('vote=yes&vote=no'))
self.assertEqual(q['vote'], 'no')
self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
self.assertEqual(q.get('vote', 'default'), 'no')
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.getlist('vote'), ['yes', 'no'])
self.assertEqual(q.getlist('foo'), [])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
if six.PY2:
self.assertEqual(q.has_key('vote'), True)
self.assertEqual('vote' in q, True)
if six.PY2:
self.assertEqual(q.has_key('foo'), False)
self.assertEqual('foo' in q, False)
self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
self.assertEqual(list(six.iterkeys(q)), ['vote'])
self.assertEqual(list(six.itervalues(q)), ['no'])
self.assertEqual(len(q), 1)
self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
self.assertRaises(AttributeError, q.pop, 'foo')
self.assertRaises(AttributeError, q.popitem)
self.assertRaises(AttributeError, q.clear)
self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
self.assertRaises(AttributeError, q.__delitem__, 'vote')
if six.PY2:
def test_invalid_input_encoding(self):
"""
QueryDicts must be able to handle invalid input encoding (in this
case, bad UTF-8 encoding), falling back to ISO-8859-1 decoding.
This test doesn't apply under Python 3 because the URL is a string
and not a bytestring.
"""
q = QueryDict(str(b'foo=bar&foo=\xff'))
self.assertEqual(q['foo'], '\xff')
self.assertEqual(q.getlist('foo'), ['bar', '\xff'])
def test_pickle(self):
q = QueryDict()
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
q = QueryDict(str('a=b&c=d'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
q = QueryDict(str('a=b&c=d&a=1'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q == q1, True)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict(str("a=1&a=2"), mutable=True)
y = QueryDict(str("a=3&a=4"))
x.update(y)
self.assertEqual(x.getlist('a'), ['1', '2', '3', '4'])
def test_non_default_encoding(self):
"""#13572 - QueryDict with a non-default encoding"""
q = QueryDict(str('cur=%A4'), encoding='iso-8859-15')
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
q = q.copy()
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
class HttpResponseTests(unittest.TestCase):
def test_headers_type(self):
r = HttpResponse()
# The following tests explicitly test types in addition to values
# because in Python 2 u'foo' == b'foo'.
# ASCII unicode or bytes values are converted to native strings.
r['key'] = 'test'
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
r['key'] = 'test'.encode('ascii')
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
self.assertIn(b'test', r.serialize_headers())
# Latin-1 unicode or bytes values are also converted to native strings.
r['key'] = 'café'
self.assertEqual(r['key'], smart_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
r['key'] = 'café'.encode('latin-1')
self.assertEqual(r['key'], smart_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
self.assertIn('café'.encode('latin-1'), r.serialize_headers())
# Other unicode values are MIME-encoded (there's no way to pass them as bytes).
r['key'] = '†'
self.assertEqual(r['key'], str('=?utf-8?b?4oCg?='))
self.assertIsInstance(r['key'], str)
self.assertIn(b'=?utf-8?b?4oCg?=', r.serialize_headers())
# The response also converts unicode or bytes keys to strings, but requires
# them to contain ASCII
r = HttpResponse()
del r['Content-Type']
r['foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
del r['Content-Type']
r[b'foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
self.assertRaises(UnicodeError, r.__setitem__, 'føø', 'bar')
self.assertRaises(UnicodeError, r.__setitem__, 'føø'.encode('utf-8'), 'bar')
def test_long_line(self):
# Bug #20889: long lines trigger newlines to be added to headers
# (which is not allowed due to bug #10188)
h = HttpResponse()
f = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88'.encode('latin-1')
f = f.decode('utf-8')
h['Content-Disposition'] = 'attachment; filename="%s"' % f
        # This one is triggering http://bugs.python.org/issue20747, that is, Python
# will itself insert a newline in the header
h['Content-Disposition'] = 'attachement; filename="EdelRot_Blu\u0308te (3)-0.JPG"'
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
self.assertRaises(BadHeaderError, r.__setitem__, 'test\rstr', 'test')
self.assertRaises(BadHeaderError, r.__setitem__, 'test\nstr', 'test')
def test_dict_behavior(self):
"""
Test for bug #14020: Make HttpResponse.get work like dict.get
"""
r = HttpResponse()
self.assertEqual(r.get('test'), None)
def test_non_string_content(self):
# Bug 16494: HttpResponse should behave consistently with non-strings
r = HttpResponse(12345)
self.assertEqual(r.content, b'12345')
# test content via property
r = HttpResponse()
r.content = 12345
self.assertEqual(r.content, b'12345')
def test_iter_content(self):
r = HttpResponse(['abc', 'def', 'ghi'])
self.assertEqual(r.content, b'abcdefghi')
# test iter content via property
r = HttpResponse()
r.content = ['idan', 'alex', 'jacob']
self.assertEqual(r.content, b'idanalexjacob')
r = HttpResponse()
r.content = [1, 2, 3]
self.assertEqual(r.content, b'123')
# test odd inputs
r = HttpResponse()
r.content = ['1', '2', 3, '\u079e']
# '\xde\x9e' == unichr(1950).encode('utf-8')
self.assertEqual(r.content, b'123\xde\x9e')
# .content can safely be accessed multiple times.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, r.content)
self.assertEqual(r.content, b'helloworld')
# __iter__ can safely be called multiple times (#20187).
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(b''.join(r), b'helloworld')
# Accessing .content still works.
self.assertEqual(r.content, b'helloworld')
# Accessing .content also works if the response was iterated first.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(r.content, b'helloworld')
# Additional content can be written to the response.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, b'helloworld')
r.write('!')
self.assertEqual(r.content, b'helloworld!')
def test_iterator_isnt_rewound(self):
# Regression test for #13222
r = HttpResponse('abc')
i = iter(r)
self.assertEqual(list(i), [b'abc'])
self.assertEqual(list(i), [])
def test_lazy_content(self):
r = HttpResponse(lazystr('helloworld'))
self.assertEqual(r.content, b'helloworld')
def test_file_interface(self):
r = HttpResponse()
r.write(b"hello")
self.assertEqual(r.tell(), 5)
r.write("привет")
self.assertEqual(r.tell(), 17)
r = HttpResponse(['abc'])
r.write('def')
self.assertEqual(r.tell(), 6)
self.assertEqual(r.content, b'abcdef')
# with Content-Encoding header
r = HttpResponse()
r['Content-Encoding'] = 'winning'
r.write(b'abc')
r.write(b'def')
self.assertEqual(r.content, b'abcdef')
def test_stream_interface(self):
r = HttpResponse('asdf')
self.assertEqual(r.getvalue(), b'asdf')
r = HttpResponse()
self.assertEqual(r.writable(), True)
r.writelines(['foo\n', 'bar\n', 'baz\n'])
self.assertEqual(r.content, b'foo\nbar\nbaz\n')
def test_unsafe_redirect(self):
bad_urls = [
'data:text/html,<script>window.alert("xss")</script>',
'mailto:[email protected]',
'file:///etc/passwd',
]
for url in bad_urls:
self.assertRaises(SuspiciousOperation,
HttpResponseRedirect, url)
self.assertRaises(SuspiciousOperation,
HttpResponsePermanentRedirect, url)
class HttpResponseSubclassesTests(TestCase):
def test_redirect(self):
response = HttpResponseRedirect('/redirected/')
self.assertEqual(response.status_code, 302)
# Test that standard HttpResponse init args can be used
response = HttpResponseRedirect('/redirected/',
content='The resource has temporarily moved',
content_type='text/html')
self.assertContains(response, 'The resource has temporarily moved', status_code=302)
# Test that url attribute is right
self.assertEqual(response.url, response['Location'])
def test_redirect_lazy(self):
"""Make sure HttpResponseRedirect works with lazy strings."""
r = HttpResponseRedirect(lazystr('/redirected/'))
self.assertEqual(r.url, '/redirected/')
def test_not_modified(self):
response = HttpResponseNotModified()
self.assertEqual(response.status_code, 304)
# 304 responses should not have content/content-type
with self.assertRaises(AttributeError):
response.content = "Hello dear"
self.assertNotIn('content-type', response)
def test_not_allowed(self):
response = HttpResponseNotAllowed(['GET'])
self.assertEqual(response.status_code, 405)
# Test that standard HttpResponse init args can be used
response = HttpResponseNotAllowed(['GET'],
content='Only the GET method is allowed',
content_type='text/html')
self.assertContains(response, 'Only the GET method is allowed', status_code=405)
class JsonResponseTests(TestCase):
def test_json_response_non_ascii(self):
data = {'key': 'łóżko'}
response = JsonResponse(data)
self.assertEqual(json.loads(response.content.decode()), data)
def test_json_response_raises_type_error_with_default_setting(self):
with self.assertRaisesMessage(TypeError,
'In order to allow non-dict objects to be serialized set the '
'safe parameter to False'):
JsonResponse([1, 2, 3])
def test_json_response_text(self):
response = JsonResponse('foobar', safe=False)
self.assertEqual(json.loads(response.content.decode()), 'foobar')
def test_json_response_list(self):
response = JsonResponse(['foo', 'bar'], safe=False)
self.assertEqual(json.loads(response.content.decode()), ['foo', 'bar'])
def test_json_response_custom_encoder(self):
class CustomDjangoJSONEncoder(DjangoJSONEncoder):
def encode(self, o):
return json.dumps({'foo': 'bar'})
response = JsonResponse({}, encoder=CustomDjangoJSONEncoder)
self.assertEqual(json.loads(response.content.decode()), {'foo': 'bar'})
class StreamingHttpResponseTests(TestCase):
def test_streaming_response(self):
r = StreamingHttpResponse(iter(['hello', 'world']))
# iterating over the response itself yields bytestring chunks.
chunks = list(r)
self.assertEqual(chunks, [b'hello', b'world'])
for chunk in chunks:
self.assertIsInstance(chunk, six.binary_type)
# and the response can only be iterated once.
self.assertEqual(list(r), [])
# even when a sequence that can be iterated many times, like a list,
# is given as content.
r = StreamingHttpResponse(['abc', 'def'])
self.assertEqual(list(r), [b'abc', b'def'])
self.assertEqual(list(r), [])
# iterating over Unicode strings still yields bytestring chunks.
r.streaming_content = iter(['hello', 'café'])
chunks = list(r)
# '\xc3\xa9' == unichr(233).encode('utf-8')
self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9'])
for chunk in chunks:
self.assertIsInstance(chunk, six.binary_type)
# streaming responses don't have a `content` attribute.
self.assertFalse(hasattr(r, 'content'))
# and you can't accidentally assign to a `content` attribute.
with self.assertRaises(AttributeError):
r.content = 'xyz'
# but they do have a `streaming_content` attribute.
self.assertTrue(hasattr(r, 'streaming_content'))
# that exists so we can check if a response is streaming, and wrap or
# replace the content iterator.
r.streaming_content = iter(['abc', 'def'])
r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
self.assertEqual(list(r), [b'ABC', b'DEF'])
# coercing a streaming response to bytes doesn't return a complete HTTP
# message like a regular response does. it only gives us the headers.
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(
six.binary_type(r), b'Content-Type: text/html; charset=utf-8')
# and this won't consume its content.
self.assertEqual(list(r), [b'hello', b'world'])
# additional content cannot be written to the response.
r = StreamingHttpResponse(iter(['hello', 'world']))
with self.assertRaises(Exception):
r.write('!')
# and we can't tell the current position.
with self.assertRaises(Exception):
r.tell()
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(r.getvalue(), b'helloworld')
class FileCloseTests(TestCase):
def setUp(self):
# Disable the request_finished signal during this test
# to avoid interfering with the database connection.
request_finished.disconnect(close_old_connections)
def tearDown(self):
request_finished.connect(close_old_connections)
def test_response(self):
filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
# file isn't closed until we close the response.
file1 = open(filename)
r = HttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
# don't automatically close file when we finish iterating the response.
file1 = open(filename)
r = HttpResponse(file1)
self.assertFalse(file1.closed)
list(r)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
# when multiple file are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = HttpResponse(file1)
r.content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
def test_streaming_response(self):
filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
# file isn't closed until we close the response.
file1 = open(filename)
r = StreamingHttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
# when multiple file are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = StreamingHttpResponse(file1)
r.streaming_content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
def test_encode(self):
"""
Test that we don't output tricky characters in encoded value
"""
c = SimpleCookie()
c['test'] = "An,awkward;value"
self.assertNotIn(";", c.output().rstrip(';')) # IE compat
self.assertNotIn(",", c.output().rstrip(';')) # Safari compat
def test_decode(self):
"""
Test that we can still preserve semi-colons and commas
"""
c = SimpleCookie()
c['test'] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c['test'].value, c2['test'].value)
def test_decode_2(self):
"""
Test that we haven't broken normal encoding
"""
c = SimpleCookie()
c['test'] = b"\xf0"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c['test'].value, c2['test'].value)
def test_nonstandard_keys(self):
"""
Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
"""
self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes').keys())
def test_repeated_nonstandard_keys(self):
"""
Test that a repeated non-standard name doesn't affect all cookies. Ticket #15852
"""
self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes').keys())
def test_httponly_after_load(self):
"""
Test that we can use httponly attribute on cookies that we load
"""
c = SimpleCookie()
c.load("name=val")
c['name']['httponly'] = True
self.assertTrue(c['name']['httponly'])
def test_load_dict(self):
c = SimpleCookie()
c.load({'name': 'val'})
self.assertEqual(c['name'].value, 'val')
@unittest.skipUnless(six.PY2, "PY3 throws an exception on invalid cookie keys.")
def test_bad_cookie(self):
"""
Regression test for #18403
"""
r = HttpResponse()
r.set_cookie("a:.b/", 1)
self.assertEqual(len(r.cookies.bad_cookies), 1)
def test_pickle(self):
rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
expected_output = 'Set-Cookie: %s' % rawdata
C = SimpleCookie()
C.load(rawdata)
self.assertEqual(C.output(), expected_output)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
C1 = pickle.loads(pickle.dumps(C, protocol=proto))
self.assertEqual(C1.output(), expected_output)
| bsd-3-clause | -6,559,107,247,882,449,000 | 37.511494 | 94 | 0.600022 | false |
jonian/kickoff-player | widgets/streambox.py | 1 | 3755 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject
from helpers.gtk import add_widget_class, image_from_path
class StreamBox(Gtk.Box):
__gtype_name__ = 'StreamBox'
stream = GObject.property(type=object, flags=GObject.PARAM_READWRITE)
callback = GObject.property(type=object, flags=GObject.PARAM_READWRITE)
compact = GObject.property(type=bool, default=False, flags=GObject.PARAM_READWRITE)
def __init__(self, *args, **kwargs):
Gtk.Box.__init__(self, *args, **kwargs)
self.stream = self.get_property('stream')
self.callback = self.get_property('callback')
self.compact = self.get_property('compact')
self.stream_name = self.do_stream_name()
self.stream_rate = self.do_stream_rate()
self.stream_logo = self.do_stream_logo()
self.stream_lang = self.do_stream_language()
self.play_button = self.do_play_button()
self.set_orientation(Gtk.Orientation.HORIZONTAL)
self.connect('realize', self.on_realized)
self.connect('notify::stream', self.on_stream_updated)
self.show()
def on_realized(self, *_args):
self.on_stream_updated(_args)
self.pack_start(self.stream_lang, False, False, 0)
self.pack_start(self.stream_logo, False, False, 1)
self.pack_start(self.stream_name, False, False, 2)
self.pack_end(self.play_button, False, False, 0)
self.pack_end(self.stream_rate, False, False, 1)
def on_stream_updated(self, *_args):
self.update_stream_language()
self.update_stream_logo()
self.update_stream_name()
self.update_play_button()
self.update_stream_rate()
def do_stream_logo(self):
image = image_from_path(path='images/acestream.svg', size=16)
image.set_halign(Gtk.Align.CENTER)
image.set_valign(Gtk.Align.CENTER)
image.set_margin_right(10)
add_widget_class(image, 'stream-image')
return image
def update_stream_logo(self):
logo = getattr(self.stream, 'logo')
image_from_path(path=logo, size=16, image=self.stream_logo)
self.stream_logo.show()
def do_stream_language(self):
label = Gtk.Label('Unknown')
label.set_halign(Gtk.Align.START)
label.set_margin_right(10)
add_widget_class(label, 'stream-language')
return label
def update_stream_language(self):
language = getattr(self.stream, 'language')
self.stream_lang.set_label(language)
if self.compact:
self.stream_lang.hide()
else:
self.stream_lang.show()
def do_stream_rate(self):
label = Gtk.Label('0Kbps')
label.set_halign(Gtk.Align.END)
label.set_margin_right(10)
add_widget_class(label, 'stream-rate')
return label
def update_stream_rate(self):
ratio = "%sKbps" % str(getattr(self.stream, 'rate'))
self.stream_rate.set_label(ratio)
self.stream_rate.show()
def do_stream_name(self):
label = Gtk.Label('Unknown Channel')
label.set_halign(Gtk.Align.START)
label.set_margin_right(10)
add_widget_class(label, 'stream-name')
return label
def update_stream_name(self):
chan = getattr(self.stream, 'channel')
name = 'Unknown Channel' if chan is None else getattr(chan, 'name')
self.stream_name.set_label(name)
if self.compact:
self.stream_name.hide()
else:
self.stream_name.show()
def do_play_button(self):
kwargs = { 'icon_name': 'media-playback-start-symbolic', 'size': Gtk.IconSize.BUTTON }
button = Gtk.Button.new_from_icon_name(**kwargs)
button.set_halign(Gtk.Align.END)
button.connect('clicked', self.on_play_button_clicked)
add_widget_class(button, 'stream-play')
return button
def update_play_button(self):
self.play_button.show()
def on_play_button_clicked(self, _widget):
self.callback(self.stream)
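# Illustrative sketch only -- not part of the original widget module. It shows the
# intended construction pattern, passing the GObject properties as keyword
# arguments; the stream object and on_play callback are assumed to be supplied by
# the caller (e.g. a stream model row and a function that launches the player).
def _example_build_stream_box(stream, on_play):
    box = StreamBox(stream=stream, callback=on_play, compact=True, spacing=10)
    # The widget populates itself on 'realize' and calls on_play(stream) when the
    # play button is clicked.
    return box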
| gpl-3.0 | 7,803,798,225,231,156,000 | 27.44697 | 90 | 0.677763 | false |
Ladaniels/censusreporter | censusreporter/apps/census/templatetags/partition.py | 10 | 4524 | """
Template filters to partition lists into rows or columns.
A common use-case is for splitting a list into a table with columns::
{% load partition %}
<table>
{% for row in mylist|columns:3 %}
<tr>
{% for item in row %}
<td>{{ item }}</td>
{% endfor %}
</tr>
{% endfor %}
</table>
"""
from django.template import Library
register = Library()
def rows(thelist, n):
"""
Break a list into ``n`` rows, filling up each row to the maximum equal
length possible. For example::
>>> l = range(10)
>>> rows(l, 2)
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> rows(l, 3)
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
>>> rows(l, 4)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> rows(l, 5)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
>>> rows(l, 9)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [], [], [], []]
# This filter will always return `n` rows, even if some are empty:
>>> rows(range(2), 3)
[[0], [1], []]
"""
try:
n = int(n)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
list_len = len(thelist)
split = list_len // n
if list_len % n != 0:
split += 1
return [thelist[split*i:split*(i+1)] for i in range(n)]
def rows_distributed(thelist, n):
"""
Break a list into ``n`` rows, distributing columns as evenly as possible
across the rows. For example::
>>> l = range(10)
>>> rows_distributed(l, 2)
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> rows_distributed(l, 3)
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
>>> rows_distributed(l, 4)
[[0, 1, 2], [3, 4, 5], [6, 7], [8, 9]]
>>> rows_distributed(l, 5)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
>>> rows_distributed(l, 9)
[[0, 1], [2], [3], [4], [5], [6], [7], [8], [9]]
# This filter will always return `n` rows, even if some are empty:
>>> rows(range(2), 3)
[[0], [1], []]
"""
try:
n = int(n)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
list_len = len(thelist)
split = list_len // n
remainder = list_len % n
offset = 0
rows = []
for i in range(n):
if remainder:
start, end = (split+1)*i, (split+1)*(i+1)
else:
start, end = split*i+offset, split*(i+1)+offset
rows.append(thelist[start:end])
if remainder:
remainder -= 1
offset += 1
return rows
def columns_distributed(thelist, n):
"""
Break a list into ``n`` columns, filling up each column to the maximum equal
length possible. For example:
>>> from pprint import pprint
>>> for i in range(7, 11):
... print '%sx%s:' % (i, 3)
    ...     pprint(columns_distributed(range(i), 3), width=20)
7x3:
[[0, 3, 6],
[1, 4],
[2, 5]]
8x3:
[[0, 3, 6],
[1, 4, 7],
[2, 5]]
9x3:
[[0, 3, 6],
[1, 4, 7],
[2, 5, 8]]
10x3:
[[0, 4, 8],
[1, 5, 9],
[2, 6],
[3, 7]]
# Note that this filter does not guarantee that `n` columns will be
# present:
    >>> pprint(columns_distributed(range(4), 3), width=10)
[[0, 2],
[1, 3]]
"""
try:
n = int(n)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
list_len = len(thelist)
split = list_len // n
if list_len % n != 0:
split += 1
return [thelist[i::split] for i in range(split)]
def columns(thelist, n):
""" Break a list into n peices, but "horizontally." That is,
columns_distributed(range(10), 3) gives::
[[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9]]
Clear as mud?
"""
from math import ceil
try:
n = int(n)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
newlists = [list() for i in range(int(ceil(len(thelist) / float(n))))]
for i, val in enumerate(thelist):
newlists[i/n].append(val)
return newlists
register.filter(rows)
register.filter(rows_distributed)
register.filter(columns)
register.filter(columns_distributed)
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test() | mit | -786,298,356,566,191,000 | 23.592391 | 80 | 0.46618 | false |
tomkralidis/GeoHealthCheck | GeoHealthCheck/__init__.py | 3 | 1467 | # =================================================================
#
# Authors: Tom Kralidis <[email protected]>
#
# Copyright (c) 2014 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from util import read
def get_package_version(file_):
"""get version from top-level package init"""
return read(file_)
__version__ = get_package_version('../VERSION')
| mit | -3,211,174,613,477,137,400 | 37.605263 | 67 | 0.687117 | false |
choderalab/ThermoPyL | thermopyl/thermoml_lib.py | 2 | 7207 | import numpy as np
import re
import copy
import pandas as pd
from . import thermoml_schema # Obtained by `wget http://media.iupac.org/namespaces/ThermoML/ThermoML.xsd` and `pyxbgen ThermoML.xsd`
class Parser(object):
def __init__(self, filename):
"""Create a parser object from an XML filename."""
self.filename = filename
self.root = thermoml_schema.CreateFromDocument(open(self.filename).read())
self.store_compounds()
def store_compounds(self):
"""Extract and store compounds from a thermoml XML file."""
self.compound_num_to_name = {}
self.compound_name_to_formula = {}
for Compound in self.root.Compound:
nOrgNum = Compound.RegNum.nOrgNum
sCommonName = Compound.sCommonName[0]
sFormulaMolec = Compound.sFormulaMolec
self.compound_num_to_name[nOrgNum] = sCommonName
self.compound_name_to_formula[sCommonName] = sFormulaMolec
def parse(self):
"""Parse the current XML filename and return a list of measurements."""
alldata = []
for PureOrMixtureData in self.root.PureOrMixtureData:
components = []
for Component in PureOrMixtureData.Component:
nSampleNm = Component.nSampleNm
nOrgNum = Component.RegNum.nOrgNum
sCommonName = self.compound_num_to_name[nOrgNum]
components.append(sCommonName)
components_string = "__".join(components)
property_dict = {}
ePropPhase_dict = {}
for Property in PureOrMixtureData.Property:
nPropNumber = Property.nPropNumber
ePropName = Property.Property_MethodID.PropertyGroup.content()[0].ePropName # ASSUMING LENGTH 1
property_dict[nPropNumber] = ePropName
ePropPhase = Property.PropPhaseID[0].ePropPhase # ASSUMING LENGTH 1
ePropPhase_dict[nPropNumber] = ePropPhase
state = dict(filename=self.filename, components=components_string)
state["Pressure, kPa"] = None # This is the only pressure unit used in ThermoML
state['Temperature, K'] = None # This is the only temperature unit used in ThermoML
composition = dict()
for Constraint in PureOrMixtureData.Constraint:
nConstraintValue = Constraint.nConstraintValue
ConstraintType = Constraint.ConstraintID.ConstraintType
assert len(ConstraintType.content()) == 1
constraint_type = ConstraintType.content()[0]
state[constraint_type] = nConstraintValue
if constraint_type in ["Mole fraction", "Mass Fraction", "Molality, mol/kg", "Solvent: Amount concentration (molarity), mol/dm3"]:
nOrgNum = Constraint.ConstraintID.RegNum.nOrgNum
sCommonName = self.compound_num_to_name[nOrgNum]
if Constraint.Solvent is not None:
solvents = [self.compound_num_to_name[x.nOrgNum] for x in Constraint.Solvent.RegNum]
else:
solvents = []
solvent_string = "%s___%s" % (sCommonName, "__".join(solvents))
state["%s metadata" % constraint_type] = solvent_string
variable_dict = {}
for Variable in PureOrMixtureData.Variable:
nVarNumber = Variable.nVarNumber
VariableType = Variable.VariableID.VariableType
assert len(VariableType.content()) == 1
vtype = VariableType.content()[0] # Assume length 1, haven't found counterexample yet.
variable_dict[nVarNumber] = vtype
if vtype in ["Mole fraction", "Mass Fraction", "Molality, mol/kg", "Solvent: Amount concentration (molarity), mol/dm3"]:
nOrgNum = Variable.VariableID.RegNum.nOrgNum
sCommonName = self.compound_num_to_name[nOrgNum]
if Variable.Solvent is not None:
solvents = [self.compound_num_to_name[x.nOrgNum] for x in Variable.Solvent.RegNum]
else:
solvents = []
solvent_string = "%s___%s" % (sCommonName, "__".join(solvents))
state["%s Variable metadata" % vtype] = solvent_string
for NumValues in PureOrMixtureData.NumValues:
current_data = copy.deepcopy(state) # Copy in values of constraints.
current_composition = copy.deepcopy(composition)
for VariableValue in NumValues.VariableValue:
nVarValue = VariableValue.nVarValue
nVarNumber = VariableValue.nVarNumber
vtype = variable_dict[nVarNumber]
current_data[vtype] = nVarValue
for PropertyValue in NumValues.PropertyValue:
nPropNumber = PropertyValue.nPropNumber
nPropValue = PropertyValue.nPropValue
ptype = property_dict[nPropNumber]
phase = ePropPhase_dict[nPropNumber]
current_data[ptype] = nPropValue
current_data["phase"] = phase
# Now attempt to extract measurement uncertainty for the same measurement
try:
uncertainty = PropertyValue.PropUncertainty[0].nStdUncertValue
except IndexError:
uncertainty = np.nan
current_data[ptype + "_std"] = uncertainty
alldata.append(current_data)
return alldata
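# Illustrative usage (the file name below is made up): parse a ThermoML XML
# entry and collect the measurements into a pandas DataFrame.
#
#   parser = Parser("some_thermoml_entry.xml")
#   measurements = parser.parse()
#   df = pd.DataFrame(measurements)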
def count_atoms(formula_string):
"""Parse a chemical formula and return the total number of atoms."""
element_counts = formula_to_element_counts(formula_string)
return sum(val for key, val in element_counts.items())
def count_atoms_in_set(formula_string, which_atoms):
"""Parse a chemical formula and return the number of atoms in a set of atoms."""
element_counts = formula_to_element_counts(formula_string)
return sum(val for key, val in element_counts.items() if key in which_atoms)
def get_first_entry(cas):
"""If cirpy returns several CAS results, extracts the first one."""
    if isinstance(cas, list):
cas = cas[0]
return cas
def formula_to_element_counts(formula_string):
"""Transform a chemical formula into a dictionary of (element, number) pairs."""
pattern = r'([A-Z][a-z]{0,2}\d*)'
pieces = re.split(pattern, formula_string)
#print "\nformula_string=%r pieces=%r" % (formula_string, pieces)
data = pieces[1::2]
rubbish = filter(None, pieces[0::2])
pattern2 = r'([A-Z][a-z]{0,2})'
results = {}
for piece in data:
#print(piece)
element, number = re.split(pattern2, piece)[1:]
try:
number = int(number)
except ValueError:
number = 1
results[element] = number
return results
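# Illustrative behaviour of the helpers above, using ethanol (C2H6O):
#
#   formula_to_element_counts("C2H6O")        -> {'C': 2, 'H': 6, 'O': 1}
#   count_atoms("C2H6O")                      -> 9
#   count_atoms_in_set("C2H6O", {"C", "H"})   -> 8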
| gpl-2.0 | 6,158,946,275,024,231,000 | 44.327044 | 146 | 0.584154 | false |
tchellomello/home-assistant | homeassistant/components/cover/device_action.py | 9 | 6180 | """Provides device automations for Cover."""
from typing import List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
SERVICE_CLOSE_COVER,
SERVICE_CLOSE_COVER_TILT,
SERVICE_OPEN_COVER,
SERVICE_OPEN_COVER_TILT,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
SERVICE_STOP_COVER,
)
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from . import (
ATTR_POSITION,
ATTR_TILT_POSITION,
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
)
CMD_ACTION_TYPES = {"open", "close", "stop", "open_tilt", "close_tilt"}
POSITION_ACTION_TYPES = {"set_position", "set_tilt_position"}
CMD_ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(CMD_ACTION_TYPES),
vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN),
}
)
POSITION_ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(POSITION_ACTION_TYPES),
vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN),
vol.Required("position"): vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
}
)
ACTION_SCHEMA = vol.Any(CMD_ACTION_SCHEMA, POSITION_ACTION_SCHEMA)
async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device actions for Cover devices."""
registry = await entity_registry.async_get_registry(hass)
actions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
if not state or ATTR_SUPPORTED_FEATURES not in state.attributes:
continue
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
# Add actions for each entity that belongs to this integration
if supported_features & SUPPORT_SET_POSITION:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "set_position",
}
)
else:
if supported_features & SUPPORT_OPEN:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "open",
}
)
if supported_features & SUPPORT_CLOSE:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "close",
}
)
if supported_features & SUPPORT_STOP:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "stop",
}
)
if supported_features & SUPPORT_SET_TILT_POSITION:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "set_tilt_position",
}
)
else:
if supported_features & SUPPORT_OPEN_TILT:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "open_tilt",
}
)
if supported_features & SUPPORT_CLOSE_TILT:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "close_tilt",
}
)
return actions
async def async_get_action_capabilities(hass: HomeAssistant, config: dict) -> dict:
"""List action capabilities."""
if config[CONF_TYPE] not in POSITION_ACTION_TYPES:
return {}
return {
"extra_fields": vol.Schema(
{
vol.Optional("position", default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
)
}
)
}
async def async_call_action_from_config(
hass: HomeAssistant, config: dict, variables: dict, context: Optional[Context]
) -> None:
"""Execute a device action."""
config = ACTION_SCHEMA(config)
service_data = {ATTR_ENTITY_ID: config[CONF_ENTITY_ID]}
if config[CONF_TYPE] == "open":
service = SERVICE_OPEN_COVER
elif config[CONF_TYPE] == "close":
service = SERVICE_CLOSE_COVER
elif config[CONF_TYPE] == "stop":
service = SERVICE_STOP_COVER
elif config[CONF_TYPE] == "open_tilt":
service = SERVICE_OPEN_COVER_TILT
elif config[CONF_TYPE] == "close_tilt":
service = SERVICE_CLOSE_COVER_TILT
elif config[CONF_TYPE] == "set_position":
service = SERVICE_SET_COVER_POSITION
service_data[ATTR_POSITION] = config["position"]
elif config[CONF_TYPE] == "set_tilt_position":
service = SERVICE_SET_COVER_TILT_POSITION
service_data[ATTR_TILT_POSITION] = config["position"]
await hass.services.async_call(
DOMAIN, service, service_data, blocking=True, context=context
)
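# Illustrative action config (values are made up) accepted by the schemas above
# for a position-based cover action:
#
#   {
#       "device_id": "abc123",
#       "domain": "cover",
#       "entity_id": "cover.garage_door",
#       "type": "set_position",
#       "position": 75,
#   }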
| apache-2.0 | 4,451,048,983,173,102,000 | 31.698413 | 86 | 0.543528 | false |
btenaglia/hpc-historias-clinicas | hpc-historias-clinicas/fojas_quirurgicas/migrations/0009_auto_20150506_0117.py | 1 | 1096 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('fojas_quirurgicas', '0008_auto_20150505_2053'),
]
operations = [
migrations.AlterField(
model_name='fojasquirurgicas',
name='fecha',
field=models.DateField(default=datetime.datetime(2015, 5, 6, 1, 17, 17, 402560)),
preserve_default=True,
),
migrations.AlterField(
model_name='fojasquirurgicas',
name='hora_comienzo',
field=models.TimeField(default=datetime.datetime(2015, 5, 6, 1, 17, 17, 402614), verbose_name='Hora / Comienzo Operac\xf3n'),
preserve_default=True,
),
migrations.AlterField(
model_name='fojasquirurgicas',
name='hora_fin',
field=models.TimeField(default=datetime.datetime(2015, 5, 6, 1, 17, 17, 402648), verbose_name='Hora / Termin\xf3 Operac\xf3n'),
preserve_default=True,
),
]
| bsd-3-clause | 2,529,253,372,075,745,000 | 32.212121 | 139 | 0.597628 | false |
kabakchey/django-annoying | setup.py | 1 | 2610 | from annoying import __version__
from setuptools import setup, find_packages
setup(
name="django-annoying",
version=__version__,
packages=['annoying'],
author="Stavros Korokithakis",
author_email="[email protected]",
description="This is a django application that tries to eliminate annoying things in the Django framework.",
long_description="""
**Features:**
- render_to decorator - Reduce typing in django views.
- signals decorator - Allow using signals as decorators.
- ajax_request decorator - Returns JsonResponse with dict as content.
- autostrip decorator - Strip form text fields before validation.
- get_object_or_None function - Similar to get_object_or_404, but returns None if the object is not found.
- get_config function - Get settings from django.conf if exists, return a default value otherwise.
- AutoOneToOne field - Creates related object on first call if it doesn't exist yet.
- HttpResponseReload - Reload and stay on same page from where request was made.
- StaticServer middleware - Instead of configuring urls.py, just add this middleware and it will serve you static files.
- JSONField - A field that stores a Python object as JSON and retrieves it as a Python object.
**Installation instructions:**
- Copy the "annoying" directory to your Django project or put it in your PYTHONPATH.
- You can also run "python setup.py install", "easy_install django-annoying", or "pip install django-annoying".
**Download:**
- git clone git://github.com/skorokithakis/django-annoying.git
- hg clone http://bitbucket.org/Stavros/django-annoying/
""",
license="BSD",
keywords="django",
url="https://github.com/skorokithakis/django-annoying",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
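# Illustrative sketch of the render_to decorator advertised above (template and
# model names are made up; not part of setup.py itself):
#
#   from annoying.decorators import render_to
#
#   @render_to('items/list.html')
#   def item_list(request):
#       return {'items': Item.objects.all()}  # dict is rendered with the template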
| bsd-3-clause | 4,905,829,680,932,158,000 | 44 | 132 | 0.636782 | false |
thesuperzapper/tensorflow | tensorflow/contrib/distributions/python/ops/distribution.py | 3 | 38481 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import types
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import tf_inspect
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape_tensor", "batch_shape", "event_shape_tensor", "event_shape",
"sample", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "stddev", "mode",
"covariance"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
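# Illustrative behaviour (example strings are made up): with an "Args:" section
# present, the appended text is indented and spliced in just above it.
#
#   _update_docstring("Does X.\n\nArgs:\n  a: int.", "Extra note.")
#   -> "Does X.\n\n\n  Extra note.\n\nArgs:\n  a: int."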
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
Two static instances exist in the distritributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradients` call and instead use policy
gradients / surrogate loss instead.
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
return "<Reparameteriation Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
    Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
  a well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used `parameters = locals()`.
return dict((k, v) for k, v in self._parameters.items()
if not k.startswith("__") and k != "self")
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
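  # Illustrative usage (not from the original source; ``Normal`` stands in for
  # any concrete subclass parameterized by `loc` and `scale`):
  #
  #   dist = Normal(loc=0., scale=1.)
  #   dist_shifted = dist.copy(loc=2.)  # same scale, new loc; other args reused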
def _batch_shape_tensor(self):
raise NotImplementedError("batch_shape_tensor is not implemented")
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._batch_shape()
def _event_shape_tensor(self):
raise NotImplementedError("event_shape_tensor is not implemented")
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._event_shape()
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented")
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._quantile(value, **kwargs)
except NotImplementedError as original_exception:
raise original_exception
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
def _covariance(self):
raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: The name to give this op.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32),
array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
| apache-2.0 | -3,494,982,006,322,052,000 | 33.698828 | 80 | 0.657155 | false |
KronoSKoderS/CalPack | calpack/models/fields/IntFields.py | 1 | 6249 | """
"""
import ctypes
from calpack.models.fields import Field, BYTE_SIZE
from calpack.utils import typed_property
__all__ = [
'IntField', 'IntField8', 'IntField16', 'IntField32', 'IntField64'
]
class IntField(Field):
"""
    An Integer field. This field can be configured to be signed or unsigned. Its bit length can
also be set, however the max bit length for this field is
:code:`ctypes.sizeof(ctypes.c_int) * 8`. This wraps around the :code:`ctypes.c_int` or
:code:`ctypes.c_uint` data type.
.. warning:: A word of caution when using the :code:`bit_len`. If the combination of IntFields
with the bit_len set are not byte aligned, there is the possibility of "spare" bits not
        accessible but used in the overall structure. See :ref:`Unused Bits`
for more information
:param int bit_len: the length in bits of the integer.
    :param bool signed: whether to treat the int as a signed integer or unsigned integer (default
unsigned)
:param int default_val: the default value of the field (default 0)
:raises ValueError: if the :code:`bit_len` is less than or equal to 0 or greater than
:code:`ctypes.sizeof(ctypes.c_int) * 8`
"""
signed = typed_property('signed', bool, False)
_c_types = (ctypes.c_uint, ctypes.c_int)
def __init__(self, bit_len=None, signed=False, default_val=0):
super(IntField, self).__init__(default_val)
self.signed = signed
self.c_type = self._c_types[int(self.signed)]
self.bit_len = bit_len
if bit_len is None:
self.bit_len = ctypes.sizeof(self.c_type) * BYTE_SIZE
if self.bit_len <= 0 or self.bit_len > ctypes.sizeof(self.c_type) * BYTE_SIZE:
raise ValueError("bit_len must be between 1 and {max_val}".format(
max_val=ctypes.sizeof(self.c_type) * BYTE_SIZE)
)
def py_to_c(self, val):
if not self.signed and val < 0:
raise TypeError("Signed valued cannot be set for an unsigned IntField!")
return val
def create_field_c_tuple(self):
if self.bit_len < ctypes.sizeof(self.c_type) * BYTE_SIZE:
return (self.field_name, self.c_type, self.bit_len)
return (self.field_name, self.c_type)
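# Illustrative sketch (not part of the original module): how an IntField maps to
# the ctypes field tuple used by the packet structure. Assigning ``field_name``
# by hand is an assumption; normally the packet machinery sets it.
#
#   flags = IntField(bit_len=4)       # 4-bit unsigned integer field
#   flags.field_name = 'flags'
#   flags.create_field_c_tuple()      # -> ('flags', ctypes.c_uint, 4)
#
#   count = IntField(signed=True)     # full-width signed int field
#   count.field_name = 'count'
#   count.create_field_c_tuple()      # -> ('count', ctypes.c_int)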
class IntField8(IntField):
"""
    An Integer field. This field can be configured to be signed or unsigned. Its bit length can
also be set, however the max bit length for this field is 8. This wraps around the
:code:`ctypes.c_int8` or :code:`ctypes.c_uint8` data type.
.. warning:: A word of caution when using the :code:`bit_len`. If the combination of IntFields
with the bit_len set are not byte aligned, there is the possibility of "spare" bits not
        accessible but used in the overall structure. See :ref:`Unused Bits`
for more information
:param int bit_len: the length in bits of the integer.
    :param bool signed: whether to treat the int as a signed integer or unsigned integer (default
unsigned)
:param int default_val: the default value of the field (default 0)
:raises ValueError: if the :code:`bit_len` is less than or equal to 0 or greater than 8
"""
_c_types = (ctypes.c_uint8, ctypes.c_int8)
class IntField16(IntField):
"""
    An Integer field. This field can be configured to be signed or unsigned. Its bit length can
also be set, however the max bit length for this field is 16. This wraps around the
:code:`ctypes.c_int16` or :code:`ctypes.c_uint16` data type.
.. warning:: A word of caution when using the :code:`bit_len`. If the combination of IntFields
with the bit_len set are not byte aligned, there is the possibility of "spare" bits not
        accessible but used in the overall structure. See :ref:`Unused Bits`
for more information
:param int bit_len: the length in bits of the integer.
    :param bool signed: whether to treat the int as a signed integer or unsigned integer (default
unsigned)
:param int default_val: the default value of the field (default 0)
:raises ValueError: if the :code:`bit_len` is less than or equal to 0 or greater than 16
"""
_c_types = (ctypes.c_uint16, ctypes.c_int16)
class IntField32(IntField):
"""
    An Integer field. This field can be configured to be signed or unsigned. Its bit length can
also be set, however the max bit length for this field is 32. This wraps around the
:code:`ctypes.c_int32` or :code:`ctypes.c_uint32` data type.
.. warning:: A word of caution when using the :code:`bit_len`. If the combination of IntFields
with the bit_len set are not byte aligned, there is the possibility of "spare" bits not
        accessible but used in the overall structure. See :ref:`Unused Bits`
for more information
:param int bit_len: the length in bits of the integer.
    :param bool signed: whether to treat the int as a signed integer or unsigned integer (default
unsigned)
:param int default_val: the default value of the field (default 0)
:raises ValueError: if the :code:`bit_len` is less than or equal to 0 or greater than 32
"""
_c_types = (ctypes.c_uint32, ctypes.c_int32)
class IntField64(IntField):
"""
    An Integer field. This field can be configured to be signed or unsigned. Its bit length can
also be set, however the max bit length for this field is 64. This wraps around the
:code:`ctypes.c_int64` or :code:`ctypes.c_uint64` data type.
.. warning:: A word of caution when using the :code:`bit_len`. If the combination of IntFields
with the bit_len set are not byte aligned, there is the possibility of "spare" bits not
accessible but used in the overall strucuture. See :ref:`Unused Bits`
for more information
:param int bit_len: the length in bits of the integer.
:param bool signed: whether to treat the int as an signed integer or unsigned integer (default
unsigned)
:param int default_val: the default value of the field (default 0)
:raises ValueError: if the :code:`bit_len` is less than or equal to 0 or greater than 64
"""
_c_types = (ctypes.c_uint64, ctypes.c_int64)
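# --- Illustrative sketch (added; not part of the original module) ---
# The tuples returned by create_field_c_tuple() feed a ctypes Structure's
# _fields_ list, which is how a sub-byte bit_len becomes a C bitfield. The
# plain-ctypes analogue below packs two 4-bit fields into one byte plus a
# full-width 16-bit field; `ctypes` is already imported by this module.
class _ExampleBitfieldLayout(ctypes.Structure):
    _fields_ = [
        ("version", ctypes.c_uint8, 4),     # like IntField8(bit_len=4)
        ("header_len", ctypes.c_uint8, 4),  # shares the same byte
        ("total_length", ctypes.c_uint16),  # like IntField16(), full width
    ]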
| mit | -635,531,106,161,597,300 | 43.956835 | 99 | 0.673388 | false |
rwade42/GameMaster | client.py | 1 | 1774 | from flask import Flask, request, make_response, Response
from slackclient import SlackClient
from TriviaGame import load_trivia
from button_json import generate_button_json
from gameplay import *
import os
import json
# App's Slackbot user token
SLACK_BOT_TOKEN = os.environ["SLACK_BOT_TOKEN"]
SLACK_VERIFICATION_TOKEN = os.environ["SLACK_VERIFICATION_TOKEN"]
# Slack client for API requests
SLACK_CLIENT = SlackClient(SLACK_BOT_TOKEN)
#Webserver for incoming traffic
app = Flask(__name__)
#Posting question in slack
@app.route("/slack/button_options", methods=["POST"])
def button_options():
# Parse the request payload
form_json = json.loads(request.form["payload"])
button_layout = generate_button_json()
return Response(json.dumps(button_layout), mimetype='application/json')
#Looking at question response
@app.route("/slack/button_actions", method=["POST"])
def button_actions():
# Parse the request payload
form_json = json.loads(request.form["payload"])
response = status(form_json)
if convert(response):
play(trivia)
    response = SLACK_CLIENT.api_call(
"chat.update",
channel=form_json["channel"]["id"],
ts=form_json["message_ts"],
text=message_text,
attachments=[]
)
return make_response("", 200)
if __name__ = "__main__":
app.run()
# Example Slack webhook invocation (shell command, not Python):
# curl -X POST -H 'Content-type: application/json' --data '{ "text": "Would you like to play a game?", "attachments": [ {"text": "Choose a game to play","fallback": "You are unable to choose a game","callback_id": "wopr_game", "color": "#3AA3E3","attachment_type": "default","actions": [{"name": "game","text": "Chess","type": "button","value": "chess"}]}]}' https://hooks.slack.com/services/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
| mit | -6,435,240,125,242,046,000 | 30.678571 | 434 | 0.695603 | false |
Arcanemagus/SickRage | tests/sickrage_tests/system/shutdown_tests.py | 1 | 2074 | # coding=utf-8
# This file is part of SickRage.
#
# URL: https://sick-rage.github.io
# Git: https://github.com/Sick-Rage/Sick-Rage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Test shutdown
"""
from __future__ import print_function, unicode_literals
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..')))
import sickbeard
from sickbeard.event_queue import Events
from sickrage.system.Shutdown import Shutdown
import six
class ShutdownTests(unittest.TestCase):
"""
Test shutdown
"""
def test_shutdown(self):
"""
Test shutdown
"""
sickbeard.PID = 123456
sickbeard.events = Events(None)
test_cases = {
0: False,
'0': False,
123: False,
'123': False,
123456: True,
'123456': True,
}
unicode_test_cases = {
'0': False,
'123': False,
'123456': True,
}
for tests in test_cases, unicode_test_cases:
for (pid, result) in six.iteritems(tests):
self.assertEqual(Shutdown.stop(pid), result)
if __name__ == '__main__':
print('=====> Testing {0}'.format(__file__))
SUITE = unittest.TestLoader().loadTestsFromTestCase(ShutdownTests)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-3.0 | 4,476,693,702,913,620,500 | 26.289474 | 92 | 0.635005 | false |
bohlian/frappe | frappe/utils/jinja.py | 3 | 6075 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
def get_jenv():
import frappe
if not getattr(frappe.local, 'jenv', None):
from jinja2 import Environment, DebugUndefined
# frappe will be loaded last, so app templates will get precedence
jenv = Environment(loader = get_jloader(),
undefined=DebugUndefined)
set_filters(jenv)
jenv.globals.update(get_allowed_functions_for_jenv())
frappe.local.jenv = jenv
return frappe.local.jenv
def get_template(path):
return get_jenv().get_template(path)
def get_email_from_template(name, args):
from jinja2 import TemplateNotFound
args = args or {}
try:
message = get_template('templates/emails/' + name + '.html').render(args)
except TemplateNotFound as e:
raise e
try:
text_content = get_template('templates/emails/' + name + '.txt').render(args)
except TemplateNotFound:
text_content = None
return (message, text_content)
def validate_template(html):
"""Throws exception if there is a syntax error in the Jinja Template"""
import frappe
from jinja2 import TemplateSyntaxError
jenv = get_jenv()
try:
jenv.from_string(html)
except TemplateSyntaxError as e:
frappe.msgprint('Line {}: {}'.format(e.lineno, e.message))
frappe.throw(frappe._("Syntax error in template"))
def render_template(template, context, is_path=None):
'''Render a template using Jinja
:param template: path or HTML containing the jinja template
:param context: dict of properties to pass to the template
:param is_path: (optional) assert that the `template` parameter is a path'''
if not template:
return ""
	# if it ends with .html then it's a freaking path, not html
if (is_path
or template.startswith("templates/")
or (template.endswith('.html') and '\n' not in template)):
return get_jenv().get_template(template).render(context)
else:
return get_jenv().from_string(template).render(context)
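# Illustrative calls (added for clarity; the template path and context are made up):
#   render_template("templates/emails/birthday.html", {"user": "Guest"})  # resolved via get_jloader()
#   render_template("Hello {{ user }}", {"user": "Guest"})                # compiled with from_string()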
def get_allowed_functions_for_jenv():
import os, json
import frappe
import frappe.utils
import frappe.utils.data
from frappe.utils.autodoc import automodule, get_version
from frappe.model.document import get_controller
from frappe.website.utils import (get_shade, get_toc, get_next_link)
from frappe.modules import scrub
import mimetypes
from html2text import html2text
from frappe.www.printview import get_visible_columns
datautils = {}
if frappe.db:
date_format = frappe.db.get_default("date_format") or "yyyy-mm-dd"
else:
date_format = 'yyyy-mm-dd'
for key, obj in frappe.utils.data.__dict__.items():
if key.startswith("_"):
# ignore
continue
if hasattr(obj, "__call__"):
# only allow functions
datautils[key] = obj
if "_" in getattr(frappe.local, 'form_dict', {}):
del frappe.local.form_dict["_"]
user = getattr(frappe.local, "session", None) and frappe.local.session.user or "Guest"
out = {
# make available limited methods of frappe
"frappe": {
"_": frappe._,
"get_url": frappe.utils.get_url,
'format': frappe.format_value,
"format_value": frappe.format_value,
'date_format': date_format,
"format_date": frappe.utils.data.global_date_format,
"form_dict": getattr(frappe.local, 'form_dict', {}),
"local": frappe.local,
"get_hooks": frappe.get_hooks,
"get_meta": frappe.get_meta,
"get_doc": frappe.get_doc,
"get_list": frappe.get_list,
"get_all": frappe.get_all,
"utils": datautils,
"user": user,
"get_fullname": frappe.utils.get_fullname,
"get_gravatar": frappe.utils.get_gravatar_url,
"full_name": frappe.local.session.data.full_name if getattr(frappe.local, "session", None) else "Guest",
"render_template": frappe.render_template,
'session': {
'user': user,
'csrf_token': frappe.local.session.data.csrf_token if getattr(frappe.local, "session", None) else ''
},
},
'style': {
'border_color': '#d1d8dd'
},
"autodoc": {
"get_version": get_version,
"automodule": automodule,
"get_controller": get_controller
},
'get_toc': get_toc,
'get_next_link': get_next_link,
"_": frappe._,
"get_shade": get_shade,
"scrub": scrub,
"guess_mimetype": mimetypes.guess_type,
'html2text': html2text,
'json': json,
"dev_server": 1 if os.environ.get('DEV_SERVER', False) else 0
}
if not frappe.flags.in_setup_help:
out['get_visible_columns'] = get_visible_columns
out['frappe']['date_format'] = date_format
out['frappe']["db"] = {
"get_value": frappe.db.get_value,
"get_default": frappe.db.get_default,
}
return out
def get_jloader():
import frappe
if not getattr(frappe.local, 'jloader', None):
from jinja2 import ChoiceLoader, PackageLoader, PrefixLoader
if frappe.local.flags.in_setup_help:
apps = ['frappe']
else:
apps = frappe.get_hooks('template_apps')
if not apps:
apps = frappe.local.flags.web_pages_apps or frappe.get_installed_apps(sort=True)
apps.reverse()
if not "frappe" in apps:
apps.append('frappe')
frappe.local.jloader = ChoiceLoader(
# search for something like app/templates/...
[PrefixLoader(dict(
(app, PackageLoader(app, ".")) for app in apps
))]
# search for something like templates/...
+ [PackageLoader(app, ".") for app in apps]
)
return frappe.local.jloader
def set_filters(jenv):
import frappe
from frappe.utils import global_date_format, cint, cstr, flt, markdown
from frappe.website.utils import get_shade, abs_url
jenv.filters["global_date_format"] = global_date_format
jenv.filters["markdown"] = markdown
jenv.filters["json"] = frappe.as_json
jenv.filters["get_shade"] = get_shade
jenv.filters["len"] = len
jenv.filters["int"] = cint
jenv.filters["str"] = cstr
jenv.filters["flt"] = flt
jenv.filters["abs_url"] = abs_url
if frappe.flags.in_setup_help: return
# load jenv_filters from hooks.py
for app in frappe.get_installed_apps():
for jenv_filter in (frappe.get_hooks(app_name=app).jenv_filter or []):
filter_name, filter_function = jenv_filter.split(":")
jenv.filters[filter_name] = frappe.get_attr(filter_function)
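# Illustrative hooks.py entry (added; the app and function names are made up):
# each jenv_filter item is a "name:dotted.path" string split above, e.g.
#   jenv_filter = ["money:myapp.utils.format_money"]
# registers myapp.utils.format_money so templates can use {{ amount | money }}.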
| mit | -4,252,877,944,635,622,400 | 28.066986 | 107 | 0.695144 | false |
mat128/netman | netman/core/objects/exceptions.py | 2 | 9177 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from netman.core.objects.access_groups import IN, OUT
class NetmanException(Exception):
pass
class UnknownResource(NetmanException):
def __init__(self, msg="Resource not found"):
super(UnknownResource, self).__init__(msg)
class Conflict(NetmanException):
def __init__(self, msg="Conflicting value"):
super(Conflict, self).__init__(msg)
class SessionAlreadyExists(Conflict):
def __init__(self, session_id=None):
super(SessionAlreadyExists, self).__init__(msg="Session ID already exists: {}".format(session_id))
class UnavailableResource(NetmanException):
def __init__(self, msg="Resource not available"):
super(UnavailableResource, self).__init__(msg)
class OperationNotCompleted(NetmanException):
def __init__(self, problem=None):
        super(OperationNotCompleted, self).__init__("An error occurred while completing operation, no modifications have been applied: {0}".format(problem))
class UnknownVlan(UnknownResource):
def __init__(self, vlan_number=None):
super(UnknownVlan, self).__init__("Vlan {} not found".format(vlan_number))
class UnknownInterface(UnknownResource):
def __init__(self, interface=None):
super(UnknownInterface, self).__init__("Unknown interface {}".format(interface))
class UnknownIP(UnknownResource):
def __init__(self, ip_network=None):
super(UnknownIP, self).__init__("IP {} not found".format(ip_network))
class UnknownAccessGroup(UnknownResource):
def __init__(self, direction=None):
super(UnknownAccessGroup, self).__init__("{} IP access group not found".format({IN: "Inbound", OUT: "Outgoing"}[direction] if direction else ""))
class UnknownSession(UnknownResource):
def __init__(self, session_id=None):
super(UnknownSession, self).__init__("Session \"{}\" not found.".format(session_id))
class UnknownVrf(UnknownResource):
def __init__(self, name=None):
super(UnknownVrf, self).__init__("VRF name \"{}\" was not configured.".format(name))
class UnknownDhcpRelayServer(UnknownResource):
def __init__(self, vlan_number, ip_address):
super(UnknownDhcpRelayServer, self).__init__("DHCP relay server {} not found on VLAN {}".format(ip_address, vlan_number))
class DhcpRelayServerAlreadyExists(UnknownResource):
def __init__(self, vlan_number, ip_address):
super(DhcpRelayServerAlreadyExists, self).__init__("DHCP relay server {} already exists on VLAN {}".format(ip_address, vlan_number))
class AccessVlanNotSet(UnknownResource):
def __init__(self, interface=None):
super(AccessVlanNotSet, self).__init__("Access Vlan is not set on interface {}".format(interface))
class TrunkVlanNotSet(UnknownResource):
def __init__(self, interface=None):
super(TrunkVlanNotSet, self).__init__("Trunk Vlan is not set on interface {}".format(interface))
class NativeVlanNotSet(UnknownResource):
def __init__(self, interface=None):
super(NativeVlanNotSet, self).__init__("Trunk native Vlan is not set on interface {}".format(interface))
class InterfaceSpanningTreeNotEnabled(UnknownResource):
def __init__(self, interface=None):
super(InterfaceSpanningTreeNotEnabled, self).__init__("Spanning tree is not enabled on interface {}".format(interface))
class VlanVrfNotSet(UnknownResource):
def __init__(self, vlan=None):
super(VlanVrfNotSet, self).__init__("VRF is not set on vlan {}".format(vlan))
class IPNotAvailable(Conflict):
def __init__(self, ip_network=None, reason=None):
super(IPNotAvailable, self).__init__("IP {} is not available in this vlan{}".format(ip_network, (": " + reason) if reason is not None else ""))
class IPAlreadySet(Conflict):
def __init__(self, ip_network=None, present_ip_network=None):
super(IPAlreadySet, self).__init__("IP {} is already present in this vlan as {}".format(ip_network, present_ip_network))
class VlanAlreadyExist(Conflict):
def __init__(self, vlan_number=None):
super(VlanAlreadyExist, self).__init__("Vlan {} already exists".format(vlan_number))
class InterfaceInWrongPortMode(Conflict):
def __init__(self, mode=None):
super(InterfaceInWrongPortMode, self).__init__("Operation cannot be performed on a {} mode interface".format(mode))
class VlanAlreadyInTrunk(Conflict):
def __init__(self, vlan=None):
super(VlanAlreadyInTrunk, self).__init__("Vlan {} cannot be set as native vlan because it is already a member of the trunk".format(vlan))
class VrrpAlreadyExistsForVlan(Conflict):
def __init__(self, vlan=None, vrrp_group_id=None):
Conflict.__init__(self, "Vrrp group {group} is already in use on vlan {vlan}".format(group=vrrp_group_id, vlan=vlan))
class VrrpDoesNotExistForVlan(ValueError):
def __init__(self, vlan=None, vrrp_group_id=None):
ValueError.__init__(self, "Vrrp group {group} does not exist for vlan {vlan}".format(group=vrrp_group_id, vlan=vlan))
class NoIpOnVlanForVrrp(ValueError):
def __init__(self, vlan=None):
ValueError.__init__(self, "Vlan {vlan} needs an IP before configuring VRRP".format(vlan=vlan))
class BadVlanNumber(ValueError):
def __init__(self):
super(BadVlanNumber, self).__init__("Vlan number is invalid")
class BadInterfaceDescription(ValueError):
def __init__(self, desc=None):
super(BadInterfaceDescription, self).__init__("Invalid description : {}".format(desc))
class BadVrrpGroupNumber(ValueError):
def __init__(self, minimum=None, maximum=None):
super(BadVrrpGroupNumber, self).__init__("VRRP group number is invalid, must be contained between {min} and {max}".format(min=minimum, max=maximum))
class BadVrrpPriorityNumber(ValueError):
def __init__(self, minimum=None, maximum=None):
super(BadVrrpPriorityNumber, self).__init__("VRRP priority value is invalid, must be contained between {min} and {max}".format(min=minimum, max=maximum))
class BadVrrpTimers(ValueError):
def __init__(self):
super(BadVrrpTimers, self).__init__("VRRP timers values are invalid")
class BadVrrpAuthentication(ValueError):
def __init__(self):
super(BadVrrpAuthentication, self).__init__("VRRP authentication is invalid")
class BadVrrpTracking(ValueError):
def __init__(self):
super(BadVrrpTracking, self).__init__("VRRP tracking values are invalid")
class BadVlanName(ValueError):
def __init__(self):
super(BadVlanName, self).__init__("Vlan name is invalid")
class LockedSwitch(UnavailableResource):
def __init__(self):
super(LockedSwitch, self).__init__("Switch is locked and can't be modified")
class UnableToAcquireLock(UnavailableResource):
def __init__(self):
super(UnableToAcquireLock, self).__init__("Unable to acquire a lock in a timely fashion")
class BadBondNumber(ValueError):
def __init__(self):
super(BadBondNumber, self).__init__("Bond number is invalid")
class InterfaceNotInBond(UnknownResource):
def __init__(self):
super(InterfaceNotInBond, self).__init__("Interface not associated to specified bond")
class BondAlreadyExist(Conflict):
def __init__(self, number=None):
super(BondAlreadyExist, self).__init__("Bond {} already exists".format(number))
class UnknownBond(UnknownResource):
def __init__(self, number=None):
super(UnknownBond, self).__init__("Bond {} not found".format(number))
class BadBondLinkSpeed(ValueError):
def __init__(self):
super(BadBondLinkSpeed, self).__init__("Malformed bond link speed")
class UnknownSwitch(UnknownResource):
def __init__(self, name=None):
super(UnknownSwitch, self).__init__("Switch \"{0}\" is not configured".format(name))
class MalformedSwitchSessionRequest(ValueError):
def __init__(self):
super(MalformedSwitchSessionRequest, self).__init__("Malformed switch session request")
class Timeout(Exception):
pass
class ConnectTimeout(Exception):
def __init__(self, host=None, port=None):
super(ConnectTimeout, self).__init__("Timed out while connecting to {} on port {}".format(host, port))
class CommandTimeout(Exception):
def __init__(self, wait_for=None, buffer=None):
super(CommandTimeout, self).__init__("Command timed out expecting {}. Current read buffer: {}"
.format(repr(wait_for), buffer))
class CouldNotConnect(Exception):
def __init__(self, host=None, port=None):
super(CouldNotConnect, self).__init__("Could not connect to {} on port {}".format(host, port))
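# Illustrative usage (added; the adapter call and helper are hypothetical):
# switch adapters raise these exceptions and API layers translate them, e.g.
#   try:
#       switch.add_vlan(5000)
#   except BadVlanNumber as e:
#       return error_response(str(e))  # "Vlan number is invalid"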
| apache-2.0 | -1,146,578,154,553,787,100 | 34.988235 | 161 | 0.689005 | false |
intel-hpdd/intel-manager-for-lustre | chroma_api/utils.py | 1 | 16877 | # Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import sys
import traceback
import logging
import itertools
from chroma_core.models.jobs import SchedulingError
import bisect
from collections import namedtuple
from django.contrib.contenttypes.models import ContentType
from django.http import Http404, QueryDict
from django.utils import timezone
from tastypie.resources import ModelDeclarativeMetaclass, Resource, ResourceOptions
from tastypie import fields
from tastypie import http
from tastypie.serializers import Serializer
from tastypie.http import HttpBadRequest, HttpMethodNotAllowed
from tastypie.exceptions import ImmediateHttpResponse
from chroma_core.models.command import Command
from chroma_core.models.target import ManagedMgs
from chroma_core.models import StorageResourceRecord
from chroma_core.services import log_register
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
from chroma_api.chroma_model_resource import ChromaModelResource
import chroma_core.lib.conf_param
from chroma_core.models import utils as conversion_util
from iml_common.lib.date_time import IMLDateTime
from collections import defaultdict
from django.db.models.query import QuerySet
from django.db.models import fields as django_fields
log = log_register(__name__)
def custom_response(resource, request, response_klass, response_data):
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.utils.mime import build_content_type
desired_format = resource.determine_format(request)
response = response_klass(
content=resource.serialize(request, response_data, desired_format),
content_type=build_content_type(desired_format),
)
return ImmediateHttpResponse(response=response)
def dehydrate_command(command):
"""There are a few places where we invoke CommandResource from other resources
to build a dict of a Command in 202 responses, so wrap the process here."""
if command:
from chroma_api.command import CommandResource
cr = CommandResource()
return cr.full_dehydrate(cr.build_bundle(obj=command)).data
else:
return None
# Given a dict of queries, turn the variables into the correct format for a django filter.
def filter_fields_to_type(klass, query_dict):
reserved_fields = ["order_by", "format", "limit", "offset"]
q = QuerySet(klass)
query = dict(query_dict)
fields = {}
for field in q.model._meta.fields:
fields[field.column] = field
# Remove the reserved fields we know about.
for field in query.keys():
if field in reserved_fields:
del query[field]
    # This will error if it finds an unknown field, causing the standard tastypie query to run.
for field in query.keys():
try:
field_type = type(fields[field])
value = query[field]
if field_type == django_fields.AutoField or field_type == django_fields.IntegerField:
value = int(value)
elif field_type == django_fields.BooleanField:
value = value.lower() == "true"
query[field] = value
except KeyError:
pass
return query
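# Illustrative example (added; the model fields are hypothetical): for a model
# with an AutoField "id" and a BooleanField "immutable_state", a raw query dict
#   {"id": "3", "immutable_state": "true", "limit": "10"}
# comes back as {"id": 3, "immutable_state": True}; the reserved paging/format
# keys are dropped and string values are coerced to the column's Python type.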
# monkey-patch ResourceOptions to have a default-empty readonly list
setattr(ResourceOptions, "readonly", [])
class CustomModelDeclarativeMetaclass(ModelDeclarativeMetaclass):
"""
Customizations at the metaclass level.
"""
def __new__(cls, name, bases, attrs):
new_class = super(CustomModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
# At the moment, the only reason for this class' existence is
# to allow us to define a list of readonly fields in the
# Resources' Meta classes. It's kind of a hack, but it works
# the same way as other resource configuration. The only
# wrinkle is that this hacking works best in a metaclass,
# and there's no way to monkey-patch the __metaclass__ for a
# class, so we have to either declare this as the __metaclass__
# for all of our classes which need this functionality or
# else just have them inherit from a single class which uses
# this one as its metaclass. The latter seems cleanest.
#
# Why do this instead of setting readonly=True on the various
# Resource fields? Because when we explicitly declare a field
# in a Resource class we lose the ORM-level attributes like
# help_text. Plus, in many cases we'd declare fields in the
# Resources for the sole purpose of marking them readonly,
# and that adds clutter.
#
# TODO: Explore feasibility of getting this readonly fieldlist
# feature pushed upstream. Alternatively, fix
# ModelResource.get_fields() to preserve the underlying
# ORM field attributes unless they're overridden.
parent_readonly = []
# Merge readonly lists from parents into the new class' list.
try:
parents = [b for b in bases if issubclass(b, Resource)]
parents.reverse()
for p in parents:
parent_readonly.extend(p._meta.readonly)
except NameError:
pass
# stupid de-dupe tricks
new_class._meta.readonly = list(set(new_class._meta.readonly + parent_readonly))
for field in new_class._meta.readonly:
try:
new_class.base_fields[field].readonly = True
except KeyError:
pass
return new_class
class CustomModelResource(ChromaModelResource):
"""
Container for local customizations to tastypie's ModelResource class.
"""
__metaclass__ = CustomModelDeclarativeMetaclass
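# Illustrative declaration (added; the model name is hypothetical): a resource
# relying on the Meta-level readonly handling implemented in the metaclass above
# only needs to list the field names:
#   class ExampleResource(CustomModelResource):
#       class Meta:
#           queryset = ExampleModel.objects.all()
#           readonly = ['state', 'label']
# Both fields end up with readonly=True while keeping their ORM help_text.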
class StatefulModelResource(CustomModelResource):
content_type_id = fields.IntegerField()
label = fields.CharField()
class Meta:
abstract = True
readonly = ["id", "immutable_state", "state", "content_type_id", "label", "state_modified_at"]
def dehydrate_content_type_id(self, bundle):
if hasattr(bundle.obj, "content_type"):
return bundle.obj.content_type_id
else:
return ContentType.objects.get_for_model(bundle.obj.__class__).pk
def dehydrate_label(self, bundle):
return bundle.obj.get_label()
# PUT handler for accepting {'state': 'foo', 'dry_run': <true|false>}
def obj_update(self, bundle, **kwargs):
self.is_valid(bundle)
if bundle.errors:
raise ImmediateHttpResponse(
response=self.error_response(bundle.request, bundle.errors[self._meta.resource_name])
)
request = bundle.request
bundle.obj = self.cached_obj_get(bundle, **self.remove_api_resource_names(kwargs))
stateful_object = bundle.obj
dry_run = bundle.data.get("dry_run", False)
if "state" in bundle.data:
new_state = bundle.data["state"]
if dry_run:
# FIXME: should this be a GET to something like /foo/transitions/from/to/
# to get information about that transition?
if stateful_object.state == new_state:
report = []
else:
report = JobSchedulerClient.get_transition_consequences(stateful_object, new_state)
raise custom_response(self, request, http.HttpResponse, report)
else:
try:
command = Command.set_state([(stateful_object, new_state)])
except SchedulingError as e:
raise custom_response(self, request, http.HttpBadRequest, {"state": e.message})
if command:
raise custom_response(self, request, http.HttpAccepted, {"command": dehydrate_command(command)})
else:
raise custom_response(self, request, http.HttpNoContent, None)
else:
return bundle
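    # Illustrative request handled by obj_update above (added; the URL and
    # state name are hypothetical):
    #   PUT /api/host/1/  {"state": "removed", "dry_run": true}
    # With dry_run the transition consequences are returned; without it a
    # Command is scheduled and returned as {"command": {...}}.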
def obj_delete(self, bundle, **kwargs):
obj = self.obj_get(bundle, **kwargs)
try:
if obj.immutable_state and "forgotten" in obj.states:
command = Command.set_state([(obj, "forgotten")])
else:
command = Command.set_state([(obj, "removed")])
except SchedulingError as e:
raise custom_response(self, bundle.request, http.HttpBadRequest, {"__all__": e.message})
raise custom_response(self, bundle.request, http.HttpAccepted, {"command": dehydrate_command(command)})
class ConfParamResource(StatefulModelResource):
conf_params = fields.DictField()
def dehydrate_conf_params(self, bundle):
try:
return chroma_core.lib.conf_param.get_conf_params(bundle.obj)
except NotImplementedError:
return None
# PUT handler for accepting {'conf_params': {}}
def obj_update(self, bundle, **kwargs):
self.is_valid(bundle)
if bundle.errors:
raise ImmediateHttpResponse(
response=self.error_response(bundle.request, bundle.errors[self._meta.resource_name])
)
request = bundle.request
bundle.obj = self.cached_obj_get(bundle, **self.remove_api_resource_names(kwargs))
if hasattr(bundle.obj, "content_type"):
obj = bundle.obj.downcast()
else:
obj = bundle.obj
# FIXME HYD-1032: PUTing modified conf_params and modified state in the same request will
# cause one of those two things to be ignored.
if "conf_params" not in bundle.data or isinstance(obj, ManagedMgs):
super(ConfParamResource, self).obj_update(bundle, **kwargs)
try:
conf_params = bundle.data["conf_params"]
except KeyError:
# TODO: pass in whole objects every time so that I can legitimately
# validate the presence of this field
pass
else:
# Belt-and-braces: child classes should have validated first, but let's
# make sure (bad conf params can be very harmful)
errors = chroma_core.lib.conf_param.validate_conf_params(obj.__class__, conf_params)
if errors:
raise custom_response(self, request, http.HttpBadRequest, {"conf_params": errors})
# Store the conf params
mgs_id = chroma_core.lib.conf_param.set_conf_params(obj, conf_params)
# If we were returned an MGS, then something has changed, and we will
# kick off a command to apply the changes to the filesystem
if mgs_id:
command_id = JobSchedulerClient.command_run_jobs(
[{"class_name": "ApplyConfParams", "args": {"mgs_id": mgs_id}}], "Updating configuration parameters"
)
raise custom_response(
self,
request,
http.HttpAccepted,
{
"command": dehydrate_command(Command.objects.get(pk=command_id)),
self.Meta.resource_name: self.alter_detail_data_to_serialize(
request, self.full_dehydrate(bundle)
).data,
},
)
else:
return super(ConfParamResource, self).obj_update(bundle, **kwargs)
return bundle
class SeverityResource(ChromaModelResource):
"""Handles serverity for subclasses
The basis for this Resource is to add the Severity field and support for
converting it to and from it's FE form (string) and db form (int)
"""
severity = fields.CharField(
attribute="severity",
help_text=("String indicating the severity " "one of %s") % conversion_util.STR_TO_SEVERITY.keys(),
)
def dehydrate_severity(self, bundle):
"""Convert from int in DB to String for FE"""
return logging.getLevelName(bundle.obj.severity)
def hydrate_severity(self, bundle):
"""Convert severity name to int value for saving to DB"""
try:
bundle.data["severity"] = conversion_util.STR_TO_SEVERITY[bundle.data["severity"]]
except KeyError as exc:
raise custom_response(
self, bundle.request, http.HttpBadRequest, {"severity": ["invalid severity: {0}".format(*exc.args)]}
)
return bundle
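    # Illustrative round trip (added), assuming STR_TO_SEVERITY mirrors the
    # stdlib logging levels:
    #   dehydrate: 30        -> "WARNING"  (logging.getLevelName)
    #   hydrate:   "WARNING" -> 30         (STR_TO_SEVERITY lookup)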
def build_filters(self, filters=None, **kwargs):
"""FE will send severity strings which are converted to int here"""
is_query_dict = isinstance(filters, QueryDict)
severity = filters.get("severity", None)
if severity is not None:
# Handle single string rep of severity values. (numeric in DB)
del filters["severity"]
if severity:
filters["severity"] = conversion_util.STR_TO_SEVERITY[severity]
else:
# Handle list of string reps of severity values (numeric in DB)
if is_query_dict:
severity_list = filters.getlist("severity__in", None)
else:
severity_list = filters.get("severity__in", None)
if isinstance(severity_list, basestring):
severity_list = [severity_list]
if severity_list:
del filters["severity__in"]
converted_list = []
for severity_str in severity_list:
converted_list.append(str(conversion_util.STR_TO_SEVERITY[severity_str]))
if is_query_dict:
filters.setlist("severity__in", converted_list)
else:
filters.set("severity__in", converted_list)
return super(SeverityResource, self).build_filters(filters, **kwargs)
class BulkResourceOperation(object):
def _bulk_operation(self, action, object_name, bundle, request, **kwargs):
bulk_action_results = []
errors_exist = False
def _call_action(bulk_action_results, action, data, request, **kwargs):
try:
bulk_action_result = action(self, data, request, **kwargs)
except Exception as e:
bulk_action_result = self.BulkActionResult(
None, str(e), "\n".join(traceback.format_exception(*(sys.exc_info())))
)
bulk_action_results.append(bulk_action_result)
return bulk_action_result.error != None
for data in bundle.data.get("objects", [bundle.data]):
errors_exist |= _call_action(bulk_action_results, action, data, request, **kwargs)
if "objects" in bundle.data:
raise custom_response(
self,
request,
http.HttpBadRequest if errors_exist else http.HttpAccepted,
{
"objects": [
{
object_name: bulk_action_result.object,
"error": bulk_action_result.error,
"traceback": bulk_action_result.traceback,
}
for bulk_action_result in bulk_action_results
]
},
)
if errors_exist:
# Return 400, a failure here could mean many things.
raise custom_response(
self,
request,
http.HttpBadRequest,
{"error": bulk_action_results[0].error, "traceback": bulk_action_results[0].traceback},
)
else:
# REMOVE BEFORE LANDING
# TODO: Horrible special case that I don't want to fix up at this time. When command is returned it is returned command: data
# but everything else is just data.
# I'm not going to raise a ticket because it will not make the backlog, but at some point the front and back should remove
# this anomaly.
if object_name == "csssommand":
result = {"command": bulk_action_results[0].object}
else:
result = bulk_action_results[0].object
raise custom_response(
self, request, http.HttpAccepted if bulk_action_results[0].object else http.HttpNoContent, result
)
BulkActionResult = namedtuple("BulkActionResult", ["object", "error", "traceback"])
class DateSerializer(Serializer):
"""
Serializer to format datetimes in ISO 8601 but with timezone
offset.
"""
def format_datetime(self, data):
if timezone.is_naive(data):
return super(DateSerializer, self).format_datetime(data)
return data.isoformat()
| mit | -5,619,192,406,841,260,000 | 37.269841 | 137 | 0.61652 | false |
karthik339/Agni | MainDemo/flask/lib/python2.7/site-packages/migrate/tests/versioning/test_shell.py | 27 | 25016 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import tempfile
from cStringIO import StringIO
from sqlalchemy import MetaData, Table
from nose.plugins.skip import SkipTest
from migrate.exceptions import *
from migrate.versioning.repository import Repository
from migrate.versioning import genmodel, shell, api
from migrate.tests.fixture import Shell, DB, usedb
from migrate.tests.fixture import models
class TestShellCommands(Shell):
"""Tests migrate.py commands"""
def test_help(self):
"""Displays default help dialog"""
self.assertEqual(self.env.run('migrate -h').returncode, 0)
self.assertEqual(self.env.run('migrate --help').returncode, 0)
self.assertEqual(self.env.run('migrate help').returncode, 0)
def test_help_commands(self):
"""Display help on a specific command"""
# we can only test that we get some output
for cmd in api.__all__:
result = self.env.run('migrate help %s' % cmd)
self.assertTrue(isinstance(result.stdout, basestring))
self.assertTrue(result.stdout)
self.assertFalse(result.stderr)
def test_shutdown_logging(self):
"""Try to shutdown logging output"""
repos = self.tmp_repos()
result = self.env.run('migrate create %s repository_name' % repos)
result = self.env.run('migrate version %s --disable_logging' % repos)
self.assertEqual(result.stdout, '')
result = self.env.run('migrate version %s -q' % repos)
self.assertEqual(result.stdout, '')
# TODO: assert logging messages to 0
shell.main(['version', repos], logging=False)
def test_main_with_runpy(self):
if sys.version_info[:2] == (2, 4):
raise SkipTest("runpy is not part of python2.4")
from runpy import run_module
try:
original = sys.argv
sys.argv=['X','--help']
run_module('migrate.versioning.shell', run_name='__main__')
finally:
sys.argv = original
def _check_error(self,args,code,expected,**kw):
original = sys.stderr
try:
actual = StringIO()
sys.stderr = actual
try:
shell.main(args,**kw)
except SystemExit, e:
self.assertEqual(code,e.args[0])
else:
self.fail('No exception raised')
finally:
sys.stderr = original
actual = actual.getvalue()
self.assertTrue(expected in actual,'%r not in:\n"""\n%s\n"""'%(expected,actual))
def test_main(self):
"""Test main() function"""
repos = self.tmp_repos()
shell.main(['help'])
shell.main(['help', 'create'])
shell.main(['create', 'repo_name', '--preview_sql'], repository=repos)
shell.main(['version', '--', '--repository=%s' % repos])
shell.main(['version', '-d', '--repository=%s' % repos, '--version=2'])
self._check_error(['foobar'],2,'error: Invalid command foobar')
self._check_error(['create', 'f', 'o', 'o'],2,'error: Too many arguments for command create: o')
self._check_error(['create'],2,'error: Not enough arguments for command create: name, repository not specified')
self._check_error(['create', 'repo_name'],2,'already exists', repository=repos)
def test_create(self):
"""Repositories are created successfully"""
repos = self.tmp_repos()
# Creating a file that doesn't exist should succeed
result = self.env.run('migrate create %s repository_name' % repos)
# Files should actually be created
self.assert_(os.path.exists(repos))
# The default table should not be None
repos_ = Repository(repos)
self.assertNotEquals(repos_.config.get('db_settings', 'version_table'), 'None')
# Can't create it again: it already exists
result = self.env.run('migrate create %s repository_name' % repos,
expect_error=True)
self.assertEqual(result.returncode, 2)
def test_script(self):
"""We can create a migration script via the command line"""
repos = self.tmp_repos()
result = self.env.run('migrate create %s repository_name' % repos)
result = self.env.run('migrate script --repository=%s Desc' % repos)
self.assert_(os.path.exists('%s/versions/001_Desc.py' % repos))
result = self.env.run('migrate script More %s' % repos)
self.assert_(os.path.exists('%s/versions/002_More.py' % repos))
result = self.env.run('migrate script "Some Random name" %s' % repos)
self.assert_(os.path.exists('%s/versions/003_Some_Random_name.py' % repos))
def test_script_sql(self):
"""We can create a migration sql script via the command line"""
repos = self.tmp_repos()
result = self.env.run('migrate create %s repository_name' % repos)
result = self.env.run('migrate script_sql mydb foo %s' % repos)
self.assert_(os.path.exists('%s/versions/001_foo_mydb_upgrade.sql' % repos))
self.assert_(os.path.exists('%s/versions/001_foo_mydb_downgrade.sql' % repos))
# Test creating a second
result = self.env.run('migrate script_sql postgres foo --repository=%s' % repos)
self.assert_(os.path.exists('%s/versions/002_foo_postgres_upgrade.sql' % repos))
self.assert_(os.path.exists('%s/versions/002_foo_postgres_downgrade.sql' % repos))
# TODO: test --previews
def test_manage(self):
"""Create a project management script"""
script = self.tmp_py()
self.assert_(not os.path.exists(script))
# No attempt is made to verify correctness of the repository path here
result = self.env.run('migrate manage %s --repository=/bla/' % script)
self.assert_(os.path.exists(script))
class TestShellRepository(Shell):
"""Shell commands on an existing repository/python script"""
def setUp(self):
"""Create repository, python change script"""
super(TestShellRepository, self).setUp()
self.path_repos = self.tmp_repos()
result = self.env.run('migrate create %s repository_name' % self.path_repos)
def test_version(self):
"""Correctly detect repository version"""
# Version: 0 (no scripts yet); successful execution
result = self.env.run('migrate version --repository=%s' % self.path_repos)
self.assertEqual(result.stdout.strip(), "0")
# Also works as a positional param
result = self.env.run('migrate version %s' % self.path_repos)
self.assertEqual(result.stdout.strip(), "0")
# Create a script and version should increment
result = self.env.run('migrate script Desc %s' % self.path_repos)
result = self.env.run('migrate version %s' % self.path_repos)
self.assertEqual(result.stdout.strip(), "1")
def test_source(self):
"""Correctly fetch a script's source"""
result = self.env.run('migrate script Desc --repository=%s' % self.path_repos)
filename = '%s/versions/001_Desc.py' % self.path_repos
source = open(filename).read()
self.assert_(source.find('def upgrade') >= 0)
# Version is now 1
result = self.env.run('migrate version %s' % self.path_repos)
self.assertEqual(result.stdout.strip(), "1")
# Output/verify the source of version 1
result = self.env.run('migrate source 1 --repository=%s' % self.path_repos)
self.assertEqual(result.stdout.strip(), source.strip())
# We can also send the source to a file... test that too
result = self.env.run('migrate source 1 %s --repository=%s' %
(filename, self.path_repos))
self.assert_(os.path.exists(filename))
fd = open(filename)
result = fd.read()
self.assert_(result.strip() == source.strip())
class TestShellDatabase(Shell, DB):
"""Commands associated with a particular database"""
# We'll need to clean up after ourself, since the shell creates its own txn;
# we need to connect to the DB to see if things worked
level = DB.CONNECT
@usedb()
def test_version_control(self):
"""Ensure we can set version control on a database"""
path_repos = repos = self.tmp_repos()
url = self.url
result = self.env.run('migrate create %s repository_name' % repos)
result = self.env.run('migrate drop_version_control %(url)s %(repos)s'\
% locals(), expect_error=True)
self.assertEqual(result.returncode, 1)
result = self.env.run('migrate version_control %(url)s %(repos)s' % locals())
# Clean up
result = self.env.run('migrate drop_version_control %(url)s %(repos)s' % locals())
# Attempting to drop vc from a database without it should fail
result = self.env.run('migrate drop_version_control %(url)s %(repos)s'\
% locals(), expect_error=True)
self.assertEqual(result.returncode, 1)
@usedb()
def test_wrapped_kwargs(self):
"""Commands with default arguments set by manage.py"""
path_repos = repos = self.tmp_repos()
url = self.url
result = self.env.run('migrate create --name=repository_name %s' % repos)
result = self.env.run('migrate drop_version_control %(url)s %(repos)s' % locals(), expect_error=True)
self.assertEqual(result.returncode, 1)
result = self.env.run('migrate version_control %(url)s %(repos)s' % locals())
result = self.env.run('migrate drop_version_control %(url)s %(repos)s' % locals())
@usedb()
def test_version_control_specified(self):
"""Ensure we can set version control to a particular version"""
path_repos = self.tmp_repos()
url = self.url
result = self.env.run('migrate create --name=repository_name %s' % path_repos)
result = self.env.run('migrate drop_version_control %(url)s %(path_repos)s' % locals(), expect_error=True)
self.assertEqual(result.returncode, 1)
# Fill the repository
path_script = self.tmp_py()
version = 2
for i in range(version):
result = self.env.run('migrate script Desc --repository=%s' % path_repos)
# Repository version is correct
result = self.env.run('migrate version %s' % path_repos)
self.assertEqual(result.stdout.strip(), str(version))
# Apply versioning to DB
result = self.env.run('migrate version_control %(url)s %(path_repos)s %(version)s' % locals())
# Test db version number (should start at 2)
result = self.env.run('migrate db_version %(url)s %(path_repos)s' % locals())
self.assertEqual(result.stdout.strip(), str(version))
# Clean up
result = self.env.run('migrate drop_version_control %(url)s %(path_repos)s' % locals())
@usedb()
def test_upgrade(self):
"""Can upgrade a versioned database"""
# Create a repository
repos_name = 'repos_name'
repos_path = self.tmp()
result = self.env.run('migrate create %(repos_path)s %(repos_name)s' % locals())
self.assertEquals(self.run_version(repos_path), 0)
# Version the DB
result = self.env.run('migrate drop_version_control %s %s' % (self.url, repos_path), expect_error=True)
result = self.env.run('migrate version_control %s %s' % (self.url, repos_path))
# Upgrades with latest version == 0
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
result = self.env.run('migrate upgrade %s %s' % (self.url, repos_path))
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
result = self.env.run('migrate upgrade %s %s' % (self.url, repos_path))
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
result = self.env.run('migrate upgrade %s %s 1' % (self.url, repos_path), expect_error=True)
self.assertEquals(result.returncode, 1)
result = self.env.run('migrate upgrade %s %s -1' % (self.url, repos_path), expect_error=True)
self.assertEquals(result.returncode, 2)
# Add a script to the repository; upgrade the db
result = self.env.run('migrate script Desc --repository=%s' % (repos_path))
self.assertEquals(self.run_version(repos_path), 1)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
# Test preview
result = self.env.run('migrate upgrade %s %s 0 --preview_sql' % (self.url, repos_path))
result = self.env.run('migrate upgrade %s %s 0 --preview_py' % (self.url, repos_path))
result = self.env.run('migrate upgrade %s %s' % (self.url, repos_path))
self.assertEquals(self.run_db_version(self.url, repos_path), 1)
# Downgrade must have a valid version specified
result = self.env.run('migrate downgrade %s %s' % (self.url, repos_path), expect_error=True)
self.assertEquals(result.returncode, 2)
result = self.env.run('migrate downgrade %s %s -1' % (self.url, repos_path), expect_error=True)
self.assertEquals(result.returncode, 2)
result = self.env.run('migrate downgrade %s %s 2' % (self.url, repos_path), expect_error=True)
self.assertEquals(result.returncode, 2)
self.assertEquals(self.run_db_version(self.url, repos_path), 1)
result = self.env.run('migrate downgrade %s %s 0' % (self.url, repos_path))
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
result = self.env.run('migrate downgrade %s %s 1' % (self.url, repos_path), expect_error=True)
self.assertEquals(result.returncode, 2)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
result = self.env.run('migrate drop_version_control %s %s' % (self.url, repos_path))
def _run_test_sqlfile(self, upgrade_script, downgrade_script):
# TODO: add test script that checks if db really changed
repos_path = self.tmp()
repos_name = 'repos'
result = self.env.run('migrate create %s %s' % (repos_path, repos_name))
result = self.env.run('migrate drop_version_control %s %s' % (self.url, repos_path), expect_error=True)
result = self.env.run('migrate version_control %s %s' % (self.url, repos_path))
self.assertEquals(self.run_version(repos_path), 0)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
beforeCount = len(os.listdir(os.path.join(repos_path, 'versions'))) # hmm, this number changes sometimes based on running from svn
result = self.env.run('migrate script_sql %s --repository=%s' % ('postgres', repos_path))
self.assertEquals(self.run_version(repos_path), 1)
self.assertEquals(len(os.listdir(os.path.join(repos_path, 'versions'))), beforeCount + 2)
open('%s/versions/001_postgres_upgrade.sql' % repos_path, 'a').write(upgrade_script)
open('%s/versions/001_postgres_downgrade.sql' % repos_path, 'a').write(downgrade_script)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
self.assertRaises(Exception, self.engine.text('select * from t_table').execute)
result = self.env.run('migrate upgrade %s %s' % (self.url, repos_path))
self.assertEquals(self.run_db_version(self.url, repos_path), 1)
self.engine.text('select * from t_table').execute()
result = self.env.run('migrate downgrade %s %s 0' % (self.url, repos_path))
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
self.assertRaises(Exception, self.engine.text('select * from t_table').execute)
# The tests below are written with some postgres syntax, but the stuff
# being tested (.sql files) ought to work with any db.
@usedb(supported='postgres')
def test_sqlfile(self):
upgrade_script = """
create table t_table (
id serial,
primary key(id)
);
"""
downgrade_script = """
drop table t_table;
"""
self.meta.drop_all()
self._run_test_sqlfile(upgrade_script, downgrade_script)
@usedb(supported='postgres')
def test_sqlfile_comment(self):
upgrade_script = """
-- Comments in SQL break postgres autocommit
create table t_table (
id serial,
primary key(id)
);
"""
downgrade_script = """
-- Comments in SQL break postgres autocommit
drop table t_table;
"""
self._run_test_sqlfile(upgrade_script, downgrade_script)
@usedb()
def test_command_test(self):
repos_name = 'repos_name'
repos_path = self.tmp()
result = self.env.run('migrate create repository_name --repository=%s' % repos_path)
result = self.env.run('migrate drop_version_control %s %s' % (self.url, repos_path), expect_error=True)
result = self.env.run('migrate version_control %s %s' % (self.url, repos_path))
self.assertEquals(self.run_version(repos_path), 0)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
# Empty script should succeed
result = self.env.run('migrate script Desc %s' % repos_path)
result = self.env.run('migrate test %s %s' % (self.url, repos_path))
self.assertEquals(self.run_version(repos_path), 1)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
# Error script should fail
script_path = self.tmp_py()
script_text='''
from sqlalchemy import *
from migrate import *
def upgrade():
print 'fgsfds'
raise Exception()
def downgrade():
print 'sdfsgf'
raise Exception()
'''.replace("\n ", "\n")
file = open(script_path, 'w')
file.write(script_text)
file.close()
result = self.env.run('migrate test %s %s bla' % (self.url, repos_path), expect_error=True)
self.assertEqual(result.returncode, 2)
self.assertEquals(self.run_version(repos_path), 1)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
# Nonempty script using migrate_engine should succeed
script_path = self.tmp_py()
script_text = '''
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
meta = MetaData(migrate_engine)
account = Table('account', meta,
Column('id', Integer, primary_key=True),
Column('login', Text),
Column('passwd', Text),
)
def upgrade():
# Upgrade operations go here. Don't create your own engine; use the engine
# named 'migrate_engine' imported from migrate.
meta.create_all()
def downgrade():
# Operations to reverse the above upgrade go here.
meta.drop_all()
'''.replace("\n ", "\n")
file = open(script_path, 'w')
file.write(script_text)
file.close()
result = self.env.run('migrate test %s %s' % (self.url, repos_path))
self.assertEquals(self.run_version(repos_path), 1)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
@usedb()
def test_rundiffs_in_shell(self):
# This is a variant of the test_schemadiff tests but run through the shell level.
# These shell tests are hard to debug (since they keep forking processes)
# so they shouldn't replace the lower-level tests.
repos_name = 'repos_name'
repos_path = self.tmp()
script_path = self.tmp_py()
model_module = 'migrate.tests.fixture.models:meta_rundiffs'
old_model_module = 'migrate.tests.fixture.models:meta_old_rundiffs'
# Create empty repository.
self.meta = MetaData(self.engine, reflect=True)
self.meta.reflect()
self.meta.drop_all() # in case junk tables are lying around in the test database
result = self.env.run('migrate create %s %s' % (repos_path, repos_name))
result = self.env.run('migrate drop_version_control %s %s' % (self.url, repos_path), expect_error=True)
result = self.env.run('migrate version_control %s %s' % (self.url, repos_path))
self.assertEquals(self.run_version(repos_path), 0)
self.assertEquals(self.run_db_version(self.url, repos_path), 0)
# Setup helper script.
result = self.env.run('migrate manage %s --repository=%s --url=%s --model=%s'\
% (script_path, repos_path, self.url, model_module))
self.assert_(os.path.exists(script_path))
# Model is defined but database is empty.
result = self.env.run('migrate compare_model_to_db %s %s --model=%s' \
% (self.url, repos_path, model_module))
self.assert_("tables missing from database: tmp_account_rundiffs" in result.stdout)
# Test Deprecation
result = self.env.run('migrate compare_model_to_db %s %s --model=%s' \
% (self.url, repos_path, model_module.replace(":", ".")), expect_error=True)
self.assertEqual(result.returncode, 0)
self.assertTrue("DeprecationWarning" in result.stderr)
self.assert_("tables missing from database: tmp_account_rundiffs" in result.stdout)
# Update db to latest model.
result = self.env.run('migrate update_db_from_model %s %s %s'\
% (self.url, repos_path, model_module))
self.assertEquals(self.run_version(repos_path), 0)
self.assertEquals(self.run_db_version(self.url, repos_path), 0) # version did not get bumped yet because new version not yet created
result = self.env.run('migrate compare_model_to_db %s %s %s'\
% (self.url, repos_path, model_module))
self.assert_("No schema diffs" in result.stdout)
result = self.env.run('migrate drop_version_control %s %s' % (self.url, repos_path), expect_error=True)
result = self.env.run('migrate version_control %s %s' % (self.url, repos_path))
result = self.env.run('migrate create_model %s %s' % (self.url, repos_path))
temp_dict = dict()
exec result.stdout in temp_dict
# TODO: breaks on SA06 and SA05 - in need of total refactor - use different approach
# TODO: compare whole table
self.compare_columns_equal(models.tmp_account_rundiffs.c, temp_dict['tmp_account_rundiffs'].c, ['type'])
##self.assertTrue("""tmp_account_rundiffs = Table('tmp_account_rundiffs', meta,
##Column('id', Integer(), primary_key=True, nullable=False),
##Column('login', String(length=None, convert_unicode=False, assert_unicode=None)),
##Column('passwd', String(length=None, convert_unicode=False, assert_unicode=None))""" in result.stdout)
## We're happy with db changes, make first db upgrade script to go from version 0 -> 1.
#result = self.env.run('migrate make_update_script_for_model', expect_error=True)
#self.assertTrue('Not enough arguments' in result.stderr)
#result_script = self.env.run('migrate make_update_script_for_model %s %s %s %s'\
#% (self.url, repos_path, old_model_module, model_module))
#self.assertEqualsIgnoreWhitespace(result_script.stdout,
#'''from sqlalchemy import *
#from migrate import *
#from migrate.changeset import schema
#meta = MetaData()
#tmp_account_rundiffs = Table('tmp_account_rundiffs', meta,
#Column('id', Integer(), primary_key=True, nullable=False),
#Column('login', Text(length=None, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)),
#Column('passwd', Text(length=None, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False)),
#)
#def upgrade(migrate_engine):
## Upgrade operations go here. Don't create your own engine; bind migrate_engine
## to your metadata
#meta.bind = migrate_engine
#tmp_account_rundiffs.create()
#def downgrade(migrate_engine):
## Operations to reverse the above upgrade go here.
#meta.bind = migrate_engine
#tmp_account_rundiffs.drop()''')
## Save the upgrade script.
#result = self.env.run('migrate script Desc %s' % repos_path)
#upgrade_script_path = '%s/versions/001_Desc.py' % repos_path
#open(upgrade_script_path, 'w').write(result_script.stdout)
#result = self.env.run('migrate compare_model_to_db %s %s %s'\
#% (self.url, repos_path, model_module))
#self.assert_("No schema diffs" in result.stdout)
self.meta.drop_all() # in case junk tables are lying around in the test database
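# Typical command sequence exercised by the tests above (illustrative; the
# repository path and database URL are made up):
#   migrate create /tmp/repo repo_name
#   migrate version_control sqlite:///test.db /tmp/repo
#   migrate script "Add table" /tmp/repo
#   migrate upgrade sqlite:///test.db /tmp/repo
#   migrate downgrade sqlite:///test.db /tmp/repo 0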
| apache-2.0 | -8,425,603,622,678,057,000 | 43.992806 | 141 | 0.619444 | false |
immenz/pyload | module/plugins/hoster/CloudzillaTo.py | 2 | 2032 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class CloudzillaTo(SimpleHoster):
__name__ = "CloudzillaTo"
__type__ = "hoster"
__version__ = "0.06"
__pattern__ = r'http://(?:www\.)?cloudzilla\.to/share/file/(?P<ID>[\w^_]+)'
__description__ = """Cloudzilla.to hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Walter Purcaro", "[email protected]")]
INFO_PATTERN = r'title="(?P<N>.+?)">\1</span> <span class="size">\((?P<S>[\d.]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r'>File not found...<'
PASSWORD_PATTERN = r'<div id="pwd_protected">'
def checkErrors(self):
m = re.search(self.PASSWORD_PATTERN, self.html)
if m:
self.html = self.load(self.pyfile.url, get={'key': self.getPassword()})
if re.search(self.PASSWORD_PATTERN, self.html):
self.retry(reason="Wrong password")
def handleFree(self, pyfile):
self.html = self.load("http://www.cloudzilla.to/generateticket/",
post={'file_id': self.info['pattern']['ID'], 'key': self.getPassword()})
ticket = dict(re.findall(r'<(.+?)>([^<>]+?)</', self.html))
self.logDebug(ticket)
if 'error' in ticket:
if "File is password protected" in ticket['error']:
self.retry(reason="Wrong password")
else:
self.fail(ticket['error'])
if 'wait' in ticket:
self.wait(ticket['wait'], int(ticket['wait']) > 5)
self.link = "http://%(server)s/download/%(file_id)s/%(ticket_id)s" % {'server' : ticket['server'],
'file_id' : self.info['pattern']['ID'],
'ticket_id': ticket['ticket_id']}
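        # Illustrative ticket (added; the values are made up): the tag/value
        # pairs parsed above typically look like
        #   {'wait': '30', 'server': 'dl.cloudzilla.to', 'ticket_id': 'abc123'}
        # and are turned into the final /download/<file_id>/<ticket_id> link.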
def handlePremium(self, pyfile):
return self.handleFree(pyfile)
getInfo = create_getInfo(CloudzillaTo)
| gpl-3.0 | 8,265,166,232,704,824,000 | 32.311475 | 118 | 0.517224 | false |
isyippee/ceilometer | ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py | 9 | 2206 | #
# Copyright 2013 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from ceilometer.storage.sqlalchemy import migration
from ceilometer.storage.sqlalchemy import models
def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
temp_col_n = 'convert_data_type_temp_col'
# Override column we're going to convert with from_t, since the type we're
    # replacing could be custom and we need to tell SQLAlchemy how to perform
# CRUD operations with it.
table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
extend_existing=True)
sa.Column(temp_col_n, to_t).create(table)
key_attr = getattr(table.c, pk_attr)
orig_col = getattr(table.c, col)
new_col = getattr(table.c, temp_col_n)
query = sa.select([key_attr, orig_col])
for key, value in migration.paged(query):
(table.update().where(key_attr == key).values({temp_col_n: value}).
execute())
orig_col.drop()
new_col.alter(name=col)
if index:
sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
def upgrade(migrate_engine):
if migrate_engine.name == 'mysql':
meta = sa.MetaData(bind=migrate_engine)
event = sa.Table('event', meta, autoload=True)
_convert_data_type(event, 'generated', sa.Float(),
models.PreciseTimestamp(),
pk_attr='id', index=True)
trait = sa.Table('trait', meta, autoload=True)
_convert_data_type(trait, 't_datetime', sa.Float(),
models.PreciseTimestamp(),
pk_attr='id', index=True)
| apache-2.0 | 944,523,893,478,122,100 | 38.392857 | 78 | 0.651405 | false |
ineiti/cothorities | byzcoin/simulation/plot_sim.py | 2 | 2675 | import matplotlib.pyplot as plt
import pandas as pd
import os
data_dir = './test_data/'
files = [(data_dir + fname) for fname in os.listdir(data_dir)\
if fname.startswith('coins') and fname.endswith('.csv')]
def read_all_files(files):
df = pd.DataFrame()
for fname in files:
data = pd.read_csv(fname)
# We need to add variables regarding
# batching and keeping here.
batch = '_batch' in fname
keep = not '_nokeep' in fname
rowcount = len(data.index)
b_vals = pd.Series([batch for i in range(rowcount)])
k_vals = pd.Series([keep for i in range(rowcount)])
data = data.assign(batch=b_vals.values)
data = data.assign(keep=k_vals.values)
# If the data frame is empty (first iteration),
# we append no matter what. Otherwise, we append
# IFF the colums are the same.
if df.empty \
or (len(data.columns) == len(df.columns) \
and (data.columns == df.columns).all()):
df = df.append(data, ignore_index=True)
return df
df = read_all_files(files)
delays = list(set(df['delay']))
keep = list(set(df['keep']))
batch = list(set(df['batch']))
for delay in delays:
for k in keep:
for b in batch:
titlestring = 'Transactions: 1000, delay: {}, keep: {}, batch: {}'.format(delay, k, b)
            # No whitespace, colons or commas in filenames
namestring = titlestring.replace(' ','').replace(':','-').replace(',','_')
data = df.ix[df['delay'] == delay].sort_values('hosts')
data = data.ix[data['keep'] == k]
data = data.ix[data['batch'] == b]
data = data.reset_index()
ax = data.plot.bar(\
x='hosts',\
y=['prepare_wall_sum','send_wall_sum','confirm_wall_avg'],\
stacked=True)
data.plot(y='round_wall_avg', marker='o', ax=ax)
plt.xlabel('number of hosts')
plt.ylabel('time in seconds')
plt.title(titlestring)
plt.savefig(data_dir + 'barplot_' + namestring + '.png')
plt.close()
ax = data.plot.bar(\
x='hosts',\
y=['prepare_wall_sum','send_wall_sum','confirm_wall_avg'],\
stacked=True)
data.plot(y='round_wall_avg', marker='o', ax=ax)
ax.set_yscale('log')
plt.xlabel('number of hosts')
plt.ylabel('logarithm of time in seconds')
plt.title(titlestring)
plt.savefig(data_dir + 'barplot_log_delay_' + namestring + '.png')
plt.close()
| gpl-2.0 | -6,581,748,470,544,917 | 36.152778 | 98 | 0.538692 | false |
igor-toga/local-snat | neutron/extensions/extraroute.py | 5 | 2402 | # Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib import constants
from neutron_lib import exceptions as nexception
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
# Extra Routes Exceptions
class InvalidRoutes(nexception.InvalidInput):
message = _("Invalid format for routes: %(routes)s, %(reason)s")
class RouterInterfaceInUseByRoute(nexception.InUse):
message = _("Router interface for subnet %(subnet_id)s on router "
"%(router_id)s cannot be deleted, as it is required "
"by one or more routes.")
class RoutesExhausted(nexception.BadRequest):
message = _("Unable to complete operation for %(router_id)s. "
"The number of routes exceeds the maximum %(quota)s.")
# Attribute Map
EXTENDED_ATTRIBUTES_2_0 = {
'routers': {
'routes': {'allow_post': False, 'allow_put': True,
'validate': {'type:hostroutes': None},
'convert_to': converters.convert_none_to_empty_list,
'is_visible': True,
'default': constants.ATTR_NOT_SPECIFIED},
}
}
class Extraroute(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron Extra Route"
@classmethod
def get_alias(cls):
return "extraroute"
@classmethod
def get_description(cls):
return "Extra routes configuration for L3 router"
@classmethod
def get_updated(cls):
return "2013-02-01T10:00:00-00:00"
def get_extended_resources(self, version):
if version == "2.0":
attr.PLURALS.update({'routes': 'route'})
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
| apache-2.0 | -192,061,816,716,873,900 | 31.026667 | 78 | 0.659034 | false |
wjwwood/open-robotics-platform | dash/lib/builders/toolbar.py | 1 | 8747 | #!/usr/bin/env python -OO
# encoding: utf-8
###########
# ORP - Open Robotics Platform
#
# Copyright (c) 2010 John Harrison, William Woodall
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##########
"""
toolbar.py - Contains the construction of the toolbar
Created by William Woodall on 2010-10-30.
"""
__author__ = "William Woodall"
__copyright__ = "Copyright (c) 2010 John Harrison, William Woodall"
### Imports ###
# Standard Python Libraries
import sys
import os
# Other Libraries
import lib.elements
import lib.events.localfiles as localfiles
import lib.events.servermanagement as servermanagement
import lib.events.remotefiles as remotefiles
import lib.events.controlcode as controlcode
import lib.events.remotecontrol as remotecontrol
try: # try to catch any missing dependancies
# wx for window elements
PKGNAME = 'wxpython'
import wx
import wx.aui as aui
del PKGNAME
except ImportError as PKG_ERROR: # We are missing something, let them know...
sys.stderr.write(str(PKG_ERROR)+"\nYou might not have the "+PKGNAME+" \
module, try 'easy_install "+PKGNAME+"', else consult google.")
### Classes ###
class Toolbars:
"""Class containing Toolbars"""
def __init__(self, parent, mgr):
self.parent = parent
self.mgr = mgr
# Create the toolbars (they are added in reverse order)
self.createControlCodeToolbar()
self.createRemoteFileToolbar()
self.createLocalToolbar()
self.createRemoteServerToolbar()
self.createSyncToolbar()
# Link to elements
lib.elements.TOOLBAR = self
def createSyncToolbar(self):
self.sync_tb = wx.ToolBar(self.parent, -1, wx.DefaultPosition, wx.DefaultSize, wx.TB_FLAT | wx.TB_NODIVIDER)
self.sync_button = wx.Button(self.sync_tb, -1, 'Sync')
self.sync_tb.AddControl(self.sync_button)
self.sync_button.Enabled = False
self.sync_tb.Realize()
self.mgr.AddPane(self.sync_tb, aui.AuiPaneInfo().Name("Sync File").
Caption("Local File Operations").ToolbarPane().Top().Row(2))
def createLocalToolbar(self):
"""Creates the Local Toolbar"""
## Local operations on Current file ##
# Create tool bar
self.local_tb = wx.ToolBar(self.parent, -1, wx.DefaultPosition, wx.DefaultSize,
wx.TB_FLAT | wx.TB_NODIVIDER)
lib.elements.LOCAL_FILE_TOOLBAR = self.local_tb
# Save Button
self.save_button = wx.Button(self.local_tb, -1, 'Save')
self.local_tb.AddControl(self.save_button)
self.save_button.Bind(wx.EVT_BUTTON, localfiles.saveFile)
# Run Locally
self.run_locally_button = wx.Button(self.local_tb, -1, 'Run Locally')
self.run_locally_button.Disable()
self.local_tb.AddControl(self.run_locally_button)
self.run_locally_button.Bind(wx.EVT_BUTTON, localfiles.runLocally)
# Add the toolbar to the aui manager
self.local_tb.Realize()
self.mgr.AddPane(self.local_tb, aui.AuiPaneInfo().Name("Local File").
Caption("Local File Operations").ToolbarPane().Top().Row(1))
def createRemoteServerToolbar(self):
"""Creates the Remote Server Toolbar"""
## Remote Server Management ##
# Create Toolbar
self.server_tb = wx.ToolBar(self.parent, -1, wx.DefaultPosition, wx.DefaultSize,
wx.TB_FLAT | wx.TB_NODIVIDER)
lib.elements.REMOTE_SERVER_TOOLBAR = self.server_tb
# Connect/Disconnect Button
self.connect_button = wx.Button(self.server_tb, -1, "Connect", size=(100,-1))
self.server_tb.AddControl(self.connect_button)
self.connect_button.Bind(wx.EVT_BUTTON, servermanagement.connect)
# Server Address Text Box
self.server_addr = wx.TextCtrl(self.server_tb, -1, "localhost", size=(-1,-1))
self.server_tb.AddControl(self.server_addr)
# Restart Button
self.restart_button = wx.Button(self.server_tb, -1, "Restart", size=(-1,-1))
self.restart_button.Disable()
self.server_tb.AddControl(self.restart_button)
self.restart_button.Bind(wx.EVT_BUTTON, servermanagement.restart)
# Shutdown Button
self.shutdown_button = wx.Button(self.server_tb, -1, "Shutdown", size=(-1,-1))
self.shutdown_button.Disable()
self.server_tb.AddControl(self.shutdown_button)
self.shutdown_button.Bind(wx.EVT_BUTTON, servermanagement.shutdown)
# R/C Button
self.RC_button = wx.Button(self.server_tb, -1, "Turn on R/C", size=(-1,-1))
self.RC_button.Disable()
self.server_tb.AddControl(self.RC_button)
self.RC_button.Bind(wx.EVT_BUTTON, remotecontrol.toggle)
# Add the toolbar to the aui manager
self.server_tb.Realize()
self.mgr.AddPane(self.server_tb, aui.AuiPaneInfo().Name("Remote Server").
Caption("Remote Server Management").ToolbarPane().Top().Row(1))
def createRemoteFileToolbar(self):
"""Create Remote File Toolbar"""
## Remote File Management ##
# Create Toolbar
self.remote_tb = wx.ToolBar(self.parent, -1, wx.DefaultPosition, wx.DefaultSize,
wx.TB_FLAT | wx.TB_NODIVIDER)
lib.elements.REMOTE_FILE_TOOLBAR = self.remote_tb
# Send File Button
self.send_button = wx.Button(self.remote_tb, -1, "Send", size=(-1,-1))
self.send_button.Disable()
self.remote_tb.AddControl(self.send_button)
self.send_button.Bind(wx.EVT_BUTTON, remotefiles.send)
# Configure Robot Button
self.config_button = wx.Button(self.remote_tb, -1, "Configure Robot", size=(-1,-1))
self.config_button.Disable()
self.remote_tb.AddControl(self.config_button)
self.config_button.Bind(wx.EVT_BUTTON, remotefiles.openConfig)
# Add the toolbar to the aui manager
self.remote_tb.Realize()
self.mgr.AddPane(self.remote_tb, aui.AuiPaneInfo().Name("Remote Files").
Caption("Remote File Management").ToolbarPane().Top().Row(2))
def createControlCodeToolbar(self):
"""Creates the Control Code Toolbar"""
## Execution of Control Code
# Create Toolbar
self.cc_tb = wx.ToolBar(self.parent, -1, wx.DefaultPosition, wx.DefaultSize,
wx.TB_FLAT | wx.TB_NODIVIDER)
lib.elements.CONTROL_CODE_TOOLBAR = self.cc_tb
# Add the Run Button
self.run_button = wx.Button(self.cc_tb, -1, "Run")
self.run_button.Disable()
self.cc_tb.AddControl(self.run_button)
self.run_button.Bind(wx.EVT_BUTTON, controlcode.run)
# Add the Pause Button
self.pause_button = wx.Button(self.cc_tb, -1, "Pause")
self.pause_button.Disable()
self.cc_tb.AddControl(self.pause_button)
self.pause_button.Bind(wx.EVT_BUTTON, controlcode.pause)
# Add the Stop Button
self.stop_button = wx.Button(self.cc_tb, -1, "Stop")
self.stop_button.Disable()
self.cc_tb.AddControl(self.stop_button)
self.stop_button.Bind(wx.EVT_BUTTON, controlcode.stop)
# Add the toolbar to the aui manager
self.cc_tb.Realize()
self.mgr.AddPane(self.cc_tb, aui.AuiPaneInfo().Name("Control Code").
Caption("Control Code Control").ToolbarPane().Top().Row(2)) | mit | 7,267,131,997,484,712,000 | 41.057692 | 116 | 0.634275 | false |
xuweiliang/Codelibrary | nova/conf/quota.py | 1 | 7635 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
quota_opts = [
cfg.IntOpt('quota_instances',
min=-1,
default=10,
help="""
The number of instances allowed per project.
Possible values:
* 10 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_cores',
min=-1,
default=20,
help="""
The number of instance cores or VCPUs allowed per project.
Possible values:
* 20 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_ram',
min=-1,
default=50 * 1024,
help="""
The number of megabytes of instance RAM allowed per project.
Possible values:
* 51200 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_floating_ips',
min=-1,
default=10,
help="""
The number of floating IPs allowed per project. Floating IPs are not allocated
to instances by default. Users need to select them from the pool configured by
the OpenStack administrator to attach to their instances.
Possible values:
* 10 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_fixed_ips',
min=-1,
default=-1,
help="""
The number of fixed IPs allowed per project (this should be at least the number
of instances allowed). Unlike floating IPs, fixed IPs are allocated dynamically
by the network component when instances boot up.
Possible values:
* -1 (default) : treated as unlimited.
* Any positive integer.
"""),
cfg.IntOpt('quota_metadata_items',
min=-1,
default=128,
help="""
The number of metadata items allowed per instance. Users can associate metadata
with an instance in the form of key-value pairs during instance creation.
Possible values:
* 128 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_injected_files',
min=-1,
default=5,
help="""
The number of injected files allowed. It allows users to customize the
personality of an instance by injecting data into it upon boot. Only text
file injection is permitted. Binary or zip files won't work. During file
injection, any existing files that match the specified files are renamed to
include a .bak extension appended with a timestamp.
Possible values:
* 5 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_injected_file_content_bytes',
min=-1,
default=10 * 1024,
help="""
The number of bytes allowed per injected file.
Possible values:
* 10240 (default) or any positive integer representing number of bytes.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_injected_file_path_length',
min=-1,
default=255,
help="""
The maximum allowed injected file path length.
Possible values:
* 255 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_security_groups',
min=-1,
default=10,
help="""
The number of security groups per project.
Possible values:
* 10 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_security_group_rules',
min=-1,
default=20,
help="""
The number of security rules per security group. The associated rules in each
security group control the traffic to instances in the group.
Possible values:
* 20 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_key_pairs',
min=-1,
default=100,
help="""
The maximum number of key pairs allowed per user. Users can create at least one
key pair for each project and use the key pair for multiple instances that
belong to that project.
Possible values:
* 100 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_server_groups',
min=-1,
default=10,
help="""
Add quota values to constrain the number of server groups per project. Server
groups are used to control the affinity and anti-affinity scheduling policy for
a group of servers or instances. Reducing the quota will not affect any
existing group, but new servers will not be allowed into groups that have
become over quota.
Possible values:
* 10 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('quota_server_group_members',
min=-1,
default=10,
help="""
Add quota values to constrain the number of servers per server group.
Possible values:
* 10 (default) or any positive integer.
* -1 : treated as unlimited.
"""),
cfg.IntOpt('reservation_expire',
default=86400,
help="""
The number of seconds until a reservation expires. It represents the time
period for invalidating quota reservations.
Possible values:
* 86400 (default) or any positive integer representing number of seconds.
"""),
cfg.IntOpt('until_refresh',
min=0,
default=0,
help="""
The count of reservations until usage is refreshed. This defaults to 0 (off) to
avoid additional load but it is useful to turn on to help keep quota usage
up-to-date and reduce the impact of out of sync usage issues.
Possible values:
* 0 (default) or any positive integer.
"""),
cfg.IntOpt('max_age',
min=0,
default=0,
help="""
The number of seconds between subsequent usage refreshes. This defaults to 0
(off) to avoid additional load but it is useful to turn on to help keep quota
usage up-to-date and reduce the impact of out of sync usage issues. Note that
quotas are not updated on a periodic task, they will update on a new
reservation if max_age has passed since the last reservation.
Possible values:
* 0 (default) or any positive integer representing number of seconds.
"""),
# TODO(pumaranikar): Add a new config to select between the db_driver and
# the no_op driver using stevedore.
cfg.StrOpt('quota_driver',
default='nova.quota.DbQuotaDriver',
deprecated_for_removal=True,
help="""
Provides abstraction for quota checks. Users can configure a specific
driver to use for quota checks.
Possible values:
* nova.quota.DbQuotaDriver (default) or any string representing fully
qualified class name.
"""),
]
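# Illustrative example only (not part of the upstream module): the options above
# are registered in the DEFAULT group, so a deployment could set hypothetical
# values in nova.conf along these lines:
#
#   [DEFAULT]
#   quota_instances = 20
#   quota_cores = 40
#   quota_ram = 102400
#   quota_server_groups = 10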
def register_opts(conf):
conf.register_opts(quota_opts)
# TODO(pumaranikar): We can consider moving these options to quota group
# and renaming them all to drop the quota bit.
def list_opts():
return {'DEFAULT': quota_opts}
| apache-2.0 | -1,850,030,440,321,036,800 | 29.297619 | 79 | 0.65632 | false |
biln/airflow | tests/operators/subdag_operator.py | 6 | 3517 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import unittest
import airflow
from airflow.models import DAG, DagBag
from airflow.operators import BashOperator, DummyOperator, SubDagOperator
from airflow.jobs import BackfillJob
from airflow.exceptions import AirflowException
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
default_args = dict(
owner='airflow',
start_date=DEFAULT_DATE,
)
class SubDagOperatorTests(unittest.TestCase):
def test_subdag_name(self):
"""
Subdag names must be {parent_dag}.{subdag task}
"""
dag = DAG('parent', default_args=default_args)
subdag_good = DAG('parent.test', default_args=default_args)
subdag_bad1 = DAG('parent.bad', default_args=default_args)
subdag_bad2 = DAG('bad.test', default_args=default_args)
subdag_bad3 = DAG('bad.bad', default_args=default_args)
SubDagOperator(task_id='test', dag=dag, subdag=subdag_good)
self.assertRaises(
AirflowException,
SubDagOperator, task_id='test', dag=dag, subdag=subdag_bad1)
self.assertRaises(
AirflowException,
SubDagOperator, task_id='test', dag=dag, subdag=subdag_bad2)
self.assertRaises(
AirflowException,
SubDagOperator, task_id='test', dag=dag, subdag=subdag_bad3)
def test_subdag_pools(self):
"""
Subdags and subdag tasks can't both have a pool with 1 slot
"""
dag = DAG('parent', default_args=default_args)
subdag = DAG('parent.child', default_args=default_args)
session = airflow.settings.Session()
pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)
pool_10 = airflow.models.Pool(pool='test_pool_10', slots=10)
session.add(pool_1)
session.add(pool_10)
session.commit()
dummy_1 = DummyOperator(task_id='dummy', dag=subdag, pool='test_pool_1')
self.assertRaises(
AirflowException,
SubDagOperator,
task_id='child', dag=dag, subdag=subdag, pool='test_pool_1')
# recreate dag because failed subdagoperator was already added
dag = DAG('parent', default_args=default_args)
SubDagOperator(
task_id='child', dag=dag, subdag=subdag, pool='test_pool_10')
session.delete(pool_1)
session.delete(pool_10)
session.commit()
def test_subdag_deadlock(self):
dagbag = DagBag()
dag = dagbag.get_dag('test_subdag_deadlock')
dag.clear()
subdag = dagbag.get_dag('test_subdag_deadlock.subdag')
subdag.clear()
# first make sure subdag is deadlocked
self.assertRaisesRegexp(AirflowException, 'deadlocked', subdag.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
# now make sure dag picks up the subdag error
subdag.clear()
self.assertRaises(AirflowException, dag.run, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
| apache-2.0 | -6,480,159,040,743,236,000 | 35.257732 | 123 | 0.661359 | false |
jawed123/django-stripe-payments | payments/tests/test_middleware.py | 9 | 4083 | # pylint: disable=C0301
import decimal
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import timezone
from django.contrib.auth import authenticate, login, logout
from mock import Mock
from ..middleware import ActiveSubscriptionMiddleware
from ..models import Customer, CurrentSubscription
from ..utils import get_user_model
class DummySession(dict):
def cycle_key(self):
return
def flush(self):
return
class ActiveSubscriptionMiddlewareTests(TestCase):
urls = 'payments.tests.test_urls'
def setUp(self):
self.middleware = ActiveSubscriptionMiddleware()
self.request = Mock()
self.request.META = {}
self.request.session = DummySession()
self.old_urls = settings.SUBSCRIPTION_REQUIRED_EXCEPTION_URLS
settings.SUBSCRIPTION_REQUIRED_EXCEPTION_URLS += (
'signup',
'password_reset'
)
user = get_user_model().objects.create_user(username="patrick")
user.set_password("eldarion")
user.save()
user = authenticate(username="patrick", password="eldarion")
login(self.request, user)
def tearDown(self):
settings.SUBSCRIPTION_REQUIRED_EXCEPTION_URLS = self.old_urls
def test_authed_user_with_no_customer_redirects_on_non_exempt_url(self):
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response._headers["location"][1], # pylint: disable=W0212
reverse(settings.SUBSCRIPTION_REQUIRED_REDIRECT)
)
def test_authed_user_with_no_customer_passes_with_exempt_url(self):
self.request.path = "/accounts/signup/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_authed_user_with_no_customer_passes_with_exempt_url_containing_pattern(self):
self.request.path = "/password/reset/confirm/test-token/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_authed_user_with_no_active_subscription_passes_with_exempt_url(self):
Customer.objects.create(stripe_id="cus_1", user=self.request.user)
self.request.path = "/accounts/signup/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_authed_user_with_no_active_subscription_redirects_on_non_exempt_url(self):
Customer.objects.create(stripe_id="cus_1", user=self.request.user)
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response._headers["location"][1], # pylint: disable=W0212
reverse(settings.SUBSCRIPTION_REQUIRED_REDIRECT)
)
def test_authed_user_with_active_subscription_redirects_on_non_exempt_url(self):
customer = Customer.objects.create(
stripe_id="cus_1",
user=self.request.user
)
CurrentSubscription.objects.create(
customer=customer,
plan="pro",
quantity=1,
start=timezone.now(),
status="active",
cancel_at_period_end=False,
amount=decimal.Decimal("19.99"),
currency="usd"
)
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_unauthed_user_passes(self):
logout(self.request)
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
def test_staff_user_passes(self):
self.request.user.is_staff = True
self.request.path = "/the/app/"
response = self.middleware.process_request(self.request)
self.assertIsNone(response)
| mit | 4,779,521,651,120,723,000 | 34.504348 | 90 | 0.661768 | false |
Lyleo/nupic | tests/external/asteval_test.py | 14 | 1339 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Test asteval module is installed."""
import unittest2 as unittest
class TestCase(unittest.TestCase):
def testImportAndVersions(self):
import asteval
from pkg_resources import parse_version
self.assertGreater(parse_version(asteval.__version__), parse_version("0.9"))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -4,661,468,881,894,785,000 | 32.475 | 80 | 0.664675 | false |
EricSekyere/airmozilla | airmozilla/cronlogger/decorators.py | 15 | 1456 | import time
import sys
import functools
import contextlib
import traceback
from StringIO import StringIO
from .models import CronLog
@contextlib.contextmanager
def redirect_streams(stdout, stderr):
sys.stdout = stdout
sys.stderr = stderr
yield
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
def capture(f):
@functools.wraps(f)
def inner(*args, **kwargs):
stdout = StringIO()
stderr = StringIO()
with redirect_streams(stdout, stderr):
try:
t0 = time.time()
result = f(*args, **kwargs)
t1 = time.time()
CronLog.objects.create(
job=f.func_name,
stdout=stdout.getvalue(),
stderr=stderr.getvalue(),
duration='%.3f' % (t1 - t0),
)
return result
except Exception:
t1 = time.time()
exc_type, exc_value, exc_tb = sys.exc_info()
CronLog.objects.create(
job=f.func_name,
stdout=stdout.getvalue(),
stderr=stderr.getvalue(),
exc_type=str(exc_type),
exc_value=str(exc_value),
exc_traceback=''.join(traceback.format_tb(exc_tb)),
duration='%.3f' % (t1 - t0),
)
raise
return inner
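# Illustrative usage sketch (not part of the original module; "my_cron_job" is a
# hypothetical job function):
#
#   @capture
#   def my_cron_job():
#       print "doing periodic work"
#
# Any stdout/stderr output, the run duration and any raised exception are then
# recorded in a CronLog row by the decorator above.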
| bsd-3-clause | 6,400,474,332,258,922,000 | 27 | 71 | 0.488324 | false |
harterj/moose | python/MooseDocs/extensions/floats.py | 8 | 6225 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import uuid
import collections
import moosetree
import MooseDocs
from ..common import exceptions
from ..base import components, MarkdownReader, LatexRenderer, Extension
from ..tree import tokens, html, latex
from . import core
def make_extension(**kwargs):
return FloatExtension(**kwargs)
Float = tokens.newToken('Float', img=False, bottom=False, command='figure')
FloatCaption = tokens.newToken('FloatCaption', key='', prefix='', number='?')
def create_float(parent, extension, reader, page, settings, bottom=False, img=False,
token_type=Float, **kwargs):
"""
Helper for optionally creating a float based on the existence of caption and/or id.
Inputs:
parent: The parent token that float should be placed
extension: The extension object (to extract 'prefix' from config items)
reader: The Reader object for tokenization of the heading
page: The Page object for passing to the tokenization routine
settings: The command settings to extract a local 'prefix'
bottom[True|False]: Set flag on the float for placing the caption at the bottom
img[True|False]: Set to True if the contents are an image (Materialize only)
token_type: The type of Token object to create; it should derive from Float
"""
cap, _ = _add_caption(None, extension, reader, page, settings)
if cap:
flt = token_type(parent, img=img, bottom=bottom, **kwargs)
cap.parent = flt
return flt
return parent
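# Illustrative usage sketch (not part of the original module): a hypothetical
# MooseDocs command component creating a float for an image; 'parent', 'page'
# and the settings follow the argument descriptions in the docstring above, and
# the attribute names on 'self' are assumptions.
#
#   flt = create_float(parent, self.extension, self.reader, page, self.settings,
#                      bottom=True, img=True)
#   # ...then tokenize the actual image/table content using 'flt' as its parent.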
def caption_settings():
"""Return settings necessary for captions."""
settings = dict()
settings['caption'] = (None, "The caption text for the float object.")
settings['prefix'] = (None, "The numbered caption label to include prior to the caption text.")
return settings
def _add_caption(parent, extension, reader, page, settings):
"""Helper for adding captions to float tokens."""
cap = settings['caption']
key = settings['id']
prefix = settings.get('prefix')
if prefix is None:
prefix = extension.get('prefix', None)
if prefix is None:
msg = "The 'prefix' must be supplied via the settings or the extension configuration."
raise exceptions.MooseDocsException(msg)
caption = None
if key:
caption = FloatCaption(parent, key=key, prefix=prefix)
if cap:
reader.tokenize(caption, cap, page, MarkdownReader.INLINE)
elif cap:
caption = FloatCaption(parent)
reader.tokenize(caption, cap, page, MarkdownReader.INLINE)
return caption, prefix
class FloatExtension(Extension):
"""
    Provides the ability to add caption float elements (e.g., figures, tables, etc.). This is only a
    base extension: it does not provide tables, for example, just the tools to make floats
    in a uniform manner.
"""
def extend(self, reader, renderer):
renderer.add('Float', RenderFloat())
renderer.add('FloatCaption', RenderFloatCaption())
if isinstance(renderer, LatexRenderer):
renderer.addPackage('caption', labelsep='period')
def postTokenize(self, page, ast):
"""Set float number for each counter."""
counts = collections.defaultdict(int)
for node in moosetree.iterate(ast, lambda n: n.name == 'FloatCaption'):
prefix = node.get('prefix', None)
if prefix is not None:
counts[prefix] += 1
node['number'] = counts[prefix]
key = node.get('key')
if key:
shortcut = core.Shortcut(ast.root, key=key, link='#{}'.format(key))
# TODO: This is a bit of a hack to get Figure~\ref{} etc. working in general
if isinstance(self.translator.renderer, LatexRenderer):
shortcut['prefix'] = prefix.title()
else:
tokens.String(shortcut, content='{} {}'.format(prefix.title(), node['number']))
page['counts'] = counts
class RenderFloat(components.RenderComponent):
def createHTML(self, parent, token, page):
div = html.Tag(parent, 'div', token)
div.addClass('moose-float-div')
if token['bottom']:
cap = token(0)
cap.parent = None # Guarantees that "cap" is removed from the current tree
cap.parent = token
return div
def createMaterialize(self, parent, token, page):
div = html.Tag(parent, 'div', token)
div.addClass('card moose-float')
content = html.Tag(div, 'div')
if token['img']:
content.addClass('card-image')
else:
content.addClass('card-content')
if token['bottom']:
cap = token(0)
cap.parent = None
cap.parent = token
return content
def createLatex(self, parent, token, page):
env = latex.Environment(parent, token['command'])
style = latex.parse_style(token)
width = style.get('width', None)
if width and token(0).name == 'Image':
token(0).set('style', 'width:{};'.format(width))
if style.get('text-align', None) == 'center':
latex.Command(env, 'centering')
return env
class RenderFloatCaption(components.RenderComponent):
def createHTML(self, parent, token, page):
caption = html.Tag(parent, 'p', class_="moose-caption")
prefix = token.get('prefix', None)
if prefix:
heading = html.Tag(caption, 'span', class_="moose-caption-heading")
html.String(heading, content="{} {}: ".format(prefix, token['number']))
return html.Tag(caption, 'span', class_="moose-caption-text")
def createLatex(self, parent, token, page):
caption = latex.Command(parent, 'caption')
if token['key']:
latex.Command(caption, 'label', string=token['key'], escape=True)
return caption
| lgpl-2.1 | 3,005,966,960,112,361,000 | 36.053571 | 99 | 0.631486 | false |
torbjoernk/easybuild-framework | easybuild/tools/toolchain/variables.py | 6 | 6177 | # #
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Toolchain specific variables
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.variables import StrList, AbsPathList
class IncludePaths(AbsPathList):
"""Absolute path to directory containing include files"""
PREFIX = '-I'
class LinkLibraryPaths(AbsPathList):
"""Absolute path to directory containing libraries"""
PREFIX = '-L'
class FlagList(StrList):
"""Flag list"""
PREFIX = "-"
class CommandFlagList(FlagList):
"""
Command and flags list
    The first element of the list has no prefix (i.e. the executable)
The remainder of the options are considered flags
"""
def _str_self(self):
"""Like a regular flag list, but set first element to original value"""
tmp_str = [self.str_convert(x) for x in self if self._str_ok(x)]
if len(tmp_str) > 0:
tmp_str[0] = self[0]
return tmp_str
class LibraryList(StrList):
"""Link library list"""
PREFIX = "-l"
SANITIZE_REMOVE_DUPLICATE_KEEP = -1 # sanitize from end
JOIN_BEGIN_END = True
def set_packed_linker_options(self, separator=',', separator_begin_end=',', prefix=None, prefix_begin_end=None):
"""Use packed linker options format"""
if isinstance(self.BEGIN, LinkerFlagList) and isinstance(self.END, LinkerFlagList):
self.log.debug("sanitize: PACKED_LINKER_OPTIONS")
self.BEGIN.PACKED_LINKER_OPTIONS = True
self.END.PACKED_LINKER_OPTIONS = True
if separator_begin_end is not None:
self.BEGIN.SEPARATOR = separator_begin_end
self.END.SEPARATOR = separator_begin_end
if prefix_begin_end is not None:
self.BEGIN.PREFIX = prefix_begin_end
self.END.PREFIX = prefix_begin_end
# this is intentional only on the elements that have BEGIN/END
if separator is not None:
self.SEPARATOR = separator
if prefix is not None:
self.PREFIX = prefix
def change(self, separator=None, separator_begin_end=None, prefix=None, prefix_begin_end=None):
"""Change prefix and/or separator of base and/or BEGIN/END"""
if separator is not None:
self.SEPARATOR = separator
if prefix is not None:
self.PREFIX = prefix
if isinstance(self.BEGIN, LinkerFlagList):
if separator_begin_end is not None:
self.BEGIN.SEPARATOR = separator_begin_end
if prefix_begin_end is not None:
self.BEGIN.PREFIX = prefix_begin_end
if isinstance(self.END, LinkerFlagList):
if separator_begin_end is not None:
self.END.SEPARATOR = separator_begin_end
if prefix_begin_end is not None:
self.END.PREFIX = prefix_begin_end
class CommaStaticLibs(LibraryList):
"""Comma-separated list"""
SEPARATOR = ','
PREFIX = 'lib'
SUFFIX = '.a'
class LinkerFlagList(StrList):
"""Linker flags"""
PREFIX = '-Wl,'
LINKER_TOGGLE_START_STOP_GROUP = None
LINKER_TOGGLE_STATIC_DYNAMIC = None
PACKED_LINKER_OPTIONS = None
IS_BEGIN = None
IS_END = None
def _toggle_map(self, toggle_map, name, descr, idx=None):
"""Append value from toggle_map. Raise if not None and name not found
descr string to add to raise
"""
if toggle_map is not None:
if name in toggle_map:
if idx is None:
self.append(toggle_map[name])
else:
self.insert(idx, toggle_map[name])
else:
raise EasyBuildError("%s name %s not found in map %s", descr, name, toggle_map)
def toggle_startgroup(self):
"""Append start group"""
self._toggle_map(self.LINKER_TOGGLE_START_STOP_GROUP, 'start', 'toggle_startgroup', idx=None)
def toggle_stopgroup(self):
"""Append stop group"""
self._toggle_map(self.LINKER_TOGGLE_START_STOP_GROUP, 'stop', 'toggle_stopgroup', idx=0)
def toggle_static(self):
"""Append static linking flags"""
self._toggle_map(self.LINKER_TOGGLE_STATIC_DYNAMIC, 'static', 'toggle_static', idx=0)
def toggle_dynamic(self):
"""Append dynamic linking flags"""
self._toggle_map(self.LINKER_TOGGLE_STATIC_DYNAMIC, 'dynamic', 'toggle_dynamic', idx=None)
def sanitize(self):
# TODO: rewrite to avoid changing constants
if self.PACKED_LINKER_OPTIONS:
# somehow this should only be run once.
self.PACKED_LINKER_OPTIONS = None
self.log.debug("sanitize: PACKED_LINKER_OPTIONS")
if self.IS_BEGIN and self.SEPARATOR:
self.BEGIN = str(self.PREFIX).rstrip(self.SEPARATOR)
self.PREFIX = None
self.log.debug("sanitize: PACKED_LINKER_OPTIONS IS_BEGIN %s PREFIX %s BEGIN %s" % (self.IS_BEGIN, self.PREFIX, self.BEGIN))
super(LinkerFlagList, self).sanitize()
| gpl-2.0 | 4,122,369,144,506,819,600 | 33.898305 | 135 | 0.639145 | false |
enikulenkov/PaGMO | PyGMO/core/__init__.py | 1 | 16421 | # -*- coding: utf-8 -*-
from _core import *
import threading as _threading
import signal as _signal
import os as _os
__doc__ = 'PyGMO core module.'
__all__ = ['archipelago','base_island','champion','distribution_type','individual','ipy_island','island','local_island','migration_direction','population','py_island']
_orig_signal = _signal.getsignal(_signal.SIGINT)
_main_pid = _os.getpid()
# Alternative signal handler which ignores sigint if called from a child process.
def _sigint_handler(signum,frame):
import os
if os.getpid() == _main_pid:
_orig_signal(signum,frame)
_signal.signal(_signal.SIGINT,_sigint_handler)
# Global lock used when starting processes.
_process_lock = _threading.Lock()
# Raw C++ base island class.
_base_island = _core._base_island
class base_island(_core._base_island):
def __init__(self,*args):
if len(args) == 0:
raise ValueError("Cannot initialise base island without parameters for the constructor.")
_core._base_island.__init__(self,*args)
def get_name(self):
return str(type(self))
def __get_deepcopy__(self):
from copy import deepcopy
return deepcopy(self)
def _generic_island_ctor(self,*args,**kwargs):
"""Unnamed arguments:
#. algorithm
#. problem or population
#. number of individuals (optional and valid only if the second argument is a problem, defaults to 0 if not specified)
Keyword arguments:
* *migr_prob* -- migration probability (defaults to 1)
* *s_policy* -- migration selection policy (defaults to 'best selection' policy)
* *r_policy* -- migration replacement policy (defaults to 'fair replacement' policy)
"""
from PyGMO.algorithm._algorithm import _base as _base_algorithm
from PyGMO.algorithm import base as base_algorithm
from PyGMO.problem._problem import _base as _base_problem
from PyGMO.problem._problem import _base_stochastic as _base_problem_stochastic
from PyGMO.problem import base as base_problem
from PyGMO.problem import base_stochastic as base_problem_stochastic
from PyGMO.migration._migration import best_s_policy, fair_r_policy, _base_s_policy, _base_r_policy
if len(args) < 2 or len(args) > 3:
raise ValueError("Unnamed arguments list must have either 2 or three elements, but %d elements were found instead." % (len(args),))
if not isinstance(args[0],_base_algorithm):
raise TypeError("The first unnamed argument must be an algorithm.")
ctor_args = [args[0]]
if isinstance(args[1],_base_problem) or isinstance(args[1],_base_problem_stochastic):
ctor_args.append(args[1])
if len(args) == 3:
if not isinstance(args[2],int):
raise TypeError("Please provide an integer for the number of individuals in the island.")
ctor_args.append(args[2])
else:
ctor_args.append(0)
elif isinstance(args[1],population):
if len(args) == 3:
raise ValueError("When the second unnamed argument is a population, there cannot be a third unnamed argument.")
ctor_args.append(args[1])
else:
raise TypeError("The second unnamed argument must be either a problem or a population.")
if 'migr_prob' in kwargs:
ctor_args.append(kwargs['migr_prob'])
else:
ctor_args.append(1.)
if not isinstance(ctor_args[-1],float):
raise TypeError("Migration probability must be a float.")
if 's_policy' in kwargs:
ctor_args.append(kwargs['s_policy'])
else:
ctor_args.append(best_s_policy())
if not isinstance(ctor_args[-1],_base_s_policy):
raise TypeError("s_policy must be a migration selection policy.")
if 'r_policy' in kwargs:
ctor_args.append(kwargs['r_policy'])
else:
ctor_args.append(fair_r_policy())
if not isinstance(ctor_args[-1],_base_r_policy):
raise TypeError("r_policy must be a migration replacement policy.")
if isinstance(self,base_island):
super(type(self),self).__init__(*ctor_args)
elif isinstance(self,_base_island):
self.__original_init__(*ctor_args)
else:
assert(self is None)
n_pythonic_items = 0
if isinstance(args[0],base_algorithm):
n_pythonic_items += 1
if isinstance(args[1],base_problem) or isinstance(args[1],base_problem_stochastic):
n_pythonic_items += 1
elif isinstance(args[1],population) and (isinstance(args[1].problem,base_problem) or isinstance(args[1],base_problem_stochastic)):
n_pythonic_items += 1
if n_pythonic_items > 0:
return py_island(*args,**kwargs)
else:
return local_island(*args,**kwargs)
local_island.__original_init__ = local_island.__init__
local_island.__init__ = _generic_island_ctor
# This is the function that will be called by the separate process
# spawned from py_island.
def _process_target(q,a,p):
try:
tmp = a.evolve(p)
q.put(tmp)
except BaseException as e:
q.put(e)
class py_island(base_island):
"""Python island.
This island will launch evolutions using the multiprocessing module, available since Python 2.6.
Each evolution is transparently dispatched to a Python interpreter in a separate process.
"""
__init__ = _generic_island_ctor
def _perform_evolution(self,algo,pop):
try:
import multiprocessing as mp
q = mp.Queue()
# Apparently creating/starting processes is _not_ thread safe:
# http://bugs.python.org/issue1731717
# http://stackoverflow.com/questions/1359795/error-while-using-multiprocessing-module-in-a-python-daemon
# Protect with a global lock.
with _process_lock:
process = mp.Process(target = _process_target, args = (q,algo,pop))
process.start()
retval = q.get()
with _process_lock:
process.join()
if isinstance(retval,BaseException):
raise retval
return retval
except BaseException as e:
print('Exception caught during evolution:')
print(e)
raise RuntimeError()
def get_name(self):
return "Python multiprocessing island"
# This is the function that will be called by the task client
# in ipy_island.
def _maptask_target(a,p):
try:
return a.evolve(p)
except BaseException as e:
return e
class ipy_island(base_island):
"""Parallel IPython island.
This island will launch evolutions using IPython's MapTask interface. The evolution will be dispatched
to IPython engines that, depending on the configuration of IPython/ipcluster, can reside either on the
local machine or on other remote machines.
See: http://ipython.scipy.org/doc/stable/html/parallel/index.html
"""
# NOTE: when using an IPython island, on quitting IPython there might be a warning message
# reporting an exception being ignored. This seems to be a problem in the foolscap library:
# http://foolscap.lothar.com/trac/ticket/147
# Hopefully it will be fixed in the next versions of the library.
__init__ = _generic_island_ctor
def _perform_evolution(self,algo,pop):
try:
from IPython.kernel.client import TaskClient, MapTask
# Create task client.
tc = TaskClient()
# Create the task.
mt = MapTask(_maptask_target,args = (algo,pop))
# Run the task.
task_id = tc.run(mt)
# Get retval.
retval = tc.get_task_result(task_id,block = True)
if isinstance(retval,BaseException):
raise retval
return retval
except BaseException as e:
print('Exception caught during evolution:')
print(e)
raise RuntimeError()
def get_name(self):
return "Parallel IPython island"
def island(*args,**kwargs):
return _generic_island_ctor(None,*args,**kwargs)
island.__doc__ = '\n'.join(['Island factory function.\n\nThis function will return an instance of an island object\nbuilt according to the following rule: '+
'if the arguments include\neither a pythonic problem or a pythonic algorithm, then an instance\nof :class:`py_island` will be returned; '+
'otherwise, an instance of\n:class:`local_island` will be returned.'] + [s.replace('\t','') for s in _generic_island_ctor.__doc__.split('\n')])
del s
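# Illustrative usage sketch (not part of the original module): letting the
# factory pick the island type; algorithm.de and problem.rosenbrock are assumed
# from PyGMO's standard algorithm/problem modules.
#
#   from PyGMO import algorithm, problem
#   isl = island(algorithm.de(gen=100), problem.rosenbrock(10), 20, migr_prob=0.5)
#   isl.evolve(1)
#   isl.join()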
def _get_island_list():
from PyGMO import core
names = filter(lambda n: not n.startswith('_') and not n.startswith('base') and n.endswith('_island'),dir(core))
try:
from IPython.kernel.client import TaskClient, MapTask
except ImportError:
names = filter(lambda n: n != 'ipy_island',names)
return [core.__dict__[n] for n in names]
def _generic_archi_ctor(self,*args,**kwargs):
"""
Unnamed arguments (optional):
#. algorithm
#. problem
#. number of islands
#. number individual in the population
Keyword arguments:
* *topology* -- migration topology (defaults to unconnected)
* *distribution_type* -- distribution_type (defaults to distribution_type.point_to_point)
* *migration_direction* -- migration_direction (defaults to migration_direction.destination)
"""
from PyGMO import topology, algorithm,problem
from difflib import get_close_matches
if not((len(args)==4) or (len(args)==0)):
raise ValueError("Unnamed arguments list, when present, must be of length 4, but %d elements were found instead" % (len(args),))
#Append everything in the same list of constructor arguments
ctor_args = []
for i in args:
ctor_args.append(i)
#Pop all known keywords out of kwargs and add a default value if not provided
ctor_args.append(kwargs.pop('topology', topology.unconnected())) #unconnected is default
ctor_args.append(kwargs.pop('distribution_type', distribution_type.point_to_point)) #point-to-point is default
ctor_args.append(kwargs.pop('migration_direction', migration_direction.destination)) #destination is default
#Check for unknown keywords
kwlist = ['topology', 'distribution_type', 'migration_direction']
if kwargs:
s = "The following unknown keyworded argument was passed to the construtor: "
for kw in kwargs:
s += kw
spam = get_close_matches(kw, kwlist)
if spam:
s += " (Did you mean %s?), " % spam[0]
else:
s += ", "
raise ValueError(s[:-2])
#Constructs an empty archipelago with no islands using the C++ constructor
self.__original_init__(*ctor_args[-3:])
#We now push back the correct island type if required
if (len(args))==4:
if not isinstance(args[0],algorithm._base):
raise TypeError("The first unnamed argument must be an algorithm")
if not (isinstance(args[1],problem._base) or isinstance(args[1],problem._base_stochastic)):
raise TypeError("The second unnamed argument must be a problem")
if not isinstance(args[2],int):
raise TypeError("The third unnamed argument must be an integer (i.e. number of islands)")
if not isinstance(args[3],int):
raise TypeError("The fourth unnamed argument must be an integer (i.e. population size)")
for n in range(args[2]):
self.push_back(island(args[0],args[1],args[3]))
archipelago.__original_init__ = archipelago.__init__
archipelago.__init__ = _generic_archi_ctor
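# Illustrative usage sketch (not part of the original module): building and
# evolving an archipelago through the constructor above; algorithm.de,
# problem.rosenbrock and topology.ring are assumed from PyGMO's standard modules.
#
#   from PyGMO import algorithm, problem, topology
#   archi = archipelago(algorithm.de(gen=50), problem.rosenbrock(10), 8, 20,
#                       topology=topology.ring())
#   archi.evolve(5)
#   archi.join()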
def _archipelago_draw(self, layout = 'spring', n_color = 'fitness', n_size = 15, n_alpha = 0.5, e_alpha = 0.1, e_arrows=False, scale_by_degree = False, cmap = 'default'):
"""
Draw a visualization of the archipelago using networkx.
	USAGE: pos = archipelago.draw(layout = 'spring', n_color = 'fitness', n_size = 15, scale_by_degree = False, n_alpha = 0.5, e_alpha = 0.1, cmap = 'default', e_arrows=False)
	* layout: Network layout. Can be 'spring' or 'circular' or a list of values pos returned
	by a previous call of the method (so that positions of the islands can be kept fixed).
	* n_color: Defines the color code for the nodes. Can be one of 'fitness', 'links', 'rank' or a standard matplotlib color such as 'blue'.
	* n_size: The size of nodes. Becomes a scaling factor when scale_by_degree=True.
	* n_alpha: Transparency of nodes. Takes a value between 0 and 1.
	* e_arrows: Plots arrows on the edges for directed graphs.
	* e_alpha: Transparency of edges. Takes a value between 0 and 1.
	* scale_by_degree: When True, nodes will be sized proportionally to their degree.
	* cmap: Color map; one in matplotlib.pyplot.cm.
"""
try:
import networkx as nx
except ImportError:
raise ImportError('Could not import the networkx module.')
try:
import matplotlib.pyplot as pl
except ImportError:
		raise ImportError('Could not import the MatPlotLib module.')
#We set the graph in networkx
t = self.topology
G = t.to_networkx()
#We scale the node sizes
node_sizes = range(nx.number_of_nodes(G))
for i in range(nx.number_of_nodes(G)):
if scale_by_degree:
node_sizes[i] = nx.degree(G,i)*n_size
else:
node_sizes[i] = n_size
#We compute the layout
if layout == 'spring':
pos = nx.spring_layout(G)
elif layout == "circular":
pos = nx.circular_layout(G)
else:
pos = layout
#We compute the color_code
if n_color == 'fitness':
node_colors=[-isl.population.champion.f[0] for isl in self]
m = min(node_colors)
M = max(node_colors)
	elif n_color == 'links':
		node_colors=[t.get_num_adjacent_vertices(i) for i in range(len(self))]
		m = min(node_colors)
		M = max(node_colors)
elif n_color == 'rank':
vec = [-isl.population.champion.f[0] for isl in self]
node_colors=sorted(range(len(vec)), key=vec.__getitem__)
M = max(node_colors)
m= min(node_colors)
else:
node_colors=n_color
m=0;
M=0;
if not m==M:
node_colors=[(node_colors[i] - float(m))/(M-m) for i in range(len(self))]
#And we draw the archipelago .....
pl.figure()
if cmap == 'default':
cmap = pl.cm.Reds_r
nx.draw_networkx_nodes(G,pos,nodelist=range(len(self)), node_color=node_colors, cmap=cmap, node_size=node_sizes,alpha=n_alpha)
nx.draw_networkx_edges(G,pos,alpha=e_alpha,arrows=e_arrows)
pl.axis('off')
pl.show()
return pos
archipelago.draw = _archipelago_draw
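# Illustrative usage sketch (not part of the original module): drawing the same
# archipelago twice while keeping island positions fixed between calls; 'archi'
# is assumed to be an archipelago built as above, and networkx/matplotlib must
# be installed.
#
#   pos = archi.draw(layout='spring', n_color='fitness')
#   archi.evolve(1)
#   archi.draw(layout=pos, n_color='rank')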
def _pop_plot_pareto_fronts(pop, rgb=(0,0,0), comp = [0,1], symbol = 'o', size = 6):
"""
Plots the population pareto front in a 2-D graph
	USAGE: pop.plot_pareto_fronts(comp = [0,1], rgb=(0,1,0))
* comp: components of the fitness function to plot in the 2-D window
* rgb: specify the color of the 1st front (use strong colors here)
* symbol: marker for the individual
* size: size of the markersymbol
"""
from numpy import linspace
import matplotlib.pyplot as plt
if len(comp) !=2:
raise ValueError('Invalid components of the objective function selected for plot')
p_dim = pop.problem.f_dimension
if p_dim == 1:
raise ValueError('Pareto fronts of a 1-dimensional problem cannot be plotted')
if not all([c in range(0, p_dim) for c in comp]):
raise ValueError('You need to select valid components of the objective function')
p_list = pop.compute_pareto_fronts()
cl = zip(linspace(0.9 if rgb[0] else 0.1,0.9, len(p_list)),
linspace(0.9 if rgb[1] else 0.1,0.9, len(p_list)),
linspace(0.9 if rgb[2] else 0.1,0.9, len(p_list)))
for id_f,f in enumerate(p_list):
for ind in f:
plt.plot([pop[ind].best_f[comp[0]]],[pop[ind].best_f[comp[1]]], symbol, color=cl[id_f], markersize=size)
x = [pop[ind].best_f[comp[0]] for ind in f]
y = [pop[ind].best_f[comp[1]] for ind in f]
tmp = [(a,b) for a,b in zip(x,y)]
tmp = sorted(tmp, key = lambda k:k[0])
plt.step([c[0] for c in tmp], [c[1] for c in tmp],color=cl[id_f],where='post')
plt.show()
population.plot_pareto_fronts = _pop_plot_pareto_fronts
def _pop_race(self, n_winners, min_trials = 0, max_feval = 500,
delta=0.05, racers_idx = [], race_best=True, screen_output=False):
"""
Races individuals in a population
USAGE: pop.race(n_winners, min_trials = 0, max_feval = 500, delta = 0.05, racers_idx = [], race_best=True, screen_output=False)
* n_winners: number of winners in the race
* min_trials: minimum amount of evaluations before an individual can stop racing
* max_feval: budget for objective function evaluation
* delta: Statistical test confidence
* racers_idx: indices of the individuals in pop to be raced
* race_best: when True winners are the best, otherwise winners are the worst
* screen_output: produces some screen output at each iteration of the race
"""
arg_list=[]
arg_list.append(n_winners)
arg_list.append(min_trials)
arg_list.append(max_feval)
arg_list.append(delta)
arg_list.append(racers_idx)
arg_list.append(race_best)
arg_list.append(screen_output)
return self._orig_race(*arg_list)
population._orig_race = population.race
population.race = _pop_race
def _pop_repair(self, idx, repair_algorithm):
"""
Repairs the individual at the given position
USAGE: pop.repair(idx, repair_algorithm = _algorithm.jde())
* idx: index of the individual to repair
	* repair_algorithm: optimizer to use as 'repairing' algorithm. It should be able to deal with a population of size 1.
"""
arg_list=[]
arg_list.append(idx)
arg_list.append(repair_algorithm)
return self._orig_repair(*arg_list)
population._orig_repair = population.repair
population.repair = _pop_repair
| gpl-3.0 | 2,609,840,863,474,202,000 | 35.010965 | 170 | 0.709336 | false |
walterbender/portfolio | odf/odfmanifest.py | 1 | 3579 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
# This script lists the content of the manifest.xml file
import zipfile
from xml.sax import make_parser, handler
from xml.sax.xmlreader import InputSource
import xml.sax.saxutils
from cStringIO import StringIO
MANIFESTNS = "urn:oasis:names:tc:opendocument:xmlns:manifest:1.0"
#-----------------------------------------------------------------------------
#
# ODFMANIFESTHANDLER
#
#-----------------------------------------------------------------------------
class ODFManifestHandler(handler.ContentHandler):
""" The ODFManifestHandler parses a manifest file and produces a list of
content """
def __init__(self):
self.manifest = {}
# Tags
# FIXME: Also handle encryption data
self.elements = {
(MANIFESTNS, 'file-entry'): (self.s_file_entry, self.donothing),
}
def handle_starttag(self, tag, method, attrs):
method(tag, attrs)
def handle_endtag(self, tag, method):
method(tag)
def startElementNS(self, tag, qname, attrs):
method = self.elements.get(tag, (None, None))[0]
if method:
self.handle_starttag(tag, method, attrs)
else:
self.unknown_starttag(tag, attrs)
def endElementNS(self, tag, qname):
method = self.elements.get(tag, (None, None))[1]
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
def unknown_starttag(self, tag, attrs):
pass
def unknown_endtag(self, tag):
pass
def donothing(self, tag, attrs=None):
pass
def s_file_entry(self, tag, attrs):
m = attrs.get((MANIFESTNS, 'media-type'), "application/octet-stream")
p = attrs.get((MANIFESTNS, 'full-path'))
self.manifest[p] = {'media-type': m, 'full-path': p}
#-----------------------------------------------------------------------------
#
# Reading the file
#
#-----------------------------------------------------------------------------
def manifestlist(manifestxml):
odhandler = ODFManifestHandler()
parser = make_parser()
parser.setFeature(handler.feature_namespaces, 1)
parser.setContentHandler(odhandler)
parser.setErrorHandler(handler.ErrorHandler())
inpsrc = InputSource()
inpsrc.setByteStream(StringIO(manifestxml))
parser.parse(inpsrc)
return odhandler.manifest
def odfmanifest(odtfile):
z = zipfile.ZipFile(odtfile)
manifest = z.read('META-INF/manifest.xml')
z.close()
return manifestlist(manifest)
if __name__ == "__main__":
import sys
result = odfmanifest(sys.argv[1])
for file in result.values():
print "%-40s %-40s" % (file['media-type'], file['full-path'])
| gpl-3.0 | 8,217,278,869,497,445,000 | 29.581197 | 80 | 0.609558 | false |
keskitalo/healpy | doc/create_images.py | 3 | 1125 | import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
SIZE = 400
DPI = 60
m = np.arange(hp.nside2npix(32))
hp.mollview(m, nest=True, xsize=SIZE, title="Mollview image NESTED")
plt.savefig("static/moll_nside32_nest.png", dpi=DPI)
hp.mollview(m, nest=False, xsize=SIZE, title="Mollview image RING")
plt.savefig("static/moll_nside32_ring.png", dpi=DPI)
wmap_map_I = hp.read_map("../healpy/test/data/wmap_band_imap_r9_7yr_W_v4.fits")
hp.mollview(
wmap_map_I,
coord=["G", "E"],
title="Histogram equalized Ecliptic",
unit="mK",
norm="hist",
min=-1,
max=1,
xsize=SIZE,
)
hp.graticule()
plt.savefig("static/wmap_histeq_ecl.png", dpi=DPI)
mask = hp.read_map(
"../healpy/test/data/wmap_temperature_analysis_mask_r9_7yr_v4.fits"
).astype(bool)  # plain bool is equivalent; the np.bool alias is removed in newer NumPy
wmap_map_I_masked = hp.ma(wmap_map_I)
wmap_map_I_masked.mask = np.logical_not(mask)
LMAX = 1024
cl = hp.anafast(wmap_map_I_masked.filled(), lmax=LMAX)
ell = np.arange(len(cl))
plt.figure()
plt.plot(ell, ell * (ell + 1) * cl)
plt.xlabel("ell")
plt.ylabel("ell(ell+1)cl")
plt.grid()
plt.savefig("static/wmap_powspec.png", dpi=DPI)
| gpl-2.0 | 2,684,837,210,397,966,300 | 24.568182 | 79 | 0.682667 | false |
dntt1/youtube-dl | youtube_dl/extractor/ustudio.py | 23 | 4391 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
unescapeHTML,
)
class UstudioIE(InfoExtractor):
IE_NAME = 'ustudio'
_VALID_URL = r'https?://(?:(?:www|v1)\.)?ustudio\.com/video/(?P<id>[^/]+)/(?P<display_id>[^/?#&]+)'
_TEST = {
'url': 'http://ustudio.com/video/Uxu2my9bgSph/san_francisco_golden_gate_bridge',
'md5': '58bbfca62125378742df01fc2abbdef6',
'info_dict': {
'id': 'Uxu2my9bgSph',
'display_id': 'san_francisco_golden_gate_bridge',
'ext': 'mp4',
'title': 'San Francisco: Golden Gate Bridge',
'description': 'md5:23925500697f2c6d4830e387ba51a9be',
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20111107',
'uploader': 'Tony Farley',
}
}
def _real_extract(self, url):
video_id, display_id = re.match(self._VALID_URL, url).groups()
config = self._download_xml(
'http://v1.ustudio.com/embed/%s/ustudio/config.xml' % video_id,
display_id)
def extract(kind):
return [{
'url': unescapeHTML(item.attrib['url']),
'width': int_or_none(item.get('width')),
'height': int_or_none(item.get('height')),
} for item in config.findall('./qualities/quality/%s' % kind) if item.get('url')]
formats = extract('video')
self._sort_formats(formats)
webpage = self._download_webpage(url, display_id)
title = self._og_search_title(webpage)
upload_date = unified_strdate(self._search_regex(
r'(?s)Uploaded by\s*.+?\s*on\s*<span>([^<]+)</span>',
webpage, 'upload date', fatal=False))
uploader = self._search_regex(
r'Uploaded by\s*<a[^>]*>([^<]+)<',
webpage, 'uploader', fatal=False)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': self._og_search_description(webpage),
'thumbnails': extract('image'),
'upload_date': upload_date,
'uploader': uploader,
'formats': formats,
}
class UstudioEmbedIE(InfoExtractor):
IE_NAME = 'ustudio:embed'
_VALID_URL = r'https?://(?:(?:app|embed)\.)?ustudio\.com/embed/(?P<uid>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://app.ustudio.com/embed/DeN7VdYRDKhP/Uw7G1kMCe65T',
'md5': '47c0be52a09b23a7f40de9469cec58f4',
'info_dict': {
'id': 'Uw7G1kMCe65T',
'ext': 'mp4',
'title': '5 Things IT Should Know About Video',
'description': 'md5:93d32650884b500115e158c5677d25ad',
'uploader_id': 'DeN7VdYRDKhP',
}
}
def _real_extract(self, url):
uploader_id, video_id = re.match(self._VALID_URL, url).groups()
video_data = self._download_json(
'http://app.ustudio.com/embed/%s/%s/config.json' % (uploader_id, video_id),
video_id)['videos'][0]
title = video_data['name']
formats = []
for ext, qualities in video_data.get('transcodes', {}).items():
for quality in qualities:
quality_url = quality.get('url')
if not quality_url:
continue
height = int_or_none(quality.get('height'))
formats.append({
'format_id': '%s-%dp' % (ext, height) if height else ext,
'url': quality_url,
'width': int_or_none(quality.get('width')),
'height': height,
})
self._sort_formats(formats)
thumbnails = []
for image in video_data.get('images', []):
image_url = image.get('url')
if not image_url:
continue
thumbnails.append({
'url': image_url,
})
return {
'id': video_id,
'title': title,
'description': video_data.get('description'),
'duration': int_or_none(video_data.get('duration')),
'uploader_id': uploader_id,
'tags': video_data.get('keywords'),
'thumbnails': thumbnails,
'formats': formats,
}
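# Illustrative usage (editor's addition): these extractor classes are normally
# driven through youtube-dl's public API rather than instantiated directly.
# The URL is taken from the _TEST block above; the options are arbitrary and
# the call needs network access, so it is left commented out:
#
#   from youtube_dl import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'http://ustudio.com/video/Uxu2my9bgSph/san_francisco_golden_gate_bridge',
#           download=False)
#       print(info['title'])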
| unlicense | -4,789,828,634,196,561,000 | 34.128 | 103 | 0.513778 | false |
thefab/cpu_cores | cpu_cores/linux.py | 1 | 2219 | # This file is part of cpu_cores released under the MIT license.
# See the LICENSE file for more information.
from cpu_cores.common import CPUCoresCounter
CPUINFO_FILEPATH = "/proc/cpuinfo"
def _core_hash(cpu_infos):
if 'core id' not in cpu_infos and 'physical id' not in cpu_infos:
return "%i" % cpu_infos['processor']
if 'core id' in cpu_infos and 'physical id' not in cpu_infos:
raise Exception("incorrect cpuinfo file :"
" we have a core_id without physical_id")
if 'core id' in cpu_infos:
return "%i_%i" % (cpu_infos['physical id'], cpu_infos['core id'])
else:
return "%i" % cpu_infos['physical id']
def _processor_hash(cpu_infos):
if 'core id' not in cpu_infos and 'physical id' not in cpu_infos:
return "%i" % cpu_infos['processor']
if 'core id' in cpu_infos and 'physical id' not in cpu_infos:
raise Exception("incorrect cpuinfo file :"
" we have a core_id without physical_id")
return "%i" % cpu_infos['physical id']
class LinuxCPUCoresCounter(CPUCoresCounter):
def _count(self, cpuinfo_filepath=None):
if cpuinfo_filepath is None:
cpuinfo_filepath = CPUINFO_FILEPATH
with open(cpuinfo_filepath, 'r') as f:
# we read lines in reversed order to be sure to end with a
# "processor:" line
lines = reversed(f.readlines())
cores = set()
processors = set()
cpu_infos = {}
for line in lines:
tmp = line.strip()
for key in ('processor', 'physical id', 'core id'):
if tmp.startswith(key):
cpu_infos[key] = int(tmp.split(':')[1].strip())
if key == 'processor':
cores.add(_core_hash(cpu_infos))
processors.add(_processor_hash(cpu_infos))
cpu_infos = {}
if len(cores) == 0 or len(processors) == 0:
raise Exception("can't get the cpu cores count (linux)")
self._physical_cores_count = len(cores)
self._physical_processors_count = len(processors)
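# Illustrative usage (editor's addition): only names visible in this module are
# used; the public wrapper in cpu_cores.common is not shown here, so the
# snippet is kept commented out.
#
#   counter = LinuxCPUCoresCounter()
#   counter._count()  # parses /proc/cpuinfo
#   print(counter._physical_cores_count, counter._physical_processors_count)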
| mit | 2,211,263,102,269,658,000 | 40.092593 | 73 | 0.561965 | false |
mmnelemane/nova | nova/compute/monitors/__init__.py | 18 | 3283 | # Copyright 2013 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Resource monitor API specification.
"""
from oslo_config import cfg
from oslo_log import log as logging
from stevedore import enabled
from nova.i18n import _LW
compute_monitors_opts = [
cfg.MultiStrOpt('compute_available_monitors',
deprecated_for_removal=True,
default=None,
help='Monitor classes available to the compute which may '
'be specified more than once. This option is '
'DEPRECATED and no longer used. Use setuptools entry '
'points to list available monitor plugins.'),
cfg.ListOpt('compute_monitors',
default=[],
help='A list of monitors that can be used for getting '
'compute metrics. You can use the alias/name from '
'the setuptools entry points for nova.compute.monitors.* '
'namespaces.'),
]
CONF = cfg.CONF
CONF.register_opts(compute_monitors_opts)
LOG = logging.getLogger(__name__)
class MonitorHandler(object):
def __init__(self, resource_tracker):
self.cpu_monitor_loaded = False
ns = 'nova.compute.monitors.cpu'
cpu_plugin_mgr = enabled.EnabledExtensionManager(
namespace=ns,
invoke_on_load=True,
check_func=self.check_enabled_cpu_monitor,
invoke_args=(resource_tracker,)
)
self.monitors = [obj.obj for obj in cpu_plugin_mgr]
def check_enabled_cpu_monitor(self, ext):
if self.cpu_monitor_loaded is not False:
msg = _LW("Excluding CPU monitor %(monitor_name)s. Already "
"loaded %(loaded_cpu_monitor)s.")
msg = msg % {
'monitor_name': ext.name,
'loaded_cpu_monitor': self.cpu_monitor_loaded
}
LOG.warn(msg)
return False
# TODO(jaypipes): Right now, we only have CPU monitors, so we don't
# need to check if the plugin is a CPU monitor or not. Once non-CPU
# monitors are added, change this to check either the base class or
# the set of metric names returned to ensure only a single CPU
# monitor is loaded at any one time.
if ext.name in CONF.compute_monitors:
self.cpu_monitor_loaded = ext.name
return True
msg = _LW("Excluding CPU monitor %(monitor_name)s. Not in the "
"list of enabled monitors (CONF.compute_monitors).")
msg = msg % {
'monitor_name': ext.name,
}
LOG.warn(msg)
return False
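# Illustrative configuration sketch (editor's addition, not part of this
# module): a CPU monitor is exposed through a setuptools entry point in the
# 'nova.compute.monitors.cpu' namespace and then enabled by its alias. The
# names below are examples only:
#
#   setup.cfg of the package shipping the plugin:
#       [entry_points]
#       nova.compute.monitors.cpu =
#           virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor
#
#   nova.conf:
#       [DEFAULT]
#       compute_monitors = virt_driver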
| apache-2.0 | -4,939,788,704,445,977,000 | 37.623529 | 79 | 0.604021 | false |