repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
lotusk/tushare | tushare/futures/domestic_cons.py | 2 | 5300 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on June 4, 2017
@author: debugo
@contact: [email protected]
'''
import re
import datetime
CFFEX_DAILY_URL = 'http://www.cffex.com.cn/fzjy/mrhq/%s/%s/%s_1.csv'
SHFE_DAILY_URL = 'http://www.shfe.com.cn/data/dailydata/kx/kx%s.dat'
SHFE_VWAP_URL = 'http://www.shfe.com.cn/data/dailydata/ck/%sdailyTimePrice.dat'
DCE_DAILY_URL = 'http://www.dce.com.cn//publicweb/quotesdata/dayQuotesCh.html'
CZCE_DAILY_URL = 'http://www.czce.com.cn/portal/DFSStaticFiles/Future/%s/%s/FutureDataDaily.txt'
CZCE_OPTION_URL = 'http://www.czce.com.cn/portal/DFSStaticFiles/Option/%s/%s/OptionDataDaily.txt'
CFFEX_COLUMNS = ['open','high','low','volume','turnover','open_interest','close','settle','change1','change2']
CZCE_COLUMNS = ['pre_settle','open','high','low','close','settle','change1','change2','volume','open_interest','oi_chg','turnover','final_settle']
CZCE_OPTION_COLUMNS = ['pre_settle', 'open', 'high', 'low', 'close', 'settle', 'change1', 'change2', 'volume', 'open_interest', 'oi_chg', 'turnover', 'delta', 'implied_volatility', 'exercise_volume']
SHFE_COLUMNS = {'CLOSEPRICE': 'close', 'HIGHESTPRICE': 'high', 'LOWESTPRICE': 'low', 'OPENINTEREST': 'open_interest', 'OPENPRICE': 'open', 'PRESETTLEMENTPRICE': 'pre_settle', 'SETTLEMENTPRICE': 'settle', 'VOLUME': 'volume'}
SHFE_VWAP_COLUMNS = {':B1': 'date', 'INSTRUMENTID': 'symbol', 'TIME': 'time_range', 'REFSETTLEMENTPRICE': 'vwap'}
DCE_COLUMNS = ['open', 'high', 'low', 'close', 'pre_settle', 'settle', 'change1','change2','volume','open_interest','oi_chg','turnover']
DCE_OPTION_COLUMNS = ['open', 'high', 'low', 'close', 'pre_settle', 'settle', 'change1', 'change2', 'delta', 'volume', 'open_interest', 'oi_chg', 'turnover', 'exercise_volume']
OUTPUT_COLUMNS = ['symbol', 'date', 'open', 'high', 'low', 'close', 'volume', 'open_interest', 'turnover', 'settle', 'pre_settle', 'variety']
OPTION_OUTPUT_COLUMNS = ['symbol', 'date', 'open', 'high', 'low', 'close', 'pre_settle', 'settle', 'delta', 'volume', 'open_interest', 'oi_chg', 'turnover', 'implied_volatility', 'exercise_volume', 'variety']
CLOSE_LOC = 5
PRE_SETTLE_LOC = 11
FUTURE_SYMBOL_PATTERN = re.compile(r'(^[A-Za-z]{1,2})[0-9]+')
DATE_PATTERN = re.compile(r'^([0-9]{4})[-/]?([0-9]{2})[-/]?([0-9]{2})')
SIM_HAEDERS = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
DCE_HEADERS = {
'cache-control': "no-cache",
'postman-token': "153f42ca-148a-8f03-3302-8172cc4a5185"
}
def convert_date(date):
"""
transform a date string to datetime.date object.
:param day, string, e.g. 2016-01-01, 20160101 or 2016/01/01
:return: object of datetime.date(such as 2016-01-01) or None
"""
if isinstance(date, datetime.date):
return date
elif isinstance(date, str):
match = DATE_PATTERN.match(date)
if match:
groups = match.groups()
if len(groups) == 3:
return datetime.date(year=int(groups[0]), month=int(groups[1]), day=int(groups[2]))
return None
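# Illustrative usage (not part of the original module): convert_date accepts the
# string formats matched by DATE_PATTERN, as well as datetime.date objects, and
# returns None for anything else.
#
# >>> convert_date('2016-01-01')
# datetime.date(2016, 1, 1)
# >>> convert_date('20160101')
# datetime.date(2016, 1, 1)
# >>> convert_date('2016/01/01')
# datetime.date(2016, 1, 1)
# >>> convert_date('not a date') is None
# True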
DCE_MAP = {
'豆一': 'A',
'豆二': 'B',
'豆粕': 'M',
'豆油': 'Y',
'棕榈油': 'P',
'玉米': 'C',
'玉米淀粉': 'CS',
'鸡蛋': 'JD',
'纤维板': 'FB',
'胶合板': 'BB',
'聚乙烯': 'L',
'聚氯乙烯': 'V',
'聚丙烯': 'PP',
'焦炭': 'J',
'焦煤': 'JM',
'铁矿石': 'I'
}
FUTURE_CODE={
'IH': ('CFFEX', '上证50指数', 300),
'IF': ('CFFEX', '沪深300指数', 300),
'IC': ('CFFEX', '中证500指数', 200),
'T': ('CFFEX', '10年期国债期货', 10000),
'TF': ('CFFEX', '5年期国债期货', 10000),
'CU': ('SHFE', '沪铜' ,5),
'AL': ('SHFE', '沪铝', 5),
'ZN': ('SHFE', '沪锌', 5),
'PB': ('SHFE', '沪铅', 5),
'NI': ('SHFE', '沪镍', 1),
'SN': ('SHFE', '沪锡', 1),
'AU': ('SHFE', '沪金', 1000),
'AG': ('SHFE', '沪银', 15),
'RB': ('SHFE', '螺纹钢', 10),
'WR': ('SHFE', '线材', 10),
'HC': ('SHFE', '热轧卷板', 10),
'FU': ('SHFE', '燃油', 50),
'BU': ('SHFE', '沥青', 10),
'RU': ('SHFE', '橡胶', 10),
'A': ('DCE', '豆一', 10),
'B': ('DCE', '豆二', 10),
'M': ('DCE', '豆粕', 10),
'Y': ('DCE', '豆油', 10),
'P': ('DCE', '棕榈油', 10),
'C': ('DCE', '玉米', 10),
'CS': ('DCE', '玉米淀粉', 10),
'JD': ('DCE', '鸡蛋', 5),
'FB': ('DCE', '纤维板', 500),
'BB': ('DCE', '胶合板', 500),
'L': ('DCE', '聚乙烯', 5),
'V': ('DCE', '聚氯乙烯', 5),
'PP': ('DCE', '聚丙烯', 5),
'J': ('DCE', '焦炭', 100),
'JM': ('DCE', '焦煤', 60),
'I': ('DCE', '铁矿石', 100),
'SR': ('CZCE', '白糖', 10),
'CF': ('CZCE', '棉花',5),
'PM': ('CZCE', '普麦',50),
'WH': ('CZCE', '强麦',20),
'OI': ('CZCE', '菜籽油',10),
'PTA': ('CZCE', 'PTA', 0),
'RI': ('CZCE', '早籼稻',20),
'LR': ('CZCE', '晚籼稻',20),
'MA': ('CZCE', '甲醇', 10),
'FG': ('CZCE', '玻璃', 20),
'RS': ('CZCE', '油菜籽', 10),
'RM': ('CZCE', '菜籽粕', 10),
'TC': ('CZCE', '动力煤', 200),
'ZC': ('CZCE', '动力煤', 100),
'JR': ('CZCE', '粳稻', 20),
'SF': ('CZCE', '硅铁', 5),
'SM': ('CZCE', '锰硅', 5)
}
| bsd-3-clause |
hotzenklotz/pybeerxml | pybeerxml/yeast.py | 1 | 1747 |
from typing import Optional, Text, Any
from pybeerxml.utils import cast_to_bool
class Yeast:
def __init__(self):
self.name: Optional[Text] = None
self.version: Optional[int] = None
self.type: Optional[Text] = None
self.form: Optional[Text] = None # May be "Liquid", "Dry", "Slant" or "Culture"
self.attenuation: Optional[float] = None # Percent
self.notes: Optional[Text] = None
self.laboratory: Optional[Text] = None
self.product_id: Optional[Text] = None
self.flocculation: Optional[
Text
] = None # May be "Low", "Medium", "High" or "Very High"
self.amount: Optional[float] = None
self._amount_is_weight: Optional[bool] = None
self.min_temperature: Optional[float] = None
self.max_temperature: Optional[float] = None
self.best_for: Optional[Text] = None
self.times_cultured: Optional[int] = None
self.max_reuse: Optional[int] = None
self._add_to_secondary: Optional[bool] = None
self.inventory: Optional[Text] = None
self.culture_date: Optional[Text] = None
@property
def amount_is_weight(self) -> Optional[bool]:
if self._amount_is_weight is not None:
return self._amount_is_weight
return None
@amount_is_weight.setter
def amount_is_weight(self, value: Any):
self._amount_is_weight = cast_to_bool(value)
@property
def add_to_secondary(self) -> Optional[bool]:
if self._add_to_secondary is not None:
return self._add_to_secondary
return None
@add_to_secondary.setter
def add_to_secondary(self, value: Any):
self._add_to_secondary = cast_to_bool(value)
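# Illustrative usage (not part of the original module), assuming cast_to_bool in
# pybeerxml.utils maps the BeerXML boolean spellings "TRUE"/"FALSE" to Python booleans:
#
# >>> yeast = Yeast()
# >>> yeast.amount_is_weight = "TRUE"    # stored via cast_to_bool
# >>> yeast.amount_is_weight
# True
# >>> yeast.add_to_secondary is None     # never set, so the property returns None
# True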
| mit |
orekyuu/intellij-community | python/lib/Lib/whichdb.py | 240 | 3353 |
#!/usr/bin/env python
"""Guess which db package to use to open a db file."""
import os
import struct
import sys
try:
import dbm
_dbmerror = dbm.error
except ImportError:
dbm = None
# just some sort of valid exception which might be raised in the
# dbm test
_dbmerror = IOError
def whichdb(filename):
"""Guess which db package to use to open a db file.
Return values:
- None if the database file can't be read;
- empty string if the file can be read but can't be recognized;
- the module name (e.g. "dbm" or "gdbm") if recognized.
Importing the given module may still fail, and opening the
database using that module may still fail.
"""
# Check for dbm first -- this has a .pag and a .dir file
try:
f = open(filename + os.extsep + "pag", "rb")
f.close()
# dbm linked with gdbm on OS/2 doesn't have .dir file
if not (dbm.library == "GNU gdbm" and sys.platform == "os2emx"):
f = open(filename + os.extsep + "dir", "rb")
f.close()
return "dbm"
except IOError:
# some dbm emulations based on Berkeley DB generate a .db file
# some do not, but they should be caught by the dbhash checks
try:
f = open(filename + os.extsep + "db", "rb")
f.close()
# guarantee we can actually open the file using dbm
# kind of overkill, but since we are dealing with emulations
# it seems like a prudent step
if dbm is not None:
d = dbm.open(filename)
d.close()
return "dbm"
except (IOError, _dbmerror):
pass
# Check for dumbdbm next -- this has a .dir and a .dat file
try:
# First check for presence of files
os.stat(filename + os.extsep + "dat")
size = os.stat(filename + os.extsep + "dir").st_size
# dumbdbm files with no keys are empty
if size == 0:
return "dumbdbm"
f = open(filename + os.extsep + "dir", "rb")
try:
if f.read(1) in ("'", '"'):
return "dumbdbm"
finally:
f.close()
except (OSError, IOError):
pass
# See if the file exists, return None if not
try:
f = open(filename, "rb")
except IOError:
return None
# Read the start of the file -- the magic number
s16 = f.read(16)
f.close()
s = s16[0:4]
# Return "" if not at least 4 bytes
if len(s) != 4:
return ""
# Convert to 4-byte int in native byte order -- return "" if impossible
try:
(magic,) = struct.unpack("=l", s)
except struct.error:
return ""
# Check for GNU dbm
if magic == 0x13579ace:
return "gdbm"
# Check for old Berkeley db hash file format v2
if magic in (0x00061561, 0x61150600):
return "bsddb185"
# Later versions of Berkeley db hash file have a 12-byte pad in
# front of the file type
try:
(magic,) = struct.unpack("=l", s16[-4:])
except struct.error:
return ""
# Check for BSD hash
if magic in (0x00061561, 0x61150600):
return "dbhash"
# Unknown
return ""
if __name__ == "__main__":
for filename in sys.argv[1:]:
print whichdb(filename) or "UNKNOWN", filename
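# Illustrative command-line usage (not part of the original module; the path is
# hypothetical):
#
#   $ python whichdb.py /var/db/example
#   gdbm /var/db/example
#
# The first word is the module name guessed by whichdb(), or "UNKNOWN" when
# whichdb() returns None or "".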
| apache-2.0 |
cyberark-bizdev/ansible | lib/ansible/modules/cloud/amazon/efs.py | 5 | 24222 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: efs
short_description: create and maintain EFS file systems
description:
- Module allows creating, searching for, and destroying Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
- "Artem Kazakov (@akazakov)"
options:
encrypt:
description:
- A boolean value that, if true, creates an encrypted file system. This cannot be modified after the file
system is created.
required: false
default: false
choices: ['yes', 'no']
version_added: 2.5
kms_key_id:
description:
- The id of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only
required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for
Amazon EFS is used. The key id can be Key ID, Key ID ARN, Key Alias or Key Alias ARN.
required: false
version_added: 2.5
purge_tags:
description:
- If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter
is not set then tags will not be modified.
required: false
default: yes
choices: [ 'yes', 'no' ]
version_added: 2.5
state:
description:
- Allows creating, searching for, and destroying an Amazon EFS file system
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Creation Token of Amazon EFS file system. Required for create and update. Either name or ID required for delete.
required: false
default: None
id:
description:
- ID of Amazon EFS. Either name or ID required for delete.
required: false
default: None
performance_mode:
description:
- File system's performance mode to use. Only takes effect during creation.
required: false
default: 'general_purpose'
choices: ['general_purpose', 'max_io']
tags:
description:
- "List of tags of Amazon EFS. Should be defined as dictionary
In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- subnet_id - Mandatory. The ID of the subnet to add the mount target in.
- ip_address - Optional. A valid IPv4 address within the address range of the specified subnet.
- security_groups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified
This data may be modified for existing EFS using state 'present' and new list of mount targets."
required: false
default: None
wait:
description:
- "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
In case of 'absent' state should wait for EFS 'deleted' life cycle state"
required: false
default: "no"
choices: ["yes", "no"]
wait_timeout:
description:
- How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
required: false
default: 0
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# EFS provisioning
- efs:
state: present
name: myTestEFS
tags:
name: myTestNameTag
purpose: file-storage
targets:
- subnet_id: subnet-748c5d03
security_groups: [ "sg-1a2b3c4d" ]
# Modifying EFS data
- efs:
state: present
name: myTestEFS
tags:
name: myAnotherTestTag
targets:
- subnet_id: subnet-7654fdca
security_groups: [ "sg-4c5d6f7a" ]
# Deleting EFS
- efs:
state: absent
name: myTestEFS
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: string
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: string
sample: "console-88609e04-9a0e-4a2e-912c-feaa99509961"
file_system_id:
description: ID of the file system
returned: always
type: string
sample: "fs-xxxxxxxx"
life_cycle_state:
description: state of the EFS file system
returned: always
type: string
sample: "creating, available, deleting, deleted"
mount_point:
description: url of file system
returned: always
type: string
sample: ".fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: string
sample: "my-efs"
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: string
sample: "XXXXXXXXXXXX"
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: string
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from time import sleep
from time import time as timestamp
import traceback
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError as e:
pass # Taken care of by ec2.HAS_BOTO3
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info, ansible_dict_to_boto3_tag_list,
compare_aws_tags, boto3_tag_list_to_ansible_dict)
def _index_by_key(key, items):
return dict((item[key], item) for item in items)
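# Illustrative behaviour (not part of the original module):
#   _index_by_key('SubnetId', [{'SubnetId': 'subnet-1', 'IpAddress': '10.0.0.5'}])
#   returns {'subnet-1': {'SubnetId': 'subnet-1', 'IpAddress': '10.0.0.5'}}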
class EFSConnection(object):
DEFAULT_WAIT_TIMEOUT_SECONDS = 0
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
self.module = module
self.region = region
self.wait = module.params.get('wait')
self.wait_timeout = module.params.get('wait_timeout')
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['Name'] = item['CreationToken']
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = self.connection.describe_tags(**kwargs)['Tags']
return tags
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def get_file_system_id(self, name):
"""
Returns ID of instance by instance name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name
))
return info and info['FileSystemId'] or None
def get_file_system_state(self, name, file_system_id=None):
"""
Returns state of filesystem by EFS id/name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name,
FileSystemId=file_system_id
))
return info and info['LifeCycleState'] or self.STATE_DELETED
def get_mount_targets_in_state(self, file_system_id, states=None):
"""
Returns states of mount targets of selected EFS with selected state(s) (optional)
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
FileSystemId=file_system_id
)
if states:
if not isinstance(states, list):
states = [states]
targets = filter(lambda target: target['LifeCycleState'] in states, targets)
return list(targets)
def create_file_system(self, name, performance_mode, encrypt, kms_key_id):
"""
Creates new filesystem with selected name
"""
changed = False
state = self.get_file_system_state(name)
params = {}
params['CreationToken'] = name
params['PerformanceMode'] = performance_mode
if encrypt:
params['Encrypted'] = encrypt
if kms_key_id is not None:
params['KmsKeyId'] = kms_key_id
if state in [self.STATE_DELETING, self.STATE_DELETED]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED
)
try:
self.connection.create_file_system(**params)
changed = True
except ClientError as e:
self.module.fail_json(msg="Unable to create file system: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
self.module.fail_json(msg="Unable to create file system: {0}".format(to_native(e)),
exception=traceback.format_exc())
# we always wait for the state to be available when creating.
# if we try to take any actions on the file system before it's available
# we'll throw errors
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE,
self.wait_timeout
)
return changed
def converge_file_system(self, name, tags, purge_tags, targets):
"""
Change attributes (mount targets and tags) of filesystem by name
"""
result = False
fs_id = self.get_file_system_id(name)
if tags is not None:
tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags)
if tags_to_delete:
try:
self.connection.delete_tags(
FileSystemId=fs_id,
TagKeys=tags_to_delete
)
except ClientError as e:
self.module.fail_json(msg="Unable to delete tags: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
self.module.fail_json(msg="Unable to delete tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
result = True
if tags_need_modify:
try:
self.connection.create_tags(
FileSystemId=fs_id,
Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
)
except ClientError as e:
self.module.fail_json(msg="Unable to create tags: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
self.module.fail_json(msg="Unable to create tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
result = True
if targets is not None:
incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id))
targets = _index_by_key('SubnetId', targets)
targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
targets, True)
# To modify mount target it should be deleted and created again
changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
current_targets[sid], targets[sid])]
targets_to_delete = list(targets_to_delete) + changed
targets_to_create = list(targets_to_create) + changed
if targets_to_delete:
for sid in targets_to_delete:
self.connection.delete_mount_target(
MountTargetId=current_targets[sid]['MountTargetId']
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
result = True
if targets_to_create:
for sid in targets_to_create:
self.connection.create_mount_target(
FileSystemId=fs_id,
**targets[sid]
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0,
self.wait_timeout
)
result = True
# If no security groups were passed into the module, then do not change it.
security_groups_to_update = [sid for sid in intersection if
'SecurityGroups' in targets[sid] and
current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']]
if security_groups_to_update:
for sid in security_groups_to_update:
self.connection.modify_mount_target_security_groups(
MountTargetId=current_targets[sid]['MountTargetId'],
SecurityGroups=targets[sid].get('SecurityGroups', None)
)
result = True
return result
def delete_file_system(self, name, file_system_id=None):
"""
Removes EFS instance by id/name
"""
result = False
state = self.get_file_system_state(name, file_system_id)
if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE
)
if not file_system_id:
file_system_id = self.get_file_system_id(name)
self.delete_mount_targets(file_system_id)
self.connection.delete_file_system(FileSystemId=file_system_id)
result = True
if self.wait:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED,
self.wait_timeout
)
return result
def delete_mount_targets(self, file_system_id):
"""
Removes mount targets by EFS id
"""
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
0
)
targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
for target in targets:
self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
0
)
return len(targets) > 0
def iterate_all(attr, map_method, **kwargs):
"""
Creates an iterator over the given attribute of a paginated result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
args['Marker'] = data['NextMarker']
continue
break
except ClientError as e:
if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
sleep(wait)
wait = wait * 2
continue
else:
raise
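# Illustrative call (not part of the original module): iterate_all pages through
# a boto3 EFS response via the 'NextMarker' token and retries throttled calls
# with exponential backoff (1s, 2s, 4s, ...) until the backoff would reach 600s.
#
#   for fs in iterate_all('FileSystems', connection.describe_file_systems):
#       print(fs['FileSystemId'])
#
# where 'connection' is assumed to be a boto3 EFS client.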
def targets_equal(keys, a, b):
"""
Compares two mount targets by the specified attributes
"""
for key in keys:
if key in b and a[key] != b[key]:
return False
return True
def dict_diff(dict1, dict2, by_key=False):
"""
Helper method to calculate difference of two dictionaries
"""
keys1 = set(dict1.keys() if by_key else dict1.items())
keys2 = set(dict2.keys() if by_key else dict2.items())
intersection = keys1 & keys2
return keys2 ^ intersection, intersection, keys1 ^ intersection
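# Illustrative behaviour (not part of the original module), with by_key=True:
#   dict_diff({'a': 1, 'b': 2}, {'b': 3, 'c': 4}, by_key=True)
# returns three sets: {'c'} (keys only in dict2), {'b'} (common keys) and
# {'a'} (keys only in dict1).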
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
"""
Helper method to wait for desired value returned by callback method
"""
wait_start = timestamp()
while True:
if callback() != value:
if timeout != 0 and (timestamp() - wait_start > timeout):
raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
else:
sleep(5)
continue
break
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
encrypt=dict(required=False, type="bool", default=False),
state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
kms_key_id=dict(required=False, type='str', default=None),
purge_tags=dict(default=True, type='bool'),
id=dict(required=False, type='str', default=None),
name=dict(required=False, type='str', default=None),
tags=dict(required=False, type="dict", default={}),
targets=dict(required=False, type="list", default=[]),
performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
wait=dict(required=False, type="bool", default=False),
wait_timeout=dict(required=False, type="int", default=0)
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
target_translations = {
'ip_address': 'IpAddress',
'security_groups': 'SecurityGroups',
'subnet_id': 'SubnetId'
}
targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
performance_mode_translations = {
'general_purpose': 'generalPurpose',
'max_io': 'maxIO'
}
encrypt = module.params.get('encrypt')
kms_key_id = module.params.get('kms_key_id')
performance_mode = performance_mode_translations[module.params.get('performance_mode')]
purge_tags = module.params.get('purge_tags')
changed = False
state = str(module.params.get('state')).lower()
if state == 'present':
if not name:
module.fail_json(msg='Name parameter is required for create')
changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id)
changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets) or changed
result = first_or_default(connection.get_file_systems(CreationToken=name))
elif state == 'absent':
if not name and not fs_id:
module.fail_json(msg='Either name or id parameter is required for delete')
changed = connection.delete_file_system(name, fs_id)
result = None
if result:
result = camel_dict_to_snake_dict(result)
module.exit_json(changed=changed, efs=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
schober2/tc359_helper_methods | path/ruby/2.0.0/gems/nokogiri-1.6.6.2/ext/nokogiri/tmp/x86_64-apple-darwin14/ports/libxml2/2.9.2/libxml2-2.9.2/python/tests/build.py | 37 | 1551 |
#!/usr/bin/python -u
import libxml2
import sys
# Memory debug specific
libxml2.debugMemory(1)
doc = libxml2.newDoc("1.0")
comment = doc.newDocComment("This is a generated document")
doc.addChild(comment)
pi = libxml2.newPI("test", "PI content")
doc.addChild(pi)
root = doc.newChild(None, "doc", None)
ns = root.newNs("http://example.com/doc", "my")
root.setNs(ns)
elem = root.newChild(None, "foo", "bar")
elem.setBase("http://example.com/imgs")
elem.setProp("img", "image.gif")
doc.saveFile("tmp.xml")
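# At this point tmp.xml should contain roughly the following (illustrative;
# exact whitespace and attribute order may differ):
#
#   <?xml version="1.0"?>
#   <!--This is a generated document-->
#   <?test PI content?>
#   <my:doc xmlns:my="http://example.com/doc"><foo xml:base="http://example.com/imgs" img="image.gif">bar</foo></my:doc>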
doc.freeDoc()
doc = libxml2.parseFile("tmp.xml")
comment = doc.children
if comment.type != "comment" or \
comment.content != "This is a generated document":
print("error rereading comment")
sys.exit(1)
pi = comment.next
if pi.type != "pi" or pi.name != "test" or pi.content != "PI content":
print("error rereading PI")
sys.exit(1)
root = pi.next
if root.name != "doc":
print("error rereading root")
sys.exit(1)
ns = root.ns()
if ns.name != "my" or ns.content != "http://example.com/doc":
print("error rereading namespace")
sys.exit(1)
elem = root.children
if elem.name != "foo":
print("error rereading elem")
sys.exit(1)
if elem.getBase(None) != "http://example.com/imgs":
print("error rereading base")
sys.exit(1)
if elem.prop("img") != "image.gif":
print("error rereading property")
sys.exit(1)
doc.freeDoc()
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| mit |
nivertech/peru | peru/resources/plugins/hg/hg_plugin.py | 4 | 3238 |
#! /usr/bin/env python3
import os
import shutil
import subprocess
import textwrap
CACHE_PATH = os.environ['PERU_PLUGIN_CACHE']
URL = os.environ['PERU_MODULE_URL']
REV = os.environ['PERU_MODULE_REV'] or 'default'
REUP = os.environ['PERU_MODULE_REUP'] or 'default'
def hg(*args, hg_dir=None, capture_output=False):
# Avoid forgetting this arg.
assert hg_dir is None or os.path.isdir(hg_dir)
command = ['hg']
if hg_dir:
command.append('--repository')
command.append(hg_dir)
command.extend(args)
stdout = subprocess.PIPE if capture_output else None
# Always let stderr print to the caller.
process = subprocess.Popen(command, stdin=subprocess.DEVNULL,
stdout=stdout, universal_newlines=True)
output, _ = process.communicate()
if process.returncode != 0:
raise RuntimeError(
'Command exited with error code {0}:\n$ {1}\n{2}'.format(
process.returncode,
' '.join(command),
output))
return output
def clone_if_needed(url, verbose=False):
if not os.path.exists(os.path.join(CACHE_PATH, '.hg')):
try:
if verbose:
print('hg clone', url)
hg('clone', '--noupdate', url, CACHE_PATH)
except:
# Delete the whole thing if the clone failed to avoid confusing the
# cache.
shutil.rmtree(CACHE_PATH)
raise
configure(CACHE_PATH)
def configure(repo_path):
# Set configs needed for cached repos.
hgrc_path = os.path.join(repo_path, '.hg', 'hgrc')
with open(hgrc_path, 'a') as f:
f.write(textwrap.dedent('''\
[ui]
# prevent 'hg archive' from creating '.hg_archival.txt' files.
archivemeta = false
'''))
def hg_pull(url, repo_path):
print('hg pull', url)
hg('pull', hg_dir=repo_path)
def already_has_rev(repo, rev):
try:
output = hg('identify', '--debug', '--rev', rev, hg_dir=repo,
capture_output=True)
except:
return False
# Only return True for revs that are absolute hashes.
# We could consider treating tags the same way, but...
# 1) Tags actually can change.
# 2) It's not clear at a glance whether something is a branch or a hash.
# Keep it simple.
return output.split()[0] == rev
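# Illustrative output (not part of the original plugin): 'hg identify --debug
# --rev <rev>' prints the full 40-character changeset hash first, possibly
# followed by branch, tag or bookmark names, e.g.
#
#   <40 hex digits> tip
#
# so output.split()[0] == rev only succeeds when REV itself is that absolute hash.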
def plugin_sync():
dest = os.environ['PERU_SYNC_DEST']
clone_if_needed(URL, verbose=True)
if not already_has_rev(CACHE_PATH, REV):
hg_pull(URL, CACHE_PATH)
# TODO: Should this handle subrepos?
hg('archive', '--type', 'files', '--rev', REV, dest, hg_dir=CACHE_PATH)
def plugin_reup():
reup_output = os.environ['PERU_REUP_OUTPUT']
clone_if_needed(URL, verbose=True)
hg_pull(URL, CACHE_PATH)
output = hg('identify', '--debug', '--rev', REUP, hg_dir=CACHE_PATH,
capture_output=True)
with open(reup_output, 'w') as output_file:
print('rev:', output.split()[0], file=output_file)
command = os.environ['PERU_PLUGIN_COMMAND']
if command == 'sync':
plugin_sync()
elif command == 'reup':
plugin_reup()
else:
raise RuntimeError('Unknown command: ' + repr(command))
| mit |
bilalliberty/android_kernel_htc_K2UL | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 |
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
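# Illustrative result (not part of the original script): for a flags value of
# 0x05, i.e. IRQS_OFF | NEED_RESCHED, trace_flag_str(0x05) typically returns
# "IRQS_OFF | NEED_RESCHED".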
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
10clouds/edx-platform | lms/djangoapps/discussion_api/tests/test_api.py | 4 | 126488 |
"""
Tests for Discussion API internal interface
"""
from datetime import datetime, timedelta
import itertools
from urlparse import parse_qs, urlparse, urlunparse
from urllib import urlencode
import ddt
import httpretty
import mock
from nose.plugins.attrib import attr
from pytz import UTC
from django.core.exceptions import ValidationError
from django.test.client import RequestFactory
from rest_framework.exceptions import PermissionDenied
from opaque_keys.edx.locator import CourseLocator
from common.test.utils import MockSignalHandlerMixin, disable_signal
from courseware.tests.factories import BetaTesterFactory, StaffFactory
from discussion_api import api
from discussion_api.api import (
create_comment,
create_thread,
delete_comment,
delete_thread,
get_comment_list,
get_course,
get_course_topics,
get_thread_list,
update_comment,
update_thread,
get_thread,
)
from discussion_api.exceptions import DiscussionDisabledError, ThreadNotFoundError, CommentNotFoundError
from discussion_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_comment,
make_minimal_cs_thread,
make_paginated_api_response
)
from django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role,
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.lib.exceptions import CourseNotFoundError, PageNotFoundError
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
def _remove_discussion_tab(course, user_id):
"""
Remove the discussion tab for the course.
user_id is passed to the modulestore as the editor of the module.
"""
course.tabs = [tab for tab in course.tabs if not tab.type == 'discussion']
modulestore().update_item(course, user_id)
def _discussion_disabled_course_for(user):
"""
Create and return a course with discussions disabled.
The user passed in will be enrolled in the course.
"""
course_with_disabled_forums = CourseFactory.create()
CourseEnrollmentFactory.create(user=user, course_id=course_with_disabled_forums.id)
_remove_discussion_tab(course_with_disabled_forums, user.id)
return course_with_disabled_forums
@attr('shard_2')
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTest(UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_course"""
@classmethod
def setUpClass(cls):
super(GetCourseTest, cls).setUpClass()
cls.course = CourseFactory.create(org="x", course="y", run="z")
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTest, self).setUp()
self.user = UserFactory.create()
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_course(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(CourseNotFoundError):
get_course(self.request, self.course.id)
def test_discussions_disabled(self):
with self.assertRaises(DiscussionDisabledError):
get_course(self.request, _discussion_disabled_course_for(self.user).id)
def test_basic(self):
self.assertEqual(
get_course(self.request, self.course.id),
{
"id": unicode(self.course.id),
"blackouts": [],
"thread_list_url": "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz",
"following_thread_list_url": (
"http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&following=True"
),
"topics_url": "http://testserver/api/discussion/v1/course_topics/x/y/z",
}
)
def test_blackout(self):
# A variety of formats is accepted
self.course.discussion_blackouts = [
["2015-06-09T00:00:00Z", "6-10-15"],
[1433980800000, datetime(2015, 6, 12)],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(
result["blackouts"],
[
{"start": "2015-06-09T00:00:00+00:00", "end": "2015-06-10T00:00:00+00:00"},
{"start": "2015-06-11T00:00:00+00:00", "end": "2015-06-12T00:00:00+00:00"},
]
)
@ddt.data(None, "not a datetime", "2015", [])
def test_blackout_errors(self, bad_value):
self.course.discussion_blackouts = [
[bad_value, "2015-06-09T00:00:00Z"],
["2015-06-10T00:00:00Z", "2015-06-11T00:00:00Z"],
]
modulestore().update_item(self.course, self.user.id)
result = get_course(self.request, self.course.id)
self.assertEqual(result["blackouts"], [])
@attr('shard_2')
@mock.patch.dict("django.conf.settings.FEATURES", {"DISABLE_START_DATES": False})
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCourseTopicsTest(UrlResetMixin, ModuleStoreTestCase):
"""Test for get_course_topics"""
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCourseTopicsTest, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.partition = UserPartition(
0,
"partition",
"Test Partition",
[Group(0, "Cohort A"), Group(1, "Cohort B")],
scheme_id="cohort"
)
self.course = CourseFactory.create(
org="x",
course="y",
run="z",
start=datetime.now(UTC),
discussion_topics={"Test Topic": {"id": "non-courseware-topic-id"}},
user_partitions=[self.partition],
cohort_config={"cohorted": True},
days_early_for_beta=3
)
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def make_discussion_module(self, topic_id, category, subcategory, **kwargs):
"""Build a discussion module in self.course"""
ItemFactory.create(
parent_location=self.course.location,
category="discussion",
discussion_id=topic_id,
discussion_category=category,
discussion_target=subcategory,
**kwargs
)
def get_thread_list_url(self, topic_id_list):
"""
Returns the URL for the thread_list_url field, given a list of topic_ids
"""
path = "http://testserver/api/discussion/v1/threads/"
query_list = [("course_id", unicode(self.course.id))] + [("topic_id", topic_id) for topic_id in topic_id_list]
return urlunparse(("", "", path, "", urlencode(query_list), ""))
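# Illustrative result (not part of the original tests): with topic_id_list
# ["topic_x", "topic_y"] and a course whose id serializes as "x/y/z", this
# helper would return
# "http://testserver/api/discussion/v1/threads/?course_id=x%2Fy%2Fz&topic_id=topic_x&topic_id=topic_y"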
def get_course_topics(self):
"""
Get course topics for self.course as self.request.user, generating
absolute URIs with a test scheme/host.
"""
return get_course_topics(self.request, self.course.id)
def make_expected_tree(self, topic_id, name, children=None):
"""
Build an expected result tree given a topic id, display name, and
children
"""
topic_id_list = [topic_id] if topic_id else [child["id"] for child in children]
children = children or []
node = {
"id": topic_id,
"name": name,
"children": children,
"thread_list_url": self.get_thread_list_url(topic_id_list)
}
return node
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_course_topics(self.request, CourseLocator.from_string("non/existent/course"))
def test_not_enrolled(self):
unenrolled_user = UserFactory.create()
self.request.user = unenrolled_user
with self.assertRaises(CourseNotFoundError):
self.get_course_topics()
def test_discussions_disabled(self):
_remove_discussion_tab(self.course, self.user.id)
with self.assertRaises(DiscussionDisabledError):
self.get_course_topics()
def test_without_courseware(self):
actual = self.get_course_topics()
expected = {
"courseware_topics": [],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_with_courseware(self):
self.make_discussion_module("courseware-topic-id", "Foo", "Bar")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"Foo",
[self.make_expected_tree("courseware-topic-id", "Bar")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic")
],
}
self.assertEqual(actual, expected)
def test_many(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"A": {"id": "non-courseware-1"},
"B": {"id": "non-courseware-2"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_module("courseware-1", "A", "1")
self.make_discussion_module("courseware-2", "A", "2")
self.make_discussion_module("courseware-3", "B", "1")
self.make_discussion_module("courseware-4", "B", "2")
self.make_discussion_module("courseware-5", "C", "1")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"A",
[
self.make_expected_tree("courseware-1", "1"),
self.make_expected_tree("courseware-2", "2"),
]
),
self.make_expected_tree(
None,
"B",
[
self.make_expected_tree("courseware-3", "1"),
self.make_expected_tree("courseware-4", "2"),
]
),
self.make_expected_tree(
None,
"C",
[self.make_expected_tree("courseware-5", "1")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-1", "A"),
self.make_expected_tree("non-courseware-2", "B"),
],
}
self.assertEqual(actual, expected)
def test_sort_key(self):
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.course.discussion_topics = {
"W": {"id": "non-courseware-1", "sort_key": "Z"},
"X": {"id": "non-courseware-2"},
"Y": {"id": "non-courseware-3", "sort_key": "Y"},
"Z": {"id": "non-courseware-4", "sort_key": "W"},
}
self.store.update_item(self.course, self.user.id)
self.make_discussion_module("courseware-1", "First", "A", sort_key="D")
self.make_discussion_module("courseware-2", "First", "B", sort_key="B")
self.make_discussion_module("courseware-3", "First", "C", sort_key="E")
self.make_discussion_module("courseware-4", "Second", "A", sort_key="F")
self.make_discussion_module("courseware-5", "Second", "B", sort_key="G")
self.make_discussion_module("courseware-6", "Second", "C")
self.make_discussion_module("courseware-7", "Second", "D", sort_key="A")
actual = self.get_course_topics()
expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "B"),
self.make_expected_tree("courseware-1", "A"),
self.make_expected_tree("courseware-3", "C"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-7", "D"),
self.make_expected_tree("courseware-6", "C"),
self.make_expected_tree("courseware-4", "A"),
self.make_expected_tree("courseware-5", "B"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-4", "Z"),
self.make_expected_tree("non-courseware-2", "X"),
self.make_expected_tree("non-courseware-3", "Y"),
self.make_expected_tree("non-courseware-1", "W"),
],
}
self.assertEqual(actual, expected)
def test_access_control(self):
"""
Test that only topics that a user has access to are returned. The
ways in which a user may not have access are:
* Module is visible to staff only
* Module has a start date in the future
* Module is accessible only to a group the user is not in
Also, there is a case that ensures that a category with no accessible
subcategories does not appear in the result.
"""
beta_tester = BetaTesterFactory.create(course_key=self.course.id)
CourseEnrollmentFactory.create(user=beta_tester, course_id=self.course.id)
staff = StaffFactory.create(course_key=self.course.id)
for user, group_idx in [(self.user, 0), (beta_tester, 1)]:
cohort = CohortFactory.create(
course_id=self.course.id,
name=self.partition.groups[group_idx].name,
users=[user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=cohort,
partition_id=self.partition.id,
group_id=self.partition.groups[group_idx].id
)
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.make_discussion_module("courseware-1", "First", "Everybody")
self.make_discussion_module(
"courseware-2",
"First",
"Cohort A",
group_access={self.partition.id: [self.partition.groups[0].id]}
)
self.make_discussion_module(
"courseware-3",
"First",
"Cohort B",
group_access={self.partition.id: [self.partition.groups[1].id]}
)
self.make_discussion_module("courseware-4", "Second", "Staff Only", visible_to_staff_only=True)
self.make_discussion_module(
"courseware-5",
"Second",
"Future Start Date",
start=datetime.now(UTC) + timedelta(days=1)
)
student_actual = self.get_course_topics()
student_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(student_actual, student_expected)
self.request.user = beta_tester
beta_actual = self.get_course_topics()
beta_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[self.make_expected_tree("courseware-5", "Future Start Date")]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(beta_actual, beta_expected)
self.request.user = staff
staff_actual = self.get_course_topics()
staff_expected = {
"courseware_topics": [
self.make_expected_tree(
None,
"First",
[
self.make_expected_tree("courseware-2", "Cohort A"),
self.make_expected_tree("courseware-3", "Cohort B"),
self.make_expected_tree("courseware-1", "Everybody"),
]
),
self.make_expected_tree(
None,
"Second",
[
self.make_expected_tree("courseware-5", "Future Start Date"),
self.make_expected_tree("courseware-4", "Staff Only"),
]
),
],
"non_courseware_topics": [
self.make_expected_tree("non-courseware-topic-id", "Test Topic"),
],
}
self.assertEqual(staff_actual, staff_expected)
@attr('shard_2')
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetThreadListTest(CommentsServiceMockMixin, UrlResetMixin, SharedModuleStoreTestCase):
"""Test for get_thread_list"""
@classmethod
def setUpClass(cls):
super(GetThreadListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetThreadListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
self.cohort = CohortFactory.create(course_id=self.course.id)
def get_thread_list(
self,
threads,
page=1,
page_size=1,
num_pages=1,
course=None,
topic_id_list=None,
):
"""
Register the appropriate comments service response, then call
get_thread_list and return the result.
"""
course = course or self.course
self.register_get_threads_response(threads, page, num_pages)
ret = get_thread_list(self.request, course.id, page, page_size, topic_id_list)
return ret
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
get_thread_list(self.request, CourseLocator.from_string("non/existent/course"), 1, 1)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
self.get_thread_list([])
def test_discussions_disabled(self):
with self.assertRaises(DiscussionDisabledError):
self.get_thread_list([], course=_discussion_disabled_course_for(self.user))
def test_empty(self):
self.assertEqual(
self.get_thread_list([], num_pages=0).data,
{
"pagination": {
"next": None,
"previous": None,
"num_pages": 0,
"count": 0
},
"results": [],
"text_search_rewrite": None,
}
)
def test_get_threads_by_topic_id(self):
self.get_thread_list([], topic_id_list=["topic_x", "topic_meow"])
self.assertEqual(urlparse(httpretty.last_request().path).path, "/api/v1/threads")
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["1"],
"recursive": ["False"],
"commentable_ids": ["topic_x,topic_meow"]
})
def test_basic_query_params(self):
self.get_thread_list([], page=6, page_size=14)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"sort_order": ["desc"],
"page": ["6"],
"per_page": ["14"],
"recursive": ["False"],
})
def test_thread_content(self):
source_threads = [
{
"type": "thread",
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"commentable_id": "topic_x",
"group_id": None,
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
"thread_type": "discussion",
"title": "Test Title",
"body": "Test body",
"pinned": False,
"closed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"comments_count": 5,
"unread_comments_count": 3,
"endorsed": True,
"read": True,
},
{
"type": "thread",
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"commentable_id": "topic_y",
"group_id": self.cohort.id,
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"thread_type": "question",
"title": "Another Test Title",
"body": "More content",
"pinned": False,
"closed": False,
"abuse_flaggers": [],
"votes": {"up_count": 9},
"comments_count": 18,
"unread_comments_count": 0,
"endorsed": False,
"read": False,
},
]
expected_threads = [
{
"id": "test_thread_id_0",
"course_id": unicode(self.course.id),
"topic_id": "topic_x",
"group_id": None,
"group_name": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-04-28T00:00:00Z",
"updated_at": "2015-04-28T11:11:11Z",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"comment_count": 6,
"unread_comment_count": 3,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_0",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"has_endorsed": True,
"read": True,
},
{
"id": "test_thread_id_1",
"course_id": unicode(self.course.id),
"topic_id": "topic_y",
"group_id": self.cohort.id,
"group_name": self.cohort.name,
"author": self.author.username,
"author_label": None,
"created_at": "2015-04-28T22:22:22Z",
"updated_at": "2015-04-28T00:33:33Z",
"type": "question",
"title": "Another Test Title",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 9,
"comment_count": 19,
"unread_comment_count": 1,
"comment_list_url": None,
"endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=True"
),
"non_endorsed_comment_list_url": (
"http://testserver/api/discussion/v1/comments/?thread_id=test_thread_id_1&endorsed=False"
),
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"has_endorsed": False,
"read": False,
},
]
expected_result = make_paginated_api_response(
results=expected_threads, count=2, num_pages=1, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list(source_threads).data,
expected_result
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False]
)
)
@ddt.unpack
def test_request_group(self, role_name, course_is_cohorted):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.get_thread_list([], course=cohort_course)
actual_has_group = "group_id" in httpretty.last_request().querystring
expected_has_group = (course_is_cohorted and role_name == FORUM_ROLE_STUDENT)
self.assertEqual(actual_has_group, expected_has_group)
def test_pagination(self):
# N.B. Empty thread list is not realistic but convenient for this test
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=3, next_link="http://testserver/test_path?page=2", previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=1, num_pages=3).data,
expected_result
)
expected_result = make_paginated_api_response(
results=[],
count=0,
num_pages=3,
next_link="http://testserver/test_path?page=3",
previous_link="http://testserver/test_path?page=1"
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=2, num_pages=3).data,
expected_result
)
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=3, next_link=None, previous_link="http://testserver/test_path?page=2"
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
self.get_thread_list([], page=3, num_pages=3).data,
expected_result
)
# Test page past the last one
self.register_get_threads_response([], page=3, num_pages=3)
with self.assertRaises(PageNotFoundError):
get_thread_list(self.request, self.course.id, page=4, page_size=10)
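    # text_search_rewrite in the result mirrors whatever rewritten query the
    # comments service reports; the original search string is still forwarded
    # as the "text" query parameter.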
@ddt.data(None, "rewritten search string")
def test_text_search(self, text_search_rewrite):
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": text_search_rewrite})
self.register_get_threads_search_response([], text_search_rewrite, num_pages=0)
self.assertEqual(
get_thread_list(
self.request,
self.course.id,
page=1,
page_size=10,
text_search="test search string"
).data,
expected_result
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["10"],
"recursive": ["False"],
"text": ["test search string"],
})
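    # following=True should route the request to the user's subscribed_threads
    # endpoint instead of the generic /threads endpoint.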
def test_following(self):
self.register_subscribed_threads_response(self.user, [], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
following=True,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
result,
expected_result
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/users/{}/subscribed_threads".format(self.user.id)
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["11"],
})
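    # The "view" argument is forwarded to the comments service as a query
    # parameter named after the view ("unanswered" or "unread") with the
    # value "true".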
@ddt.data("unanswered", "unread")
def test_view_query(self, query):
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
view=query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(
result,
expected_result
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["11"],
"recursive": ["False"],
query: ["true"],
})
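    # order_by values from the HTTP API are mapped onto the comments service's
    # sort_key vocabulary (see the ddt pairs below).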
@ddt.data(
("last_activity_at", "activity"),
("comment_count", "comments"),
("vote_count", "votes")
)
@ddt.unpack
def test_order_by_query(self, http_query, cc_query):
"""
Tests the order_by parameter
Arguments:
            http_query (str): Query string sent in the HTTP request
cc_query (str): Query string used for the comments client service
"""
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_by=http_query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(result, expected_result)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": [cc_query],
"sort_order": ["desc"],
"page": ["1"],
"per_page": ["11"],
"recursive": ["False"],
})
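    # order_direction is passed through to the comments service unchanged as
    # sort_order.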
@ddt.data("asc", "desc")
def test_order_direction_query(self, http_query):
self.register_get_threads_response([], page=1, num_pages=0)
result = get_thread_list(
self.request,
self.course.id,
page=1,
page_size=11,
order_direction=http_query,
).data
expected_result = make_paginated_api_response(
results=[], count=0, num_pages=0, next_link=None, previous_link=None
)
expected_result.update({"text_search_rewrite": None})
self.assertEqual(result, expected_result)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads"
)
self.assert_last_query_params({
"user_id": [unicode(self.user.id)],
"course_id": [unicode(self.course.id)],
"sort_key": ["activity"],
"sort_order": [http_query],
"page": ["1"],
"per_page": ["11"],
"recursive": ["False"],
})
@attr('shard_2')
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class GetCommentListTest(CommentsServiceMockMixin, SharedModuleStoreTestCase):
"""Test for get_comment_list"""
@classmethod
def setUpClass(cls):
super(GetCommentListTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(GetCommentListTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.maxDiff = None # pylint: disable=invalid-name
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.author = UserFactory.create()
def make_minimal_cs_thread(self, overrides=None):
"""
Create a thread with the given overrides, plus the course_id if not
already in overrides.
"""
overrides = overrides.copy() if overrides else {}
overrides.setdefault("course_id", unicode(self.course.id))
return make_minimal_cs_thread(overrides)
def get_comment_list(self, thread, endorsed=None, page=1, page_size=1):
"""
Register the appropriate comments service response, then call
get_comment_list and return the result.
"""
self.register_get_thread_response(thread)
return get_comment_list(self.request, thread["id"], endorsed, page, page_size)
def test_nonexistent_thread(self):
thread_id = "nonexistent_thread"
self.register_get_thread_error_response(thread_id, 404)
with self.assertRaises(ThreadNotFoundError):
get_comment_list(self.request, thread_id, endorsed=False, page=1, page_size=1)
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
self.get_comment_list(self.make_minimal_cs_thread({"course_id": "non/existent/course"}))
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
self.get_comment_list(self.make_minimal_cs_thread())
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
with self.assertRaises(DiscussionDisabledError):
self.get_comment_list(
self.make_minimal_cs_thread(
overrides={"course_id": unicode(disabled_course.id)}
)
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(
self,
role_name,
course_is_cohorted,
topic_is_cohorted,
thread_group_state
):
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
thread = self.make_minimal_cs_thread({
"course_id": unicode(cohort_course.id),
"commentable_id": "test_topic",
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
topic_is_cohorted and
thread_group_state == "different_group"
)
try:
self.get_comment_list(thread)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(True, False)
def test_discussion_endorsed(self, endorsed_value):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "discussion"}),
endorsed=endorsed_value
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field may not be specified for discussion threads."]}
)
def test_question_without_endorsed(self):
with self.assertRaises(ValidationError) as assertion:
self.get_comment_list(
self.make_minimal_cs_thread({"thread_type": "question"}),
endorsed=None
)
self.assertEqual(
assertion.exception.message_dict,
{"endorsed": ["This field is required for question threads."]}
)
def test_empty(self):
discussion_thread = self.make_minimal_cs_thread(
{"thread_type": "discussion", "children": [], "resp_total": 0}
)
self.assertEqual(
self.get_comment_list(discussion_thread).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
question_thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [],
"non_endorsed_responses": [],
"non_endorsed_resp_total": 0
})
self.assertEqual(
self.get_comment_list(question_thread, endorsed=False).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
self.assertEqual(
self.get_comment_list(question_thread, endorsed=True).data,
make_paginated_api_response(results=[], count=0, num_pages=1, next_link=None, previous_link=None)
)
def test_basic_query_params(self):
self.get_comment_list(
self.make_minimal_cs_thread({
"children": [make_minimal_cs_comment()],
"resp_total": 71
}),
page=6,
page_size=14
)
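        # resp_skip and resp_limit are derived from the requested page:
        # resp_skip = (page - 1) * page_size = 5 * 14 = 70, resp_limit = 14.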
self.assert_query_params_equal(
httpretty.httpretty.latest_requests[-2],
{
"recursive": ["False"],
"user_id": [str(self.user.id)],
"mark_as_read": ["False"],
"resp_skip": ["70"],
"resp_limit": ["14"],
}
)
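    # Serialization checks: an anonymous comment's author is hidden (None), and
    # abuse_flagged is True only when the requesting user appears in the
    # comment's abuse_flaggers list.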
def test_discussion_content(self):
source_comments = [
{
"type": "comment",
"id": "test_comment_1",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": False,
"anonymous_to_peers": False,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"body": "Test body",
"endorsed": False,
"abuse_flaggers": [],
"votes": {"up_count": 4},
"child_count": 0,
"children": [],
},
{
"type": "comment",
"id": "test_comment_2",
"thread_id": "test_thread",
"user_id": str(self.author.id),
"username": self.author.username,
"anonymous": True,
"anonymous_to_peers": False,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"body": "More content",
"endorsed": False,
"abuse_flaggers": [str(self.user.id)],
"votes": {"up_count": 7},
"child_count": 0,
"children": [],
}
]
expected_comments = [
{
"id": "test_comment_1",
"thread_id": "test_thread",
"parent_id": None,
"author": self.author.username,
"author_label": None,
"created_at": "2015-05-11T00:00:00Z",
"updated_at": "2015-05-11T11:11:11Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 4,
"editable_fields": ["abuse_flagged", "voted"],
"child_count": 0,
"children": [],
},
{
"id": "test_comment_2",
"thread_id": "test_thread",
"parent_id": None,
"author": None,
"author_label": None,
"created_at": "2015-05-11T22:22:22Z",
"updated_at": "2015-05-11T33:33:33Z",
"raw_body": "More content",
"rendered_body": "<p>More content</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": True,
"voted": False,
"vote_count": 7,
"editable_fields": ["abuse_flagged", "voted"],
"child_count": 0,
"children": [],
},
]
actual_comments = self.get_comment_list(
self.make_minimal_cs_thread({"children": source_comments})
).data["results"]
self.assertEqual(actual_comments, expected_comments)
def test_question_content(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [make_minimal_cs_comment({"id": "endorsed_comment"})],
"non_endorsed_responses": [make_minimal_cs_comment({"id": "non_endorsed_comment"})],
"non_endorsed_resp_total": 1,
})
endorsed_actual = self.get_comment_list(thread, endorsed=True).data
self.assertEqual(endorsed_actual["results"][0]["id"], "endorsed_comment")
non_endorsed_actual = self.get_comment_list(thread, endorsed=False).data
self.assertEqual(non_endorsed_actual["results"][0]["id"], "non_endorsed_comment")
def test_endorsed_by_anonymity(self):
"""
Ensure thread anonymity is properly considered in serializing
endorsed_by.
"""
thread = self.make_minimal_cs_thread({
"anonymous": True,
"children": [
make_minimal_cs_comment({
"endorsement": {"user_id": str(self.author.id), "time": "2015-05-18T12:34:56Z"}
})
]
})
actual_comments = self.get_comment_list(thread).data["results"]
self.assertIsNone(actual_comments[0]["endorsed_by"])
@ddt.data(
("discussion", None, "children", "resp_total"),
("question", False, "non_endorsed_responses", "non_endorsed_resp_total"),
)
@ddt.unpack
def test_cs_pagination(self, thread_type, endorsed_arg, response_field, response_total_field):
"""
Test cases in which pagination is done by the comments service.
thread_type is the type of thread (question or discussion).
endorsed_arg is the value of the endorsed argument.
        response_field is the field in which responses are returned for the
given thread type.
response_total_field is the field in which the total number of responses
is returned for the given thread type.
"""
# N.B. The mismatch between the number of children and the listed total
# number of responses is unrealistic but convenient for this test
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [make_minimal_cs_comment()],
response_total_field: 5,
})
# Only page
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=5).data
self.assertIsNone(actual["pagination"]["next"])
self.assertIsNone(actual["pagination"]["previous"])
# First page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=1, page_size=2).data
self.assertEqual(actual["pagination"]["next"], "http://testserver/test_path?page=2")
self.assertIsNone(actual["pagination"]["previous"])
# Middle page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=2).data
self.assertEqual(actual["pagination"]["next"], "http://testserver/test_path?page=3")
self.assertEqual(actual["pagination"]["previous"], "http://testserver/test_path?page=1")
# Last page of many
actual = self.get_comment_list(thread, endorsed=endorsed_arg, page=3, page_size=2).data
self.assertIsNone(actual["pagination"]["next"])
self.assertEqual(actual["pagination"]["previous"], "http://testserver/test_path?page=2")
# Page past the end
thread = self.make_minimal_cs_thread({
"thread_type": thread_type,
response_field: [],
response_total_field: 5
})
with self.assertRaises(PageNotFoundError):
self.get_comment_list(thread, endorsed=endorsed_arg, page=2, page_size=5)
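    # The mocked thread returns all endorsed responses in a single list, so
    # get_comment_list does the slicing itself; check each page's contents and
    # links, plus the past-the-end error.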
def test_question_endorsed_pagination(self):
thread = self.make_minimal_cs_thread({
"thread_type": "question",
"endorsed_responses": [
make_minimal_cs_comment({"id": "comment_{}".format(i)}) for i in range(10)
]
})
def assert_page_correct(page, page_size, expected_start, expected_stop, expected_next, expected_prev):
"""
Check that requesting the given page/page_size returns the expected
output
"""
actual = self.get_comment_list(thread, endorsed=True, page=page, page_size=page_size).data
result_ids = [result["id"] for result in actual["results"]]
self.assertEqual(
result_ids,
["comment_{}".format(i) for i in range(expected_start, expected_stop)]
)
self.assertEqual(
actual["pagination"]["next"],
"http://testserver/test_path?page={}".format(expected_next) if expected_next else None
)
self.assertEqual(
actual["pagination"]["previous"],
"http://testserver/test_path?page={}".format(expected_prev) if expected_prev else None
)
# Only page
assert_page_correct(
page=1,
page_size=10,
expected_start=0,
expected_stop=10,
expected_next=None,
expected_prev=None
)
# First page of many
assert_page_correct(
page=1,
page_size=4,
expected_start=0,
expected_stop=4,
expected_next=2,
expected_prev=None
)
# Middle page of many
assert_page_correct(
page=2,
page_size=4,
expected_start=4,
expected_stop=8,
expected_next=3,
expected_prev=1
)
# Last page of many
assert_page_correct(
page=3,
page_size=4,
expected_start=8,
expected_stop=10,
expected_next=None,
expected_prev=2
)
# Page past the end
with self.assertRaises(PageNotFoundError):
self.get_comment_list(thread, endorsed=True, page=2, page_size=10)
@attr('shard_2')
@ddt.ddt
@disable_signal(api, 'thread_created')
@disable_signal(api, 'thread_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class CreateThreadTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_thread"""
@classmethod
def setUpClass(cls):
super(CreateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.minimal_data = {
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
}
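    # Happy path: create_thread POSTs to the comments service, returns the
    # serialized thread, fires the thread_created signal, and emits an
    # edx.forum.thread.created tracking event.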
@mock.patch("eventtracking.tracker.emit")
def test_basic(self, mock_emit):
cs_thread = make_minimal_cs_thread({
"id": "test_id",
"username": self.user.username,
"created_at": "2015-05-19T00:00:00Z",
"updated_at": "2015-05-19T00:00:00Z",
})
self.register_post_thread_response(cs_thread)
with self.assert_signal_sent(api, 'thread_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_thread(self.request, self.minimal_data)
expected = {
"id": "test_id",
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"group_id": None,
"group_name": None,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-19T00:00:00Z",
"updated_at": "2015-05-19T00:00:00Z",
"type": "discussion",
"title": "Test Title",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"comment_count": 1,
"unread_comment_count": 1,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_id",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["abuse_flagged", "following", "raw_body", "read", "title", "topic_id", "type", "voted"],
'read': False,
'has_endorsed': False,
'response_count': 0,
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["test_topic"],
"thread_type": ["discussion"],
"title": ["Test Title"],
"body": ["Test body"],
"user_id": [str(self.user.id)],
}
)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.created")
self.assertEqual(
event_data,
{
"commentable_id": "test_topic",
"group_id": None,
"thread_type": "discussion",
"title": "Test Title",
"anonymous": False,
"anonymous_to_peers": False,
"options": {"followed": False},
"id": "test_id",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
["no_group_set", "group_is_none", "group_is_set"],
)
)
@ddt.unpack
def test_group_id(self, role_name, course_is_cohorted, topic_is_cohorted, data_group_state):
"""
Tests whether the user has permission to create a thread with certain
group_id values.
        A validation error is expected when the request explicitly supplies a
        group_id (either None or a value) but the course is not cohorted or the
        requester is only a student. Otherwise the thread is created, and the
        user's cohort id is filled in automatically only when both the course
        and the topic are cohorted.
"""
cohort_course = CourseFactory.create(
discussion_topics={"Test Topic": {"id": "test_topic"}},
cohort_config={
"cohorted": course_is_cohorted,
"cohorted_discussions": ["test_topic"] if topic_is_cohorted else [],
}
)
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
if course_is_cohorted:
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_post_thread_response({})
data = self.minimal_data.copy()
data["course_id"] = unicode(cohort_course.id)
if data_group_state == "group_is_none":
data["group_id"] = None
elif data_group_state == "group_is_set":
if course_is_cohorted:
data["group_id"] = cohort.id + 1
else:
data["group_id"] = 1 # Set to any value since there is no cohort
expected_error = (
data_group_state in ["group_is_none", "group_is_set"] and
(not course_is_cohorted or role_name == FORUM_ROLE_STUDENT)
)
try:
create_thread(self.request, data)
self.assertFalse(expected_error)
actual_post_data = httpretty.last_request().parsed_body
if data_group_state == "group_is_set":
self.assertEqual(actual_post_data["group_id"], [str(data["group_id"])])
elif data_group_state == "no_group_set" and course_is_cohorted and topic_is_cohorted:
self.assertEqual(actual_post_data["group_id"], [str(cohort.id)])
else:
self.assertNotIn("group_id", actual_post_data)
except ValidationError as ex:
if not expected_error:
self.fail("Unexpected validation error: {}".format(ex))
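    # following="True" at creation time should additionally POST a subscription
    # to the new thread on behalf of the requesting user.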
def test_following(self):
self.register_post_thread_response({"id": "test_id"})
self.register_subscription_response(self.user)
data = self.minimal_data.copy()
data["following"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["following"], True)
cs_request = httpretty.last_request()
self.assertEqual(
urlparse(cs_request.path).path,
"/api/v1/users/{}/subscriptions".format(self.user.id)
)
self.assertEqual(cs_request.method, "POST")
self.assertEqual(
cs_request.parsed_body,
{"source_type": ["thread"], "source_id": ["test_id"]}
)
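    # voted="True" at creation time should PUT an upvote on the new thread and
    # fire the thread_voted signal.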
def test_voted(self):
self.register_post_thread_response({"id": "test_id"})
self.register_thread_votes_response("test_id")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'thread_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_thread(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_thread_response({"id": "test_id"})
self.register_thread_flag_response("test_id")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_thread(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/threads/test_id/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_course_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["This field is required."]})
def test_course_id_invalid(self):
with self.assertRaises(ValidationError) as assertion:
create_thread(self.request, {"course_id": "invalid!"})
self.assertEqual(assertion.exception.message_dict, {"course_id": ["Invalid value."]})
def test_nonexistent_course(self):
with self.assertRaises(CourseNotFoundError):
create_thread(self.request, {"course_id": "non/existent/course"})
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
create_thread(self.request, self.minimal_data)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.minimal_data["course_id"] = unicode(disabled_course.id)
with self.assertRaises(DiscussionDisabledError):
create_thread(self.request, self.minimal_data)
def test_invalid_field(self):
data = self.minimal_data.copy()
data["type"] = "invalid_type"
with self.assertRaises(ValidationError):
create_thread(self.request, data)
@attr('shard_2')
@ddt.ddt
@disable_signal(api, 'comment_created')
@disable_signal(api, 'comment_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class CreateCommentTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for create_comment"""
@classmethod
def setUpClass(cls):
super(CreateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(CreateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
})
)
self.minimal_data = {
"thread_id": "test_thread",
"raw_body": "Test body",
}
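    # Without a parent_id the comment is POSTed to the thread's comments
    # endpoint and edx.forum.response.created is emitted; with a parent_id it
    # is POSTed to the parent comment's endpoint and edx.forum.comment.created
    # is emitted instead.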
@ddt.data(None, "test_parent")
@mock.patch("eventtracking.tracker.emit")
def test_success(self, parent_id, mock_emit):
if parent_id:
self.register_get_comment_response({"id": parent_id, "thread_id": "test_thread"})
self.register_post_comment_response(
{
"id": "test_comment",
"username": self.user.username,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
},
thread_id="test_thread",
parent_id=parent_id
)
data = self.minimal_data.copy()
if parent_id:
data["parent_id"] = parent_id
with self.assert_signal_sent(api, 'comment_created', sender=None, user=self.user, exclude_args=('post',)):
actual = create_comment(self.request, data)
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-27T00:00:00Z",
"updated_at": "2015-05-27T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"],
"child_count": 0,
}
self.assertEqual(actual, expected)
expected_url = (
"/api/v1/comments/{}".format(parent_id) if parent_id else
"/api/v1/threads/test_thread/comments"
)
self.assertEqual(
urlparse(httpretty.last_request().path).path,
expected_url
)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)]
}
)
expected_event_name = (
"edx.forum.comment.created" if parent_id else
"edx.forum.response.created"
)
expected_event_data = {
"discussion": {"id": "test_thread"},
"commentable_id": "test_topic",
"options": {"followed": False},
"id": "test_comment",
"truncated": False,
"body": "Test body",
"url": "",
"user_forums_roles": [FORUM_ROLE_STUDENT],
"user_course_roles": [],
}
if parent_id:
expected_event_data["response"] = {"id": parent_id}
actual_event_name, actual_event_data = mock_emit.call_args[0]
self.assertEqual(actual_event_name, expected_event_name)
self.assertEqual(actual_event_data, expected_event_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
)
)
@ddt.unpack
def test_endorsed(self, role_name, is_thread_author, thread_type):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"thread_type": thread_type,
"user_id": str(self.user.id) if is_thread_author else str(self.user.id + 1),
})
)
self.register_post_comment_response({}, "test_thread")
data = self.minimal_data.copy()
data["endorsed"] = True
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(not is_thread_author or thread_type == "discussion")
)
try:
create_comment(self.request, data)
self.assertEqual(httpretty.last_request().parsed_body["endorsed"], ["True"])
self.assertFalse(expected_error)
except ValidationError:
self.assertTrue(expected_error)
def test_voted(self):
self.register_post_comment_response({"id": "test_comment"}, "test_thread")
self.register_comment_votes_response("test_comment")
data = self.minimal_data.copy()
data["voted"] = "True"
with self.assert_signal_sent(api, 'comment_voted', sender=None, user=self.user, exclude_args=('post',)):
result = create_comment(self.request, data)
self.assertEqual(result["voted"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/votes")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(
cs_request.parsed_body,
{"user_id": [str(self.user.id)], "value": ["up"]}
)
def test_abuse_flagged(self):
self.register_post_comment_response({"id": "test_comment"}, "test_thread")
self.register_comment_flag_response("test_comment")
data = self.minimal_data.copy()
data["abuse_flagged"] = "True"
result = create_comment(self.request, data)
self.assertEqual(result["abuse_flagged"], True)
cs_request = httpretty.last_request()
self.assertEqual(urlparse(cs_request.path).path, "/api/v1/comments/test_comment/abuse_flag")
self.assertEqual(cs_request.method, "PUT")
self.assertEqual(cs_request.parsed_body, {"user_id": [str(self.user.id)]})
def test_thread_id_missing(self):
with self.assertRaises(ValidationError) as assertion:
create_comment(self.request, {})
self.assertEqual(assertion.exception.message_dict, {"thread_id": ["This field is required."]})
def test_thread_id_not_found(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ThreadNotFoundError):
create_comment(self.request, self.minimal_data)
def test_nonexistent_course(self):
self.register_get_thread_response(
make_minimal_cs_thread({"id": "test_thread", "course_id": "non/existent/course"})
)
with self.assertRaises(CourseNotFoundError):
create_comment(self.request, self.minimal_data)
def test_not_enrolled(self):
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
create_comment(self.request, self.minimal_data)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_get_thread_response(
make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(disabled_course.id),
"commentable_id": "test_topic",
})
)
with self.assertRaises(DiscussionDisabledError):
create_comment(self.request, self.minimal_data)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_get_thread_response(make_minimal_cs_thread({
"id": "cohort_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}))
self.register_post_comment_response({}, thread_id="cohort_thread")
data = self.minimal_data.copy()
data["thread_id"] = "cohort_thread"
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
create_comment(self.request, data)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
def test_invalid_field(self):
data = self.minimal_data.copy()
del data["raw_body"]
with self.assertRaises(ValidationError):
create_comment(self.request, data)
@attr('shard_2')
@ddt.ddt
@disable_signal(api, 'thread_edited')
@disable_signal(api, 'thread_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class UpdateThreadTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_thread"""
@classmethod
def setUpClass(cls):
super(UpdateThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(self.course.id),
"commentable_id": "original_topic",
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"thread_type": "discussion",
"title": "Original Title",
"body": "Original body",
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_put_thread_response(cs_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
# Ensure that the default following value of False is not applied implicitly
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_thread()
update_thread(self.request, "test_thread", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_thread(self.request, "test_thread", {"raw_body": "Edited body"})
expected = {
"id": "test_thread",
"course_id": unicode(self.course.id),
"topic_id": "original_topic",
"group_id": None,
"group_name": None,
"author": self.user.username,
"author_label": None,
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"type": "discussion",
"title": "Original Title",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"pinned": False,
"closed": False,
"following": False,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"comment_count": 1,
"unread_comment_count": 0,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"editable_fields": ["abuse_flagged", "following", "raw_body", "read", "title", "topic_id", "type", "voted"],
'read': False,
'has_endorsed': False,
'response_count': 0
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"commentable_id": ["original_topic"],
"thread_type": ["discussion"],
"title": ["Original Title"],
"body": ["Edited body"],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"closed": ["False"],
"pinned": ["False"],
"read": ["False"],
"requested_user_id": [str(self.user.id)],
}
)
def test_nonexistent_thread(self):
self.register_get_thread_error_response("test_thread", 404)
with self.assertRaises(ThreadNotFoundError):
update_thread(self.request, "test_thread", {})
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
update_thread(self.request, "test_thread", {})
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
update_thread(self.request, "test_thread", {})
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": unicode(disabled_course.id)})
with self.assertRaises(DiscussionDisabledError):
update_thread(self.request, "test_thread", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_thread(self.request, "test_thread", {})
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_author_only_fields(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
data = {field: "edited" for field in ["topic_id", "title", "raw_body"]}
data["type"] = "question"
expected_error = role_name == FORUM_ROLE_STUDENT
try:
update_thread(self.request, "test_thread", data)
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{field: ["This field is not editable."] for field in data.keys()}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_following(self, old_following, new_following):
"""
Test attempts to edit the "following" field.
old_following indicates whether the thread should be followed at the
start of the test. new_following indicates the value for the "following"
field in the update. If old_following and new_following are the same, no
update should be made. Otherwise, a subscription should be POSTed or
DELETEd according to the new_following value.
"""
if old_following:
self.register_get_user_response(self.user, subscribed_thread_ids=["test_thread"])
self.register_subscription_response(self.user)
self.register_thread()
data = {"following": new_following}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["following"], new_following)
last_request_path = urlparse(httpretty.last_request().path).path
subscription_url = "/api/v1/users/{}/subscriptions".format(self.user.id)
if old_following == new_following:
self.assertNotEqual(last_request_path, subscription_url)
else:
self.assertEqual(last_request_path, subscription_url)
self.assertEqual(
httpretty.last_request().method,
"POST" if new_following else "DELETE"
)
request_data = (
httpretty.last_request().parsed_body if new_following else
parse_qs(urlparse(httpretty.last_request().path).query)
)
request_data.pop("request_id", None)
self.assertEqual(
request_data,
{"source_type": ["thread"], "source_id": ["test_thread"]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
@mock.patch("eventtracking.tracker.emit")
def test_voted(self, current_vote_status, new_vote_status, mock_emit):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the thread should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
self.register_thread_votes_response("test_thread")
self.register_thread()
data = {"voted": new_vote_status}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/threads/test_thread/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.thread.voted")
self.assertEqual(
event_data,
{
'undo_vote': not new_vote_status,
'url': '',
'target_username': self.user.username,
'vote_value': 'up',
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'commentable_id': 'original_topic',
'id': 'test_thread'
}
)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
        Tests that vote_count increases and decreases correctly for the same user.
"""
        # setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
starting_vote_count = 1
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": starting_vote_count}})
        # first vote
data = {"voted": first_vote}
result = update_thread(self.request, "test_thread", data)
self.register_thread(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
        # second vote
data = {"voted": second_vote}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
        Tests that vote_count increases and decreases correctly across different users.
"""
        # setup
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_thread"])
vote_count += 1
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_thread_votes_response("test_thread")
self.register_thread(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_thread(request, "test_thread", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_thread"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the thread should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
        unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_thread_flag_response("test_thread")
self.register_thread({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_thread(self.request, "test_thread", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/threads/test_thread/abuse_flag"
unflag_url = "/api/v1/threads/test_thread/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
def test_invalid_field(self):
self.register_thread()
with self.assertRaises(ValidationError) as assertion:
update_thread(self.request, "test_thread", {"raw_body": ""})
self.assertEqual(
assertion.exception.message_dict,
{"raw_body": ["This field may not be blank."]}
)
@attr('shard_2')
@ddt.ddt
@disable_signal(api, 'comment_edited')
@disable_signal(api, 'comment_voted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class UpdateCommentTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for update_comment"""
@classmethod
def setUpClass(cls):
super(UpdateCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(UpdateCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment(self, overrides=None, thread_overrides=None, course=None):
"""
Make a comment with appropriate data overridden by the overrides
parameter and register mock responses for both GET and PUT on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
if course is None:
course = self.course
cs_thread_data = make_minimal_cs_thread({
"id": "test_thread",
"course_id": unicode(course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": "test_comment",
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"body": "Original body",
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_put_comment_response(cs_comment_data)
def test_empty(self):
"""Check that an empty update does not make any modifying requests."""
self.register_comment()
update_comment(self.request, "test_comment", {})
for request in httpretty.httpretty.latest_requests:
self.assertEqual(request.method, "GET")
@ddt.data(None, "test_parent")
def test_basic(self, parent_id):
self.register_comment({"parent_id": parent_id})
with self.assert_signal_sent(api, 'comment_edited', sender=None, user=self.user, exclude_args=('post',)):
actual = update_comment(self.request, "test_comment", {"raw_body": "Edited body"})
expected = {
"id": "test_comment",
"thread_id": "test_thread",
"parent_id": parent_id,
"author": self.user.username,
"author_label": None,
"created_at": "2015-06-03T00:00:00Z",
"updated_at": "2015-06-03T00:00:00Z",
"raw_body": "Edited body",
"rendered_body": "<p>Edited body</p>",
"endorsed": False,
"endorsed_by": None,
"endorsed_by_label": None,
"endorsed_at": None,
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"children": [],
"editable_fields": ["abuse_flagged", "raw_body", "voted"],
"child_count": 0,
}
self.assertEqual(actual, expected)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Edited body"],
"course_id": [unicode(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["False"],
}
)
def test_nonexistent_comment(self):
self.register_get_comment_error_response("test_comment", 404)
with self.assertRaises(CommentNotFoundError):
update_comment(self.request, "test_comment", {})
def test_nonexistent_course(self):
self.register_comment(thread_overrides={"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
update_comment(self.request, "test_comment", {})
def test_unenrolled(self):
self.register_comment()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
update_comment(self.request, "test_comment", {})
def test_discussions_disabled(self):
self.register_comment(course=_discussion_disabled_course_for(self.user))
with self.assertRaises(DiscussionDisabledError):
update_comment(self.request, "test_comment", {})
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_get_thread_response(make_minimal_cs_thread())
self.register_comment(
{"thread_id": "test_thread"},
thread_overrides={
"id": "test_thread",
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
update_comment(self.request, "test_comment", {})
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
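    # raw_body may be edited by the comment's author or by any privileged forum
    # role; other students get a validation error.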
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
[True, False],
))
@ddt.unpack
def test_raw_body_access(self, role_name, is_thread_author, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1))
}
)
expected_error = role_name == FORUM_ROLE_STUDENT and not is_comment_author
try:
update_comment(self.request, "test_comment", {"raw_body": "edited"})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"raw_body": ["This field is not editable."]}
)
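    # endorsed may be set by privileged roles on any thread; a student may only
    # set it as the author of a question thread.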
@ddt.data(*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["question", "discussion"],
[True, False],
))
@ddt.unpack
def test_endorsed_access(self, role_name, is_thread_author, thread_type, is_comment_author):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment(
{"user_id": str(self.user.id if is_comment_author else (self.user.id + 1))},
thread_overrides={
"thread_type": thread_type,
"user_id": str(self.user.id if is_thread_author else (self.user.id + 1)),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
(thread_type == "discussion" or not is_thread_author)
)
try:
update_comment(self.request, "test_comment", {"endorsed": True})
self.assertFalse(expected_error)
except ValidationError as err:
self.assertTrue(expected_error)
self.assertEqual(
err.message_dict,
{"endorsed": ["This field is not editable."]}
)
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
@mock.patch("eventtracking.tracker.emit")
def test_voted(self, current_vote_status, new_vote_status, mock_emit):
"""
Test attempts to edit the "voted" field.
current_vote_status indicates whether the comment should be upvoted at
the start of the test. new_vote_status indicates the value for the
"voted" field in the update. If current_vote_status and new_vote_status
are the same, no update should be made. Otherwise, a vote should be PUT
or DELETEd according to the new_vote_status value.
"""
vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": new_vote_status}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if new_vote_status else 0)
self.assertEqual(result["voted"], new_vote_status)
last_request_path = urlparse(httpretty.last_request().path).path
votes_url = "/api/v1/comments/test_comment/votes"
if current_vote_status == new_vote_status:
self.assertNotEqual(last_request_path, votes_url)
else:
self.assertEqual(last_request_path, votes_url)
self.assertEqual(
httpretty.last_request().method,
"PUT" if new_vote_status else "DELETE"
)
actual_request_data = (
httpretty.last_request().parsed_body if new_vote_status else
parse_qs(urlparse(httpretty.last_request().path).query)
)
actual_request_data.pop("request_id", None)
expected_request_data = {"user_id": [str(self.user.id)]}
if new_vote_status:
expected_request_data["value"] = ["up"]
self.assertEqual(actual_request_data, expected_request_data)
event_name, event_data = mock_emit.call_args[0]
self.assertEqual(event_name, "edx.forum.response.voted")
self.assertEqual(
event_data,
{
'undo_vote': not new_vote_status,
'url': '',
'target_username': self.user.username,
'vote_value': 'up',
'user_forums_roles': [FORUM_ROLE_STUDENT],
'user_course_roles': [],
'commentable_id': 'dummy',
'id': 'test_comment'
}
)
@ddt.data(*itertools.product([True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count(self, current_vote_status, first_vote, second_vote):
"""
        Tests that vote_count increases and decreases correctly for the same user.
"""
        # setup
starting_vote_count = 0
if current_vote_status:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
starting_vote_count = 1
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": starting_vote_count}})
        # first vote
data = {"voted": first_vote}
result = update_comment(self.request, "test_comment", data)
self.register_comment(overrides={"voted": first_vote})
self.assertEqual(result["vote_count"], 1 if first_vote else 0)
        # second vote
data = {"voted": second_vote}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["vote_count"], 1 if second_vote else 0)
@ddt.data(*itertools.product([True, False], [True, False], [True, False], [True, False]))
@ddt.unpack
def test_vote_count_two_users(
self,
current_user1_vote,
current_user2_vote,
user1_vote,
user2_vote
):
"""
        Tests that vote_count increases and decreases correctly across different users.
"""
user2 = UserFactory.create()
self.register_get_user_response(user2)
request2 = RequestFactory().get("/test_path")
request2.user = user2
CourseEnrollmentFactory.create(user=user2, course_id=self.course.id)
vote_count = 0
if current_user1_vote:
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
vote_count += 1
if current_user2_vote:
self.register_get_user_response(user2, upvoted_ids=["test_comment"])
vote_count += 1
for (current_vote, user_vote, request) in \
[(current_user1_vote, user1_vote, self.request),
(current_user2_vote, user2_vote, request2)]:
self.register_comment_votes_response("test_comment")
self.register_comment(overrides={"votes": {"up_count": vote_count}})
data = {"voted": user_vote}
result = update_comment(request, "test_comment", data)
if current_vote == user_vote:
self.assertEqual(result["vote_count"], vote_count)
elif user_vote:
vote_count += 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=["test_comment"])
else:
vote_count -= 1
self.assertEqual(result["vote_count"], vote_count)
self.register_get_user_response(self.user, upvoted_ids=[])
@ddt.data(*itertools.product([True, False], [True, False]))
@ddt.unpack
def test_abuse_flagged(self, old_flagged, new_flagged):
"""
Test attempts to edit the "abuse_flagged" field.
old_flagged indicates whether the comment should be flagged at the start
of the test. new_flagged indicates the value for the "abuse_flagged"
field in the update. If old_flagged and new_flagged are the same, no
update should be made. Otherwise, a PUT should be made to the flag or
        unflag endpoint according to the new_flagged value.
"""
self.register_get_user_response(self.user)
self.register_comment_flag_response("test_comment")
self.register_comment({"abuse_flaggers": [str(self.user.id)] if old_flagged else []})
data = {"abuse_flagged": new_flagged}
result = update_comment(self.request, "test_comment", data)
self.assertEqual(result["abuse_flagged"], new_flagged)
last_request_path = urlparse(httpretty.last_request().path).path
flag_url = "/api/v1/comments/test_comment/abuse_flag"
unflag_url = "/api/v1/comments/test_comment/abuse_unflag"
if old_flagged == new_flagged:
self.assertNotEqual(last_request_path, flag_url)
self.assertNotEqual(last_request_path, unflag_url)
else:
self.assertEqual(
last_request_path,
flag_url if new_flagged else unflag_url
)
self.assertEqual(httpretty.last_request().method, "PUT")
self.assertEqual(
httpretty.last_request().parsed_body,
{"user_id": [str(self.user.id)]}
)
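    # Editor's note (illustrative summary of the assertions above, not original
    # test code): flag state is toggled with a PUT carrying user_id to either
    #   /api/v1/comments/<comment_id>/abuse_flag     (flag)
    #   /api/v1/comments/<comment_id>/abuse_unflag   (unflag)
    # and no request is made when the requested "abuse_flagged" state already holds.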
@attr('shard_2')
@ddt.ddt
@disable_signal(api, 'thread_deleted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class DeleteThreadTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_thread"""
@classmethod
def setUpClass(cls):
super(DeleteThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for both GET and DELETE on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"user_id": str(self.user.id),
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
self.register_delete_thread_response(cs_data["id"])
def test_basic(self):
self.register_thread()
with self.assert_signal_sent(api, 'thread_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_thread(self.request, self.thread_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/threads/{}".format(self.thread_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(ThreadNotFoundError):
delete_thread(self.request, "missing_thread")
def test_nonexistent_course(self):
self.register_thread({"course_id": "non/existent/course"})
with self.assertRaises(CourseNotFoundError):
delete_thread(self.request, self.thread_id)
def test_not_enrolled(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
delete_thread(self.request, self.thread_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_thread(overrides={"course_id": unicode(disabled_course.id)})
with self.assertRaises(DiscussionDisabledError):
delete_thread(self.request, self.thread_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_thread({"user_id": str(self.user.id + 1)})
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a thread
        All privileged roles are able to delete a thread. A student can only
        delete a thread if the student is the author and the thread is either
        not in a cohort or in the student's own cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@attr('shard_2')
@ddt.ddt
@disable_signal(api, 'comment_deleted')
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class DeleteCommentTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase,
MockSignalHandlerMixin
):
"""Tests for delete_comment"""
@classmethod
def setUpClass(cls):
super(DeleteCommentTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(DeleteCommentTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/test_path")
self.request.user = self.user
self.thread_id = "test_thread"
self.comment_id = "test_comment"
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
def register_comment_and_thread(self, overrides=None, thread_overrides=None):
"""
Make a comment with appropriate data overridden by the override
parameters and register mock responses for both GET and DELETE on its
endpoint. Also mock GET for the related thread with thread_overrides.
"""
cs_thread_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id)
})
cs_thread_data.update(thread_overrides or {})
self.register_get_thread_response(cs_thread_data)
cs_comment_data = make_minimal_cs_comment({
"id": self.comment_id,
"course_id": cs_thread_data["course_id"],
"thread_id": cs_thread_data["id"],
"username": self.user.username,
"user_id": str(self.user.id),
})
cs_comment_data.update(overrides or {})
self.register_get_comment_response(cs_comment_data)
self.register_delete_comment_response(self.comment_id)
def test_basic(self):
self.register_comment_and_thread()
with self.assert_signal_sent(api, 'comment_deleted', sender=None, user=self.user, exclude_args=('post',)):
self.assertIsNone(delete_comment(self.request, self.comment_id))
self.assertEqual(
urlparse(httpretty.last_request().path).path,
"/api/v1/comments/{}".format(self.comment_id)
)
self.assertEqual(httpretty.last_request().method, "DELETE")
def test_comment_id_not_found(self):
self.register_get_comment_error_response("missing_comment", 404)
with self.assertRaises(CommentNotFoundError):
delete_comment(self.request, "missing_comment")
def test_nonexistent_course(self):
self.register_comment_and_thread(
thread_overrides={"course_id": "non/existent/course"}
)
with self.assertRaises(CourseNotFoundError):
delete_comment(self.request, self.comment_id)
def test_not_enrolled(self):
self.register_comment_and_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
delete_comment(self.request, self.comment_id)
def test_discussions_disabled(self):
disabled_course = _discussion_disabled_course_for(self.user)
self.register_comment_and_thread(
thread_overrides={"course_id": unicode(disabled_course.id)},
overrides={"course_id": unicode(disabled_course.id)}
)
with self.assertRaises(DiscussionDisabledError):
delete_comment(self.request, self.comment_id)
@ddt.data(
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
)
def test_non_author_delete_allowed(self, role_name):
role = Role.objects.create(name=role_name, course_id=self.course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"user_id": str(self.user.id + 1)}
)
expected_error = role_name == FORUM_ROLE_STUDENT
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except PermissionDenied:
self.assertTrue(expected_error)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for deleting a comment
        All privileged roles are able to delete a comment. A student can only
        delete a comment if the student is the author and the comment is either
        not in a cohort or in the student's own cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.user, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.user])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.user]
self.register_comment_and_thread(
overrides={"thread_id": "test_thread"},
thread_overrides={
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
}
)
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
delete_comment(self.request, self.comment_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
@attr('shard_2')
@ddt.ddt
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
class RetrieveThreadTest(
CommentsServiceMockMixin,
UrlResetMixin,
SharedModuleStoreTestCase
):
"""Tests for get_thread"""
@classmethod
def setUpClass(cls):
super(RetrieveThreadTest, cls).setUpClass()
cls.course = CourseFactory.create()
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(RetrieveThreadTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.thread_author = UserFactory.create()
self.register_get_user_response(self.thread_author)
self.request = RequestFactory().get("/test_path")
self.request.user = self.thread_author
self.thread_id = "test_thread"
CourseEnrollmentFactory.create(user=self.thread_author, course_id=self.course.id)
def register_thread(self, overrides=None):
"""
Make a thread with appropriate data overridden by the overrides
parameter and register mock responses for GET on its
endpoint.
"""
cs_data = make_minimal_cs_thread({
"id": self.thread_id,
"course_id": unicode(self.course.id),
"commentable_id": "test_topic",
"username": self.thread_author.username,
"user_id": str(self.thread_author.id),
"title": "Test Title",
"body": "Test body",
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"resp_total": 0,
})
cs_data.update(overrides or {})
self.register_get_thread_response(cs_data)
def test_basic(self):
expected_response_data = {
"author": self.thread_author.username,
"author_label": None,
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"editable_fields": ["abuse_flagged", "following", "raw_body", "read", "title", "topic_id", "type", "voted"],
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"group_id": None,
"group_name": None,
"title": "Test Title",
"pinned": False,
"closed": False,
"following": False,
"comment_count": 1,
"unread_comment_count": 1,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"read": False,
"has_endorsed": False,
"id": "test_thread",
"type": "discussion",
"response_count": 2,
}
self.register_thread({"resp_total": 2})
self.assertEqual(get_thread(self.request, self.thread_id), expected_response_data)
self.assertEqual(httpretty.last_request().method, "GET")
def test_thread_id_not_found(self):
self.register_get_thread_error_response("missing_thread", 404)
with self.assertRaises(ThreadNotFoundError):
get_thread(self.request, "missing_thread")
def test_nonauthor_enrolled_in_course(self):
expected_response_data = {
"author": self.thread_author.username,
"author_label": None,
"created_at": "2015-05-29T00:00:00Z",
"updated_at": "2015-05-29T00:00:00Z",
"raw_body": "Test body",
"rendered_body": "<p>Test body</p>",
"abuse_flagged": False,
"voted": False,
"vote_count": 0,
"editable_fields": ["abuse_flagged", "following", "read", "voted"],
"course_id": unicode(self.course.id),
"topic_id": "test_topic",
"group_id": None,
"group_name": None,
"title": "Test Title",
"pinned": False,
"closed": False,
"following": False,
"comment_count": 1,
"unread_comment_count": 1,
"comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
"endorsed_comment_list_url": None,
"non_endorsed_comment_list_url": None,
"read": False,
"has_endorsed": False,
"id": "test_thread",
"type": "discussion",
"response_count": 0,
}
non_author_user = UserFactory.create()
self.register_get_user_response(non_author_user)
CourseEnrollmentFactory.create(user=non_author_user, course_id=self.course.id)
self.register_thread()
self.request.user = non_author_user
self.assertEqual(get_thread(self.request, self.thread_id), expected_response_data)
self.assertEqual(httpretty.last_request().method, "GET")
def test_not_enrolled_in_course(self):
self.register_thread()
self.request.user = UserFactory.create()
with self.assertRaises(CourseNotFoundError):
get_thread(self.request, self.thread_id)
@ddt.data(
*itertools.product(
[
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
],
[True, False],
["no_group", "match_group", "different_group"],
)
)
@ddt.unpack
def test_group_access(self, role_name, course_is_cohorted, thread_group_state):
"""
Tests group access for retrieving a thread
        All privileged roles are able to retrieve a thread. A student can only
        retrieve a thread if the student is the author and the thread is either
        not in a cohort or in the student's own cohort.
"""
cohort_course = CourseFactory.create(cohort_config={"cohorted": course_is_cohorted})
CourseEnrollmentFactory.create(user=self.thread_author, course_id=cohort_course.id)
cohort = CohortFactory.create(course_id=cohort_course.id, users=[self.thread_author])
role = Role.objects.create(name=role_name, course_id=cohort_course.id)
role.users = [self.thread_author]
self.register_thread({
"course_id": unicode(cohort_course.id),
"group_id": (
None if thread_group_state == "no_group" else
cohort.id if thread_group_state == "match_group" else
cohort.id + 1
),
})
expected_error = (
role_name == FORUM_ROLE_STUDENT and
course_is_cohorted and
thread_group_state == "different_group"
)
try:
get_thread(self.request, self.thread_id)
self.assertFalse(expected_error)
except ThreadNotFoundError:
self.assertTrue(expected_error)
|
agpl-3.0
|
Johnzero/OE7
|
openerp/addons-modules/account_asset/account_asset.py
|
14
|
29289
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class account_asset_category(osv.osv):
_name = 'account.asset.category'
_description = 'Asset category'
_columns = {
'name': fields.char('Name', size=64, required=True, select=1),
'note': fields.text('Note'),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account'),
'account_asset_id': fields.many2one('account.account', 'Asset Account', required=True),
'account_depreciation_id': fields.many2one('account.account', 'Depreciation Account', required=True),
'account_expense_depreciation_id': fields.many2one('account.account', 'Depr. Expense Account', required=True),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Period Length', help="State here the time between 2 depreciations, in months", required=True),
'method_progress_factor': fields.float('Degressive Factor'),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
" * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
" * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_end': fields.date('Ending date'),
        'prorata': fields.boolean('Prorata Temporis', help='Indicates that the first depreciation entry for this asset has to be done from the purchase date instead of the first of January'),
'open_asset': fields.boolean('Skip Draft State', help="Check this if you want to automatically confirm the assets of this category when created by invoices."),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.category', context=context),
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
}
def onchange_account_asset(self, cr, uid, ids, account_asset_id, context=None):
res = {'value':{}}
if account_asset_id:
res['value'] = {'account_depreciation_id': account_asset_id}
return res
account_asset_category()
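# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal, dependency-free approximation of the two computation methods
# described in the 'method' help text above. It ignores prorata temporis,
# currency conversion and already-posted lines, all of which the real
# compute_depreciation_board() below does handle.
def _sketch_depreciation_amounts(gross_value, method='linear',
                                 method_number=5, progress_factor=0.3):
    """Return the amount of each depreciation line for a simplified asset."""
    amounts = []
    residual = gross_value
    for i in range(1, method_number + 1):
        if i == method_number:
            amount = residual                        # last line absorbs the remainder
        elif method == 'linear':
            amount = gross_value / float(method_number)
        else:                                        # 'degressive'
            amount = residual * progress_factor
        amount = round(amount, 2)
        amounts.append(amount)
        residual -= amount
    return amounts
# e.g. _sketch_depreciation_amounts(10000.0)               -> [2000.0, 2000.0, 2000.0, 2000.0, 2000.0]
#      _sketch_depreciation_amounts(10000.0, 'degressive') -> [3000.0, 2100.0, 1470.0, 1029.0, 2401.0]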
class account_asset_asset(osv.osv):
_name = 'account.asset.asset'
_description = 'Asset'
def unlink(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.account_move_line_ids:
raise osv.except_osv(_('Error!'), _('You cannot delete an asset that contains posted depreciation lines.'))
return super(account_asset_asset, self).unlink(cr, uid, ids, context=context)
def _get_period(self, cr, uid, context=None):
ctx = dict(context or {}, account_period_prefer_normal=True)
periods = self.pool.get('account.period').find(cr, uid, context=ctx)
if periods:
return periods[0]
else:
return False
def _get_last_depreciation_date(self, cr, uid, ids, context=None):
"""
        @param ids: ids of account.asset.asset records
        @return: dictionary mapping each asset id to the date of its last posted depreciation entry, or to the asset's purchase date if there is none
"""
cr.execute("""
SELECT a.id as id, COALESCE(MAX(l.date),a.purchase_date) AS date
FROM account_asset_asset a
LEFT JOIN account_move_line l ON (l.asset_id = a.id)
WHERE a.id IN %s
GROUP BY a.id, a.purchase_date """, (tuple(ids),))
return dict(cr.fetchall())
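    # Editor's note (illustrative): the query above yields one row per asset, so
    # the result is a dict like {asset_id: 'YYYY-MM-DD'}, falling back to the
    # purchase date for assets without any posted depreciation move line; the
    # string form is what strptime() expects in compute_depreciation_board() below.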
def _compute_board_amount(self, cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=None):
#by default amount = 0
amount = 0
if i == undone_dotation_number:
amount = residual_amount
else:
if asset.method == 'linear':
amount = amount_to_depr / (undone_dotation_number - len(posted_depreciation_line_ids))
if asset.prorata:
amount = amount_to_depr / asset.method_number
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (amount_to_depr / asset.method_number) / total_days * days
elif i == undone_dotation_number:
amount = (amount_to_depr / asset.method_number) / total_days * (total_days - days)
elif asset.method == 'degressive':
amount = residual_amount * asset.method_progress_factor
if asset.prorata:
days = total_days - float(depreciation_date.strftime('%j'))
if i == 1:
amount = (residual_amount * asset.method_progress_factor) / total_days * days
elif i == undone_dotation_number:
amount = (residual_amount * asset.method_progress_factor) / total_days * (total_days - days)
return amount
def _compute_board_undone_dotation_nb(self, cr, uid, asset, depreciation_date, total_days, context=None):
undone_dotation_number = asset.method_number
if asset.method_time == 'end':
end_date = datetime.strptime(asset.method_end, '%Y-%m-%d')
undone_dotation_number = 0
while depreciation_date <= end_date:
depreciation_date = (datetime(depreciation_date.year, depreciation_date.month, depreciation_date.day) + relativedelta(months=+asset.method_period))
undone_dotation_number += 1
if asset.prorata:
undone_dotation_number += 1
return undone_dotation_number
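    # Editor's note (illustrative): with method_time == 'end', the loop above counts
    # method_period-month steps from the first depreciation date up to and including
    # method_end; e.g. starting 2013-01-01 with a 12-month period and method_end
    # 2015-12-31 gives 3 lines (plus one extra line when prorata is set).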
def compute_depreciation_board(self, cr, uid, ids, context=None):
depreciation_lin_obj = self.pool.get('account.asset.depreciation.line')
currency_obj = self.pool.get('res.currency')
for asset in self.browse(cr, uid, ids, context=context):
if asset.value_residual == 0.0:
continue
posted_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_check', '=', True)],order='depreciation_date desc')
old_depreciation_line_ids = depreciation_lin_obj.search(cr, uid, [('asset_id', '=', asset.id), ('move_id', '=', False)])
if old_depreciation_line_ids:
depreciation_lin_obj.unlink(cr, uid, old_depreciation_line_ids, context=context)
amount_to_depr = residual_amount = asset.value_residual
if asset.prorata:
depreciation_date = datetime.strptime(self._get_last_depreciation_date(cr, uid, [asset.id], context)[asset.id], '%Y-%m-%d')
else:
# depreciation_date = 1st January of purchase year
purchase_date = datetime.strptime(asset.purchase_date, '%Y-%m-%d')
#if we already have some previous validated entries, starting date isn't 1st January but last entry + method period
if (len(posted_depreciation_line_ids)>0):
last_depreciation_date = datetime.strptime(depreciation_lin_obj.browse(cr,uid,posted_depreciation_line_ids[0],context=context).depreciation_date, '%Y-%m-%d')
depreciation_date = (last_depreciation_date+relativedelta(months=+asset.method_period))
else:
depreciation_date = datetime(purchase_date.year, 1, 1)
day = depreciation_date.day
month = depreciation_date.month
year = depreciation_date.year
total_days = (year % 4) and 365 or 366
undone_dotation_number = self._compute_board_undone_dotation_nb(cr, uid, asset, depreciation_date, total_days, context=context)
for x in range(len(posted_depreciation_line_ids), undone_dotation_number):
i = x + 1
amount = self._compute_board_amount(cr, uid, asset, i, residual_amount, amount_to_depr, undone_dotation_number, posted_depreciation_line_ids, total_days, depreciation_date, context=context)
company_currency = asset.company_id.currency_id.id
current_currency = asset.currency_id.id
# compute amount into company currency
amount = currency_obj.compute(cr, uid, current_currency, company_currency, amount, context=context)
residual_amount -= amount
vals = {
'amount': amount,
'asset_id': asset.id,
'sequence': i,
'name': str(asset.id) +'/' + str(i),
'remaining_value': residual_amount,
'depreciated_value': (asset.purchase_value - asset.salvage_value) - (residual_amount + amount),
'depreciation_date': depreciation_date.strftime('%Y-%m-%d'),
}
depreciation_lin_obj.create(cr, uid, vals, context=context)
# Considering Depr. Period as months
depreciation_date = (datetime(year, month, day) + relativedelta(months=+asset.method_period))
day = depreciation_date.day
month = depreciation_date.month
year = depreciation_date.year
return True
def validate(self, cr, uid, ids, context=None):
if context is None:
context = {}
return self.write(cr, uid, ids, {
'state':'open'
}, context)
def set_to_close(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'close'}, context=context)
def set_to_draft(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'draft'}, context=context)
def _amount_residual(self, cr, uid, ids, name, args, context=None):
cr.execute("""SELECT
l.asset_id as id, SUM(abs(l.debit-l.credit)) AS amount
FROM
account_move_line l
WHERE
l.asset_id IN %s GROUP BY l.asset_id """, (tuple(ids),))
res=dict(cr.fetchall())
for asset in self.browse(cr, uid, ids, context):
res[asset.id] = asset.purchase_value - res.get(asset.id, 0.0) - asset.salvage_value
for id in ids:
res.setdefault(id, 0.0)
return res
def onchange_company_id(self, cr, uid, ids, company_id=False, context=None):
val = {}
if company_id:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
if company.currency_id.company_id and company.currency_id.company_id.id != company_id:
val['currency_id'] = False
else:
val['currency_id'] = company.currency_id.id
return {'value': val}
def onchange_purchase_salvage_value(self, cr, uid, ids, purchase_value, salvage_value, context=None):
val = {}
for asset in self.browse(cr, uid, ids, context=context):
if purchase_value:
val['value_residual'] = purchase_value - salvage_value
if salvage_value:
val['value_residual'] = purchase_value - salvage_value
return {'value': val}
_columns = {
'account_move_line_ids': fields.one2many('account.move.line', 'asset_id', 'Entries', readonly=True, states={'draft':[('readonly',False)]}),
'name': fields.char('Asset Name', size=64, required=True, readonly=True, states={'draft':[('readonly',False)]}),
'code': fields.char('Reference', size=32, readonly=True, states={'draft':[('readonly',False)]}),
'purchase_value': fields.float('Gross Value', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'currency_id': fields.many2one('res.currency','Currency',required=True, readonly=True, states={'draft':[('readonly',False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'note': fields.text('Note'),
'category_id': fields.many2one('account.asset.category', 'Asset Category', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
'parent_id': fields.many2one('account.asset.asset', 'Parent Asset', readonly=True, states={'draft':[('readonly',False)]}),
'child_ids': fields.one2many('account.asset.asset', 'parent_id', 'Children Assets'),
'purchase_date': fields.date('Purchase Date', required=True, readonly=True, states={'draft':[('readonly',False)]}),
'state': fields.selection([('draft','Draft'),('open','Running'),('close','Close')], 'Status', required=True,
help="When an asset is created, the status is 'Draft'.\n" \
"If the asset is confirmed, the status goes in 'Running' and the depreciation lines can be posted in the accounting.\n" \
"You can manually close an asset when the depreciation is over. If the last line of depreciation is posted, the asset automatically goes in that status."),
'active': fields.boolean('Active'),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True, states={'draft':[('readonly',False)]}),
'method': fields.selection([('linear','Linear'),('degressive','Degressive')], 'Computation Method', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="Choose the method to use to compute the amount of depreciation lines.\n"\
" * Linear: Calculated on basis of: Gross Value / Number of Depreciations\n" \
" * Degressive: Calculated on basis of: Residual Value * Degressive Factor"),
'method_number': fields.integer('Number of Depreciations', readonly=True, states={'draft':[('readonly',False)]}, help="The number of depreciations needed to depreciate your asset"),
'method_period': fields.integer('Number of Months in a Period', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The amount of time between two depreciations, in months"),
'method_end': fields.date('Ending Date', readonly=True, states={'draft':[('readonly',False)]}),
'method_progress_factor': fields.float('Degressive Factor', readonly=True, states={'draft':[('readonly',False)]}),
'value_residual': fields.function(_amount_residual, method=True, digits_compute=dp.get_precision('Account'), string='Residual Value'),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True, readonly=True, states={'draft':[('readonly',False)]},
help="Choose the method to use to compute the dates and number of depreciation lines.\n"\
" * Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
" * Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
        'prorata': fields.boolean('Prorata Temporis', readonly=True, states={'draft':[('readonly',False)]}, help='Indicates that the first depreciation entry for this asset has to be done from the purchase date instead of the first of January'),
'history_ids': fields.one2many('account.asset.history', 'asset_id', 'History', readonly=True),
'depreciation_line_ids': fields.one2many('account.asset.depreciation.line', 'asset_id', 'Depreciation Lines', readonly=True, states={'draft':[('readonly',False)],'open':[('readonly',False)]}),
'salvage_value': fields.float('Salvage Value', digits_compute=dp.get_precision('Account'), help="It is the amount you plan to have that you cannot depreciate.", readonly=True, states={'draft':[('readonly',False)]}),
}
_defaults = {
'code': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.asset.code'),
'purchase_date': lambda obj, cr, uid, context: time.strftime('%Y-%m-%d'),
'active': True,
'state': 'draft',
'method': 'linear',
'method_number': 5,
'method_time': 'number',
'method_period': 12,
'method_progress_factor': 0.3,
'currency_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.currency_id.id,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'account.asset.asset',context=context),
}
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_asset_asset, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
def _check_prorata(self, cr, uid, ids, context=None):
for asset in self.browse(cr, uid, ids, context=context):
if asset.prorata and asset.method_time != 'number':
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive assets.', ['parent_id']),
(_check_prorata, 'Prorata temporis can be applied only for time method "number of depreciations".', ['prorata']),
]
def onchange_category_id(self, cr, uid, ids, category_id, context=None):
res = {'value':{}}
asset_categ_obj = self.pool.get('account.asset.category')
if category_id:
category_obj = asset_categ_obj.browse(cr, uid, category_id, context=context)
res['value'] = {
'method': category_obj.method,
'method_number': category_obj.method_number,
'method_time': category_obj.method_time,
'method_period': category_obj.method_period,
'method_progress_factor': category_obj.method_progress_factor,
'method_end': category_obj.method_end,
'prorata': category_obj.prorata,
}
return res
def onchange_method_time(self, cr, uid, ids, method_time='number', context=None):
res = {'value': {}}
if method_time != 'number':
res['value'] = {'prorata': False}
return res
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if context is None:
context = {}
default.update({'depreciation_line_ids': [], 'state': 'draft'})
return super(account_asset_asset, self).copy(cr, uid, id, default, context=context)
def _compute_entries(self, cr, uid, ids, period_id, context=None):
result = []
period_obj = self.pool.get('account.period')
depreciation_obj = self.pool.get('account.asset.depreciation.line')
period = period_obj.browse(cr, uid, period_id, context=context)
depreciation_ids = depreciation_obj.search(cr, uid, [('asset_id', 'in', ids), ('depreciation_date', '<=', period.date_stop), ('depreciation_date', '>=', period.date_start), ('move_check', '=', False)], context=context)
if context is None:
context = {}
context.update({'depreciation_date':period.date_stop})
return depreciation_obj.create_move(cr, uid, depreciation_ids, context=context)
def create(self, cr, uid, vals, context=None):
asset_id = super(account_asset_asset, self).create(cr, uid, vals, context=context)
self.compute_depreciation_board(cr, uid, [asset_id], context=context)
return asset_id
def open_entries(self, cr, uid, ids, context=None):
if context is None:
context = {}
context.update({'search_default_asset_id': ids, 'default_asset_id': ids})
return {
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move.line',
'view_id': False,
'type': 'ir.actions.act_window',
'context': context,
}
account_asset_asset()
class account_asset_depreciation_line(osv.osv):
_name = 'account.asset.depreciation.line'
_description = 'Asset depreciation line'
def _get_move_check(self, cr, uid, ids, name, args, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = bool(line.move_id)
return res
_columns = {
'name': fields.char('Depreciation Name', size=64, required=True, select=1),
'sequence': fields.integer('Sequence', required=True),
'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True, ondelete='cascade'),
'parent_state': fields.related('asset_id', 'state', type='char', string='State of Asset'),
'amount': fields.float('Current Depreciation', digits_compute=dp.get_precision('Account'), required=True),
'remaining_value': fields.float('Next Period Depreciation', digits_compute=dp.get_precision('Account'),required=True),
'depreciated_value': fields.float('Amount Already Depreciated', required=True),
'depreciation_date': fields.date('Depreciation Date', select=1),
'move_id': fields.many2one('account.move', 'Depreciation Entry'),
'move_check': fields.function(_get_move_check, method=True, type='boolean', string='Posted', store=True)
}
def create_move(self, cr, uid, ids, context=None):
can_close = False
if context is None:
context = {}
asset_obj = self.pool.get('account.asset.asset')
period_obj = self.pool.get('account.period')
move_obj = self.pool.get('account.move')
move_line_obj = self.pool.get('account.move.line')
currency_obj = self.pool.get('res.currency')
created_move_ids = []
asset_ids = []
for line in self.browse(cr, uid, ids, context=context):
depreciation_date = context.get('depreciation_date') or time.strftime('%Y-%m-%d')
ctx = dict(context, account_period_prefer_normal=True)
period_ids = period_obj.find(cr, uid, depreciation_date, context=ctx)
company_currency = line.asset_id.company_id.currency_id.id
current_currency = line.asset_id.currency_id.id
context.update({'date': depreciation_date})
amount = currency_obj.compute(cr, uid, current_currency, company_currency, line.amount, context=context)
sign = (line.asset_id.category_id.journal_id.type == 'purchase' and 1) or -1
asset_name = line.asset_id.name
reference = line.name
move_vals = {
'name': asset_name,
'date': depreciation_date,
'ref': reference,
'period_id': period_ids and period_ids[0] or False,
'journal_id': line.asset_id.category_id.journal_id.id,
}
move_id = move_obj.create(cr, uid, move_vals, context=context)
journal_id = line.asset_id.category_id.journal_id.id
partner_id = line.asset_id.partner_id.id
move_line_obj.create(cr, uid, {
'name': asset_name,
'ref': reference,
'move_id': move_id,
'account_id': line.asset_id.category_id.account_depreciation_id.id,
'debit': 0.0,
'credit': amount,
'period_id': period_ids and period_ids[0] or False,
'journal_id': journal_id,
'partner_id': partner_id,
'currency_id': company_currency != current_currency and current_currency or False,
'amount_currency': company_currency != current_currency and - sign * line.amount or 0.0,
'date': depreciation_date,
})
move_line_obj.create(cr, uid, {
'name': asset_name,
'ref': reference,
'move_id': move_id,
'account_id': line.asset_id.category_id.account_expense_depreciation_id.id,
'credit': 0.0,
'debit': amount,
'period_id': period_ids and period_ids[0] or False,
'journal_id': journal_id,
'partner_id': partner_id,
'currency_id': company_currency != current_currency and current_currency or False,
'amount_currency': company_currency != current_currency and sign * line.amount or 0.0,
'analytic_account_id': line.asset_id.category_id.account_analytic_id.id,
'date': depreciation_date,
'asset_id': line.asset_id.id
})
self.write(cr, uid, line.id, {'move_id': move_id}, context=context)
created_move_ids.append(move_id)
asset_ids.append(line.asset_id.id)
# we re-evaluate the assets to determine whether we can close them
for asset in asset_obj.browse(cr, uid, list(set(asset_ids)), context=context):
if currency_obj.is_zero(cr, uid, asset.currency_id, asset.value_residual):
asset.write({'state': 'close'})
return created_move_ids
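    # Editor's note (illustrative): for each depreciation line, create_move() above
    # books one account.move with two lines -- a credit on the category's
    # accumulated-depreciation account and a debit on its depreciation-expense
    # account -- for the line amount converted into the company currency, and
    # finally closes any asset whose residual value has reached zero.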
account_asset_depreciation_line()
class account_move_line(osv.osv):
_inherit = 'account.move.line'
_columns = {
'asset_id': fields.many2one('account.asset.asset', 'Asset', ondelete="restrict"),
'entry_ids': fields.one2many('account.move.line', 'asset_id', 'Entries', readonly=True, states={'draft':[('readonly',False)]}),
}
account_move_line()
class account_asset_history(osv.osv):
_name = 'account.asset.history'
_description = 'Asset history'
_columns = {
'name': fields.char('History name', size=64, select=1),
'user_id': fields.many2one('res.users', 'User', required=True),
'date': fields.date('Date', required=True),
'asset_id': fields.many2one('account.asset.asset', 'Asset', required=True),
'method_time': fields.selection([('number','Number of Depreciations'),('end','Ending Date')], 'Time Method', required=True,
help="The method to use to compute the dates and number of depreciation lines.\n"\
"Number of Depreciations: Fix the number of depreciation lines and the time between 2 depreciations.\n" \
"Ending Date: Choose the time between 2 depreciations and the date the depreciations won't go beyond."),
'method_number': fields.integer('Number of Depreciations', help="The number of depreciations needed to depreciate your asset"),
        'method_period': fields.integer('Period Length', help="Time in months between two depreciations"),
'method_end': fields.date('Ending date'),
'note': fields.text('Note'),
}
_order = 'date desc'
_defaults = {
'date': lambda *args: time.strftime('%Y-%m-%d'),
'user_id': lambda self, cr, uid, ctx: uid
}
account_asset_history()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
bakkou-badri/dataminingproject
|
env/lib/python2.7/site-packages/werkzeug/testsuite/exceptions.py
|
100
|
3325
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The tests for the exception classes.
TODO:
- This is undertested. HTML is never checked
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug import exceptions
from werkzeug.wrappers import Response
from werkzeug._compat import text_type
class ExceptionsTestCase(WerkzeugTestCase):
def test_proxy_exception(self):
orig_resp = Response('Hello World')
try:
exceptions.abort(orig_resp)
except exceptions.HTTPException as e:
resp = e.get_response({})
else:
self.fail('exception not raised')
self.assert_true(resp is orig_resp)
self.assert_equal(resp.get_data(), b'Hello World')
def test_aborter(self):
abort = exceptions.abort
self.assert_raises(exceptions.BadRequest, abort, 400)
self.assert_raises(exceptions.Unauthorized, abort, 401)
self.assert_raises(exceptions.Forbidden, abort, 403)
self.assert_raises(exceptions.NotFound, abort, 404)
self.assert_raises(exceptions.MethodNotAllowed, abort, 405, ['GET', 'HEAD'])
self.assert_raises(exceptions.NotAcceptable, abort, 406)
self.assert_raises(exceptions.RequestTimeout, abort, 408)
self.assert_raises(exceptions.Gone, abort, 410)
self.assert_raises(exceptions.LengthRequired, abort, 411)
self.assert_raises(exceptions.PreconditionFailed, abort, 412)
self.assert_raises(exceptions.RequestEntityTooLarge, abort, 413)
self.assert_raises(exceptions.RequestURITooLarge, abort, 414)
self.assert_raises(exceptions.UnsupportedMediaType, abort, 415)
self.assert_raises(exceptions.UnprocessableEntity, abort, 422)
self.assert_raises(exceptions.InternalServerError, abort, 500)
self.assert_raises(exceptions.NotImplemented, abort, 501)
self.assert_raises(exceptions.BadGateway, abort, 502)
self.assert_raises(exceptions.ServiceUnavailable, abort, 503)
myabort = exceptions.Aborter({1: exceptions.NotFound})
self.assert_raises(LookupError, myabort, 404)
self.assert_raises(exceptions.NotFound, myabort, 1)
myabort = exceptions.Aborter(extra={1: exceptions.NotFound})
self.assert_raises(exceptions.NotFound, myabort, 404)
self.assert_raises(exceptions.NotFound, myabort, 1)
def test_exception_repr(self):
exc = exceptions.NotFound()
self.assert_equal(text_type(exc), '404: Not Found')
self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")
exc = exceptions.NotFound('Not There')
self.assert_equal(text_type(exc), '404: Not Found')
self.assert_equal(repr(exc), "<NotFound '404: Not Found'>")
def test_special_exceptions(self):
exc = exceptions.MethodNotAllowed(['GET', 'HEAD', 'POST'])
h = dict(exc.get_headers({}))
self.assert_equal(h['Allow'], 'GET, HEAD, POST')
self.assert_true('The method is not allowed' in exc.get_description())
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ExceptionsTestCase))
return suite
|
gpl-2.0
|
LuminateWireless/bazel
|
third_party/py/concurrent/futures/_compat.py
|
179
|
4645
|
from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys
def namedtuple(typename, field_names):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
    >>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec(template, namespace)
except SyntaxError:
e = _sys.exc_info()[1]
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
return result
|
apache-2.0
|
wackymaster/QTClock
|
Libraries/numpy/distutils/tests/test_fcompiler_intel.py
|
146
|
1224
|
from __future__ import division, absolute_import, print_function
import numpy.distutils.fcompiler
from numpy.testing import TestCase, run_module_suite, assert_
intel_32bit_version_strings = [
("Intel(R) Fortran Intel(R) 32-bit Compiler Professional for applications"
"running on Intel(R) 32, Version 11.1", '11.1'),
]
intel_64bit_version_strings = [
("Intel(R) Fortran IA-64 Compiler Professional for applications"
"running on IA-64, Version 11.0", '11.0'),
("Intel(R) Fortran Intel(R) 64 Compiler Professional for applications"
"running on Intel(R) 64, Version 11.1", '11.1')
]
class TestIntelFCompilerVersions(TestCase):
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
class TestIntelEM64TFCompilerVersions(TestCase):
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
v = fc.version_match(vs)
assert_(v == version)
if __name__ == '__main__':
run_module_suite()
|
mit
|
seaotterman/tensorflow
|
tensorflow/python/kernel_tests/diag_op_test.py
|
56
|
19710
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class MatrixDiagTest(test.TestCase):
_use_gpu = False
def testVector(self):
with self.test_session(use_gpu=self._use_gpu):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
v_diag = array_ops.matrix_diag(v)
self.assertEqual((3, 3), v_diag.get_shape())
self.assertAllEqual(v_diag.eval(), mat)
def testBatchVector(self):
with self.test_session(use_gpu=self._use_gpu):
v_batch = np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]])
mat_batch = np.array(
[[[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0],
[0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]])
v_batch_diag = array_ops.matrix_diag(v_batch)
self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
self.assertAllEqual(v_batch_diag.eval(), mat_batch)
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
array_ops.matrix_diag(0)
def testInvalidShapeAtEval(self):
with self.test_session(use_gpu=self._use_gpu):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 1-dim"):
array_ops.matrix_diag(v).eval(feed_dict={v: 0.0})
def testGrad(self):
shapes = ((3,), (7, 4))
with self.test_session(use_gpu=self._use_gpu):
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), np.float32)
y = array_ops.matrix_diag(x)
error = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
class MatrixDiagGpuTest(MatrixDiagTest):
_use_gpu = True
class MatrixSetDiagTest(test.TestCase):
_use_gpu = False
def testSquare(self):
with self.test_session(use_gpu=self._use_gpu):
v = np.array([1.0, 2.0, 3.0])
mat = np.array([[0.0, 1.0, 0.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 1.0]])
mat_set_diag = np.array([[1.0, 1.0, 0.0],
[1.0, 2.0, 1.0],
[1.0, 1.0, 3.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag, output.eval())
def testRectangular(self):
with self.test_session(use_gpu=self._use_gpu):
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]])
expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((2, 3), output.get_shape())
self.assertAllEqual(expected, output.eval())
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 2), output.get_shape())
self.assertAllEqual(expected, output.eval())
def testSquareBatch(self):
with self.test_session(use_gpu=self._use_gpu):
v_batch = np.array([[-1.0, -2.0, -3.0],
[-4.0, -5.0, -6.0]])
mat_batch = np.array(
[[[1.0, 0.0, 3.0],
[0.0, 2.0, 0.0],
[1.0, 0.0, 3.0]],
[[4.0, 0.0, 4.0],
[0.0, 5.0, 0.0],
[2.0, 0.0, 6.0]]])
mat_set_diag_batch = np.array(
[[[-1.0, 0.0, 3.0],
[0.0, -2.0, 0.0],
[1.0, 0.0, -3.0]],
[[-4.0, 0.0, 4.0],
[0.0, -5.0, 0.0],
[2.0, 0.0, -6.0]]])
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, output.eval())
def testRectangularBatch(self):
with self.test_session(use_gpu=self._use_gpu):
v_batch = np.array([[-1.0, -2.0],
[-4.0, -5.0]])
mat_batch = np.array(
[[[1.0, 0.0, 3.0],
[0.0, 2.0, 0.0]],
[[4.0, 0.0, 4.0],
[0.0, 5.0, 0.0]]])
mat_set_diag_batch = np.array(
[[[-1.0, 0.0, 3.0],
[0.0, -2.0, 0.0]],
[[-4.0, 0.0, 4.0],
[0.0, -5.0, 0.0]]])
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 2, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, output.eval())
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
array_ops.matrix_set_diag(0, [0])
with self.assertRaisesRegexp(ValueError, "must be at least rank 1"):
array_ops.matrix_set_diag([[0]], 0)
def testInvalidShapeAtEval(self):
with self.test_session(use_gpu=self._use_gpu):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_set_diag(v, [v]).eval(feed_dict={v: 0.0})
with self.assertRaisesOpError(
r"but received input shape: \[1,1\] and diagonal shape: \[\]"):
array_ops.matrix_set_diag([[v]], v).eval(feed_dict={v: 0.0})
def testGrad(self):
shapes = ((3, 4, 4), (3, 3, 4), (3, 4, 3), (7, 4, 8, 8))
with self.test_session(use_gpu=self._use_gpu):
for shape in shapes:
x = constant_op.constant(
np.random.rand(*shape), dtype=dtypes_lib.float32)
diag_shape = shape[:-2] + (min(shape[-2:]),)
x_diag = constant_op.constant(
np.random.rand(*diag_shape), dtype=dtypes_lib.float32)
y = array_ops.matrix_set_diag(x, x_diag)
error_x = gradient_checker.compute_gradient_error(
x, x.get_shape().as_list(), y, y.get_shape().as_list())
self.assertLess(error_x, 1e-4)
error_x_diag = gradient_checker.compute_gradient_error(
x_diag, x_diag.get_shape().as_list(), y, y.get_shape().as_list())
self.assertLess(error_x_diag, 1e-4)
def testGradWithNoShapeInformation(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
v = array_ops.placeholder(dtype=dtypes_lib.float32)
mat = array_ops.placeholder(dtype=dtypes_lib.float32)
grad_input = array_ops.placeholder(dtype=dtypes_lib.float32)
output = array_ops.matrix_set_diag(mat, v)
grads = gradients_impl.gradients(output, [mat, v], grad_ys=grad_input)
grad_input_val = np.random.rand(3, 3).astype(np.float32)
grad_vals = sess.run(grads,
feed_dict={
v: 2 * np.ones(3),
mat: np.ones((3, 3)),
grad_input: grad_input_val
})
self.assertAllEqual(np.diag(grad_input_val), grad_vals[1])
self.assertAllEqual(grad_input_val - np.diag(np.diag(grad_input_val)),
grad_vals[0])
class MatrixSetDiagGpuTest(MatrixSetDiagTest):
_use_gpu = True
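# Illustrative reference (an added sketch, not part of the original TensorFlow
# test suite): a NumPy-only model of what matrix_set_diag is expected to
# compute, matching the expected arrays asserted in MatrixSetDiagTest above --
# the main diagonal of each innermost matrix is replaced by the supplied
# values, and the gradient tests rely on the complementary split of the
# upstream gradient into its diagonal part (for the diagonal input) and its
# off-diagonal part (for the matrix input).
def _reference_matrix_set_diag(mat, diag):
  """NumPy sketch: copy of `mat` with the main diagonal(s) replaced by `diag`."""
  out = np.array(mat, copy=True)
  k = min(out.shape[-2:])
  out[..., np.arange(k), np.arange(k)] = diag
  return out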
class MatrixDiagPartTest(test.TestCase):
_use_gpu = False
def testSquare(self):
with self.test_session(use_gpu=self._use_gpu):
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
mat_diag = array_ops.matrix_diag_part(mat)
self.assertEqual((3,), mat_diag.get_shape())
self.assertAllEqual(mat_diag.eval(), v)
def testRectangular(self):
with self.test_session(use_gpu=self._use_gpu):
mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))
def testSquareBatch(self):
with self.test_session(use_gpu=self._use_gpu):
v_batch = np.array([[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]])
mat_batch = np.array(
[[[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0],
[0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0],
[0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]])
self.assertEqual(mat_batch.shape, (2, 3, 3))
mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 3), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
def testRectangularBatch(self):
with self.test_session(use_gpu=self._use_gpu):
v_batch = np.array([[1.0, 2.0],
[4.0, 5.0]])
mat_batch = np.array(
[[[1.0, 0.0, 0.0],
[0.0, 2.0, 0.0]],
[[4.0, 0.0, 0.0],
[0.0, 5.0, 0.0]]])
self.assertEqual(mat_batch.shape, (2, 2, 3))
mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 2), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag.eval(), v_batch)
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
array_ops.matrix_diag_part(0)
def testInvalidShapeAtEval(self):
with self.test_session(use_gpu=self._use_gpu):
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
def testGrad(self):
shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
with self.test_session(use_gpu=self._use_gpu):
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
y = array_ops.matrix_diag_part(x)
error = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
class MatrixDiagPartGpuTest(MatrixDiagPartTest):
_use_gpu = True
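# Illustrative reference (an added sketch, not part of the original test
# suite): for the shapes exercised in MatrixDiagPartTest above,
# matrix_diag_part matches NumPy's np.diagonal taken over the two innermost
# dimensions.
def _reference_matrix_diag_part(mat):
  """NumPy sketch: main diagonal of each innermost matrix of `mat`."""
  return np.diagonal(mat, axis1=-2, axis2=-1)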
class DiagTest(test.TestCase):
def diagOp(self, diag, dtype, expected_ans, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
out = tf_ans.eval()
tf_ans_inv = array_ops.diag_part(expected_ans)
inv_out = tf_ans_inv.eval()
self.assertAllClose(out, expected_ans)
self.assertAllClose(inv_out, diag)
self.assertShapeEqual(expected_ans, tf_ans)
self.assertShapeEqual(diag, tf_ans_inv)
def testEmptyTensor(self):
x = np.array([])
expected_ans = np.empty([0, 0])
self.diagOp(x, np.int32, expected_ans)
def testRankOneIntTensor(self):
x = np.array([1, 2, 3])
expected_ans = np.array(
[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
self.diagOp(x, np.int32, expected_ans)
self.diagOp(x, np.int64, expected_ans)
def testRankOneFloatTensor(self):
x = np.array([1.1, 2.2, 3.3])
expected_ans = np.array(
[[1.1, 0, 0],
[0, 2.2, 0],
[0, 0, 3.3]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankOneComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array([1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j], dtype=dtype)
expected_ans = np.array(
[[1.1 + 1.1j, 0 + 0j, 0 + 0j],
[0 + 0j, 2.2 + 2.2j, 0 + 0j],
[0 + 0j, 0 + 0j, 3.3 + 3.3j]], dtype=dtype)
self.diagOp(x, dtype, expected_ans)
def testRankTwoIntTensor(self):
x = np.array([[1, 2, 3], [4, 5, 6]])
expected_ans = np.array(
[[[[1, 0, 0], [0, 0, 0]],
[[0, 2, 0], [0, 0, 0]],
[[0, 0, 3], [0, 0, 0]]],
[[[0, 0, 0], [4, 0, 0]],
[[0, 0, 0], [0, 5, 0]],
[[0, 0, 0], [0, 0, 6]]]])
self.diagOp(x, np.int32, expected_ans)
self.diagOp(x, np.int64, expected_ans)
def testRankTwoFloatTensor(self):
x = np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]])
expected_ans = np.array(
[[[[1.1, 0, 0], [0, 0, 0]],
[[0, 2.2, 0], [0, 0, 0]],
[[0, 0, 3.3], [0, 0, 0]]],
[[[0, 0, 0], [4.4, 0, 0]],
[[0, 0, 0], [0, 5.5, 0]],
[[0, 0, 0], [0, 0, 6.6]]]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankTwoComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array([[1.1 + 1.1j, 2.2 + 2.2j, 3.3 + 3.3j],
[4.4 + 4.4j, 5.5 + 5.5j, 6.6 + 6.6j]], dtype=dtype)
expected_ans = np.array(
[[[[1.1 + 1.1j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]],
[[0 + 0j, 2.2 + 2.2j, 0 + 0j], [0 + 0j, 0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j, 3.3 + 3.3j], [0 + 0j, 0 + 0j, 0 + 0j]]],
[[[0 + 0j, 0 + 0j, 0 + 0j], [4.4 + 4.4j, 0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 5.5 + 5.5j, 0 + 0j]],
[[0 + 0j, 0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j, 6.6 + 6.6j]]]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
def testRankThreeFloatTensor(self):
x = np.array([[[1.1, 2.2], [3.3, 4.4]],
[[5.5, 6.6], [7.7, 8.8]]])
expected_ans = np.array(
[[[[[[1.1, 0], [0, 0]], [[0, 0], [0, 0]]],
[[[0, 2.2], [0, 0]], [[0, 0], [0, 0]]]],
[[[[0, 0], [3.3, 0]], [[0, 0], [0, 0]]],
[[[0, 0], [0, 4.4]], [[0, 0], [0, 0]]]]],
[[[[[0, 0], [0, 0]], [[5.5, 0], [0, 0]]],
[[[0, 0], [0, 0]], [[0, 6.6], [0, 0]]]],
[[[[0, 0], [0, 0]], [[0, 0], [7.7, 0]]],
[[[0, 0], [0, 0]], [[0, 0], [0, 8.8]]]]]])
self.diagOp(x, np.float32, expected_ans)
self.diagOp(x, np.float64, expected_ans)
def testRankThreeComplexTensor(self):
for dtype in [np.complex64, np.complex128]:
x = np.array([[[1.1 + 1.1j, 2.2 + 2.2j], [3.3 + 3.3j, 4.4 + 4.4j]],
[[5.5 + 5.5j, 6.6 + 6.6j], [7.7 + 7.7j, 8.8 + 8.8j]]],
dtype=dtype)
expected_ans = np.array(
[[[[[[1.1 + 1.1j, 0 + 0j], [0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]],
[[[0 + 0j, 2.2 + 2.2j], [0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]]],
[[[[0 + 0j, 0 + 0j], [3.3 + 3.3j, 0 + 0j]],
[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]],
[[[0 + 0j, 0 + 0j], [0 + 0j, 4.4 + 4.4j]],
[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]]]]],
[[[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
[[5.5 + 5.5j, 0 + 0j], [0 + 0j, 0 + 0j]]],
[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
[[0 + 0j, 6.6 + 6.6j], [0 + 0j, 0 + 0j]]]],
[[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j], [7.7 + 7.7j, 0 + 0j]]],
[[[0 + 0j, 0 + 0j], [0 + 0j, 0 + 0j]],
[[0 + 0j, 0 + 0j], [0 + 0j, 8.8 + 8.8j]]]]]],
dtype=dtype)
self.diagOp(x, dtype, expected_ans)
class DiagPartOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
def diagPartOp(self, tensor, dtype, expected_ans, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
tensor = ops.convert_to_tensor(tensor.astype(dtype))
tf_ans_inv = array_ops.diag_part(tensor)
inv_out = tf_ans_inv.eval()
self.assertAllClose(inv_out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans_inv)
def testRankTwoFloatTensor(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankFourFloatTensorUnknownShape(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
for shape in None, (None, 3), (3, None):
with self.test_session(use_gpu=False):
t = ops.convert_to_tensor(x.astype(np.float32))
t.set_shape(shape)
tf_ans = array_ops.diag_part(t)
out = tf_ans.eval()
self.assertAllClose(out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans)
def testRankFourFloatTensor(self):
x = np.random.rand(2, 3, 2, 3)
i = np.arange(2)[:, None]
j = np.arange(3)
expected_ans = x[i, j, i, j]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankSixFloatTensor(self):
x = np.random.rand(2, 2, 2, 2, 2, 2)
i = np.arange(2)[:, None, None]
j = np.arange(2)[:, None]
k = np.arange(2)
expected_ans = x[i, j, k, i, j, k]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testOddRank(self):
w = np.random.rand(2)
x = np.random.rand(2, 2, 2)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
def testUnevenDimensions(self):
w = np.random.rand(2, 5)
x = np.random.rand(2, 1, 2, 3)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
class DiagGradOpTest(test.TestCase):
def testDiagGrad(self):
np.random.seed(0)
shapes = ((3,), (3, 3), (3, 3, 3))
dtypes = (dtypes_lib.float32, dtypes_lib.float64)
with self.test_session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
y = array_ops.diag(x1)
error = gradient_checker.compute_gradient_error(
x1, x1.get_shape().as_list(), y, y.get_shape().as_list())
tf_logging.info("error = %f", error)
self.assertLess(error, 1e-4)
class DiagGradPartOpTest(test.TestCase):
def testDiagPartGrad(self):
np.random.seed(0)
shapes = ((3, 3), (3, 3, 3, 3))
dtypes = (dtypes_lib.float32, dtypes_lib.float64)
with self.test_session(use_gpu=False):
errors = []
for shape in shapes:
for dtype in dtypes:
x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
y = array_ops.diag_part(x1)
error = gradient_checker.compute_gradient_error(
x1, x1.get_shape().as_list(), y, y.get_shape().as_list())
tf_logging.info("error = %f", error)
self.assertLess(error, 1e-4)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
markdoerr/testing
|
lara/generators/momentum_code_generator.py
|
1
|
42959
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# PROJECT: LARA
# CLASS: momentum_code_generator
# FILENAME: momentum_code_generator.py
#
# CATEGORY:
#
# AUTHOR: mark doerr
# EMAIL: [email protected]
#
# VERSION: 0.2.4
#
# CREATION_DATE: 2013/05/14
# LASTMODIFICATION_DATE: 2014/11/21
#
# BRIEF_DESCRIPTION: Code Generator for Thermo Scientific Momentum
# DETAILED_DESCRIPTION:
#
# ____________________________________________________________________________
#
# Copyright:
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This file is provided "AS IS" with NO WARRANTY OF ANY KIND,
# INCLUDING THE WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE.
#
# For further Information see COPYING file that comes with this distribution.
#_______________________________________________________________________________
import logging
import datetime
import singledispatch # will be included in the python 3.4 functools library; for python < 3.4 please use pip install singledispatch
import lara_codegenerator as lcg
from .. import lara_process
from .. import lara_material as lam
#import lara_experiment
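# Added illustrative sketch (not part of the original module): the generator
# class below uses the singledispatch backport as a visitor -- self.generate
# is turned into a single-dispatch callable in initDefDispatcher() and one
# handler is registered per lara_process step type, so self.generate(step)
# selects the handler by the concrete type of `step`.  The _Demo* names used
# here are hypothetical and exist only to illustrate the dispatch mechanism.
class _DemoStepA(object):
    pass
class _DemoStepB(object):
    pass
def _demo_generate(step):
    return "generic handler"
_demo_generate = singledispatch.singledispatch(_demo_generate)
_demo_generate.register(_DemoStepA, lambda step: "handler for _DemoStepA")
_demo_generate.register(_DemoStepB, lambda step: "handler for _DemoStepB")
# _demo_generate(_DemoStepA()) returns "handler for _DemoStepA",
# _demo_generate(object()) falls back to "generic handler".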
class LA_GenerateMomentumCode(lcg.LA_CodeGenerator):
"""Code generator class for generating Thermo Fisher Momentum (Ver. 3.2.2) code using the visitor pattern"""
def __init__(self, experiment, automation_sys, container_db, container_location_db ):
super(LA_GenerateMomentumCode, self).__init__(experiment)
self.initDefDispatcher()
self.experiment = experiment
self.automation_sys = automation_sys
self.container_db = container_db
self.container_location_db = container_location_db
self.lockset_global = set()
self.lockset_local = set()
self.proc_var_dict = {}
self.loop_counter = 1
self.curr_loop_lock_var = ""
self.incubation_counter = 1
self.centrifugation_counter = 1
self.primeVolume_counter = 1
self.dispenseVolume_counter = 1
self.tool_container = False
self.current_user = "MDO"
self.momentum_loop_containers = ""
self.momentum_tool_containers = ""
self.momentum_profileHead = ""
self.momentum_profileDevices = ""
self.momentum_profileVariables = ""
self.momentum_processHead = ""
self.momentum_processContainers = ""
self.momentum_process_variables = ""
self.momentum_process = ""
self.momentum_experiment_head = ""
self.momentum_experiment_start_end = ""
self.momentum_nest_restrictions = ""
self.momentum_experiment_tail = ""
# experiment should be generated before process (with original nest locations)
self.experiment_head()
self.experiment_nest_restrictions()
self.experiment_start_end()
self.experimentTail()
self.profileHead()
self.profileDevices()
logging.debug("mcg: ps devs" )
self.processHead()
logging.debug("mcg: outputfile is %s, now traversing ...", self.experiment.name() )
        # core process generation, traversing through the subprocess list
self.traverse()
self.process_variables()
logging.debug("mcg: ps var" )
self.processContainers()
logging.debug("mcg: ps container" )
self.profileVariables() # needs to be processed after process !!
logging.debug("mcg: prof var" )
self.writeProcess()
self.writeExperiment()
logging.debug("mcg: ps write" )
def initDefDispatcher(self):
self.generate = singledispatch.singledispatch(self.generate)
self.generate.register(lara_process.LogEntry,self.genLogEntry)
self.generate.register(lara_process.BeginSubProcess,self.switchDispatcher)
self.generate.register(lara_process.BeginProcess,self.genBeginProcess)
self.generate.register(lara_process.BeginLockedTask,self.genBeginLockedTask)
self.generate.register(lara_process.EndLockedTask,self.genEndLockedTask)
self.generate.register(lara_process.BeginLoop,self.genBeginLoop)
self.generate.register(lara_process.EndLoop,self.genEndLoop)
self.generate.register(lara_process.BeginParallel,self.genBeginParallel)
self.generate.register(lara_process.EndParallel,self.genEndParallel)
self.generate.register(lara_process.BeginThread,self.genBeginThread)
self.generate.register(lara_process.EndThread,self.genEndThread)
self.generate.register(lara_process.Delay,self.genDelay)
self.generate.register(lara_process.ReadBarcode,self.genReadBarcode)
self.generate.register(lara_process.MovePlate,self.genLoadDevice)
self.generate.register(lara_process.AutoLoad,self.genAutoLoadDevice)
self.generate.register(lara_process.Dispense,self.genDispenseCombi)
self.generate.register(lara_process.Shake,self.genShake)
self.generate.register(lara_process.Incubate,self.genIncubate)
self.generate.register(lara_process.RunExternalProtocol,self.genRunProtocol)
self.generate.register(lara_process.Centrifuge,self.genCentrifuge)
self.generate.register(lara_process.EndProcess,self.genEndProcess)
def generate(self,item):
logging.debug("mcg: generic item")
#raise TypeError("Type not supported.")
def switchDispatcher(self, subprocess):
logging.info("This could be implemented as a stack")
if subprocess.interpreter() == lara_process.LA_ProcessElement.momentum32 :
logging.debug("switching to momentum interpreter")
self.genBeginSubProcess(subprocess)
else :
logging.debug("switching off momentum interpreter")
#self.generate.register(lara_process.BeginSubProcess,self.switchDispatcher)
def insertLogEntry(self, step, log_entry=""):
self.lockset_global.add("logLock")
logfile_entry = """\t\tcomment ('"{log_entry}({step_name})::{description}"');
\t\tacquire ({lock_name}) ;\n\n
\t\tset log_entry= '"\\\\""+Experiment + "_" + uid + "\\\\";\\\\"" + Format(Now, "yyMMdd_HHmmss") +"\\\\";\\\\"" + curr_user + "\\\\";\\\\"{step_name}{log_entry}{description} "';\n
\t\tFile_Mgr [Write File]
\t\t\t(InputVariable = 'log_entry', FileType = 'Text File',
\t\t\tOutputFileRaw = 'status_filename', Header = 'No', FileWriteMode = 'Append',
\t\t\tDisplayErrors = 'Yes', WaitDuration = '00:00:01', Duration = '00:00:01',
\t\t\tComments = '', Enabled = 'Yes') ;\n
\t\trelease ({lock_name});\n\n""".format(step_name=step.name(), log_entry=log_entry, description=step.description(), lock_name="logLock")
return(logfile_entry)
def genLogEntry(self, process_step):
self.momentum_process += self.insertLogEntry(process_step)
def genBeginProcess(self, begin_process):
self.lockset_global.add("logLock")
logging.debug("This is just the beginning")
#~ set log_filename = '"C:\\\\Users\\\\Thermo\\\\Documents\\\\MDO\\\\momentum_logfiles\\\\" + Format(Now, "yyMMdd") + Experiment +"_log"+ "\\\\"+Format(Now, "yyMMdd_HHmm")+"_log.txt"' ;
self.momentum_process += """\n\t\t// setting up unique process identifier\n
\t\tacquire ({lock_name}) ;\n\n
\t\tset uid = '""+Format(Now, "yyMMddHHmmss")' ;
\t\tset log_filename = '"C:\\\\\\\\Users\\\\\\\\Thermo\\\\\\\\Documents\\\\\\\\MDO\\\\\\\\momentum_logfiles\\\\\\\\" + Format(Now, "yyMMdd") + Experiment +"_log"+ "\\\\\\\\"+Format(Now, "yyMMdd_HHmm")+"_log.txt"' ;
\t\t// set status_filename = '"D:\\\\robot_data\\\\momentum_status\\\\current_momentum_status.txt"' ;
\t\tset status_filename = '"C:\\\\\\\\Users\\\\\\\\Public\\\\\\\\Documents\\\\\\\\MDO\\\\\\\\current_momentum_status.txt"' ;\n
\t\tset log_entry= '"\\\\""+Experiment + "_" + uid + "\\\\";\\\\"" + Format(Now, "yyMMdd_HHmmss") + "\\\\";\\\\"" + curr_user + "\\\\";\\\\"" +"=== Started at: " + Format(Now, "yy/MM/dd HH:mm:ss") + "===" + "|Profile:" + Profile';\n
\t\t//File_Mgr [Write File]
\t\t\t//(InputVariable = 'log_entry', FileType = 'Text File',
\t\t\t//OutputFileRaw = 'log_filename', Header = 'Yes', FileWriteMode = 'Append',
\t\t\t//DisplayErrors = 'Yes', WaitDuration = '00:00:01', Duration = '00:00:01',
\t\t\t//Comments = '', Enabled = 'Yes') ;\n
\t\t//File_Mgr [Write File]
\t\t\t//(InputVariable = 'log_entry', FileType = 'Text File',
\t\t\t//OutputFileRaw = 'status_filename', Header = 'No', FileWriteMode = 'Append',
\t\t\t//DisplayErrors = 'Yes', WaitDuration = '00:00:01', Duration = '00:00:01',
\t\t\t//Comments = '', Enabled = 'Yes') ;\n\n
\t\trelease ({lock_name}) ;\n\n""".format(lock_name="logLock")
def genEndProcess(self,process_step):
logging.debug("This is THE END")
self.momentum_process += self.insertLogEntry(process_step, "========== FIN =============") +"\t}\n}\n" # could generate trouble with locking of other processes ...
#self.momentum_process += "\t}\n}\n"
def genBeginSubProcess(self, begin_process):
logging.debug("generating begin subprocess - room for logging - name: " )
self.momentum_process += self.insertLogEntry(begin_process, "=== Begin Subprocess ===")
def genEndSubProcess(self,process_step):
logging.debug("End of subprocess - should now switch back to upper subprocess")
        self.momentum_process += self.insertLogEntry(process_step, "--- End Subprocess ---")
def genBeginLockedTask(self,process_step):
lock_name = process_step.name() + "Lock"
if process_step.scope() == "global":
self.lockset_global.add(lock_name)
if process_step.scope() == "local":
self.lockset_local.add(lock_name)
self.momentum_process += """\t\tacquire ({lock_name}) ;\n\n""".format(lock_name=lock_name)
def genEndLockedTask(self,process_step):
lock_name = process_step.name() + "Lock"
self.momentum_process += """\t\trelease ({lock_name}) ;\n\n""".format(lock_name=lock_name)
def genBeginLoop(self,process_step):
self.momentum_loop_containers += """\t\t\tplate counterPlate{loop_counter}
( NumberOfWellRows = '8', NumberOfWellColumns ='12',
WellNumberingMethod = 'Rows', BarCodeRegularExpression = '', BarCodeFile = '',
BarCodeAutoExpression = '"NC" + Format(Now, "yyMMddHHmmss") + "_" + Format(WallClock, "fff")',
GripOffset = 'Identity', GripForce = '0', MoverLiddingGripOffset = '3.0',
Height ='15.0', StackHeight = '13.13', WithLidOffset = '-5', WithLidHeight = '17',
Thickness = '1', SealThickness = '0', SetSize = '{max_iterations}',
Attributes = '' );\n""".format(loop_counter=self.loop_counter, max_iterations=process_step.maxIterations())
max_loop_var = "max_loop%i" %self.loop_counter
self.proc_var_dict[max_loop_var] = [str(process_step.maxIterations()), "Integer"]
self.proc_var_dict["loop_counter"] = ["0", "Integer"]
loop_lock_var = "loopLock%i" % self.loop_counter
self.curr_loop_lock_var = loop_lock_var
self.lockset_local.add(loop_lock_var)
self.momentum_process += """\t\tcomment ('{comment}') ;\n
\t\tforeach counterPlate{loop_counter} (ReverseContainerSet = 'No', DelayBetweenLoops = '00:00:01')\n\t\t{{
\t\t\tcomment ('forcing into a serial process') ; \n
\t\t\tacquire ({loop_lock_var}) ;\n
\t\t\tif ('loop_counter < {max_loop_var}')\n\t\t\t{{\n""".format(loop_counter=self.loop_counter, comment=process_step.description(),
loop_lock_var=loop_lock_var, max_loop_var=max_loop_var)
self.loop_counter += 1
def genEndLoop(self,process_step):
self.momentum_process += """\t\t\t}} //if\n\t\t\telse
{{\n\t\t\tcomment ('skip when max. number of iterations is reached') ;\t\t\t}}\n
set loop_counter = 'loop_counter + 1' ;\n
comment ('one reference to a counterPlate is required by the program (=workaround)') ;\n
virtualStorage [Load]
(Comments = '', Enabled = 'Yes')
counterPlate{loop_counter} 'Unlidded' ;\n
release ({loop_lock_var}) ;\n\t\t}} //end loop/foreach \n\t\tset loop_counter = 'loop_counter = 0';\n\n""".format(loop_counter=self.loop_counter-1,loop_lock_var=self.curr_loop_lock_var)
def genBeginParallel(self,process_step):
self.momentum_process += "\t\t\tparallel\n\t\t\t{\n"
def genEndParallel(self,process_step):
self.momentum_process += "\t\t\t} //end parallel\n"
def genBeginThread(self,process_step):
self.momentum_process += "\t\t\t\tbranch\n\t\t\t\t{\n"
def genEndThread(self,process_step):
self.momentum_process += "\t\t\t\t} //end branch\n"
def genDelay(self,process_step):
self.momentum_process += """\t\t\tdelay (MinDelay = '00:00:00', MaxDelaySpecified = 'Yes', RequestedMaxDelay = '00:00:45',
SpoilIfMaxDelayExceeded = 'False') ;\n"""
def genReadBarcode(self,process_step):
self.momentum_process += "\t\tBCR [Scan Barcode]\n\t\t\t(" \
+ "NoReadWarningWhenUnattended = 'No', OverrideUnattendedMode = 'No', \n\t\t\t" \
+ "RunOnAbortedIteration = 'No', Duration = '00:00:20', " \
+ "Comments = '', Enabled = 'Yes')\n\t\t\t" \
+ process_step.container(0) + " '%s' in 'Nest' ;\n\n" % process_step.lidding()
def genShake(self,process_step):
nest_location = "in 'Nest'"
self.momentum_process += "\t\t" + process_step.device() + " [Shake]\n\t\t\t(" \
+ "ShakeDistance = '2', ShakeFrequency = '13', \n\t\t\t\t" \
+ "RunOnAbortedIteration = 'No', Duration = '00:01:01',\n\t\t\t\t" \
+ "Comments = '')\n\t\t\t" \
+ process_step.container(0) + " '" + process_step.lidding() + "' " + nest_location + ";\n\n"
def genLoadDevice(self,process_step):
single_nest_devices = set(["Omega","Varioskan", "Combi"])
multi_nest_devices = set(["Bravo"])
if process_step.device() in single_nest_devices:
nest_location = "' in 'Nest"
elif process_step.device() in multi_nest_devices:
nest_location = "' in 'Nest " + process_step.platePosition()
else :
nest_location = ""
self.momentum_process += "\t\t" + process_step.device() + " [Load]\n\t\t\t(" \
+ "Comments = '', Enabled = 'Yes')\n\t\t\t" \
+ process_step.container(0) + " '" + process_step.lidding() + nest_location + "' ;" \
+ "\n\n"
def genAutoLoadDevice(self,process_step):
nest_location = "' in 'Nest " + self.container_location_db.autoLoad(process_step.container(0),process_step.device(),1,"ext")
plate_orientation = ""
if process_step.device() == "CombiHotel":
plate_orientation = "_" + process_step.orientation()
elif process_step.device() == "Rotanta":
nest_location = "' in 'Transfer Nest " + self.container_location_db.autoLoad(process_step.container(0),process_step.device(),1,"ext") + " - System Path"
self.momentum_process += "\t\t" + process_step.device() + " [Load]\n\t\t\t" \
+ "(Comments = '', Enabled = 'Yes')\n\t\t\t" \
+ process_step.container(0) + " '"+ process_step.lidding() + nest_location \
+ plate_orientation +"' ;" \
+ "\n\n"
def genDispenseCombi(self,process_step):
logging.warning("!!! adujust dispense hight depending on container type !!!")
medium = "medium%s" % process_step.liquidsChannel()
self.proc_var_dict[medium] = [str(process_step.liquidsChannel()), "String"]
combi_prime_vol = "combi_prime_vol%s" % self.primeVolume_counter
self.proc_var_dict[combi_prime_vol] = [str(process_step.primeVolume()), "Double"]
self.primeVolume_counter += 1
combi_vol = "combi_vol%s" % self.dispenseVolume_counter
self.proc_var_dict[combi_vol] = [str(process_step.volume()), "Double"]
self.dispenseVolume_counter += 1
if process_step.mode() == "Dispense":
self.momentum_process += """\t\tCombi [Dispense]\n\t\t\t(
\t\t\tPlateType = '96 standard (15mm)', DispenseOrder = 'No',
\t\t\tFluid = ${medium}, PrimeVolume = ${combi_prime_vol},
\t\t\tPrimeEnabled = 'Yes', PumpSpeed = '50',
\t\t\tDispenseHeight = '1700', DispenseXOffset = '0',
\t\t\tDispenseYOffset = '0', DefaultToColumn1 = 'Yes',
\t\t\tColumn_1 = ${combi_vol}, Column_2 = ${combi_vol}, Column_3 = ${combi_vol},
\t\t\tColumn_4 = ${combi_vol}, Column_5 = ${combi_vol}, Column_6 = ${combi_vol},
\t\t\tColumn_7 = ${combi_vol}, Column_8 = ${combi_vol}, Column_9 = ${combi_vol},
\t\t\tColumn_10 = ${combi_vol}, Column_11 = ${combi_vol}, Column_12 = ${combi_vol},
\t\t\tRunOnAbortedIteration = 'No', Duration = '00:00:45',
\t\t\tComments = '', Enabled = 'Yes')
\t\t\t{container} 'Unlidded' in 'Nest' ; \n\n\n""".format(medium=medium,combi_prime_vol=combi_prime_vol, combi_vol=combi_vol, container=process_step.container(0))
elif process_step.mode() == "Empty":
self.momentum_process += """\t\tCombi [Empty]
(Volume = '{volume}', Fluid = ${medium}, RunOnAbortedIteration = 'No',
Duration = '00:00:30', Comments = '', Enabled = 'Yes') ;\n""".format(volume=process_step.emptyVolume(), medium=medium )
elif process_step.mode() == "Prime":
self.momentum_process += """\t\tCombi [Prime]
(Volume = '{volume}', Fluid = ${medium}, RunOnAbortedIteration = 'No',
Duration = '00:00:30', Comments = '', Enabled = 'Yes') ;\n""".format(volume=process_step.primeVolume(), medium=medium )
def genIncubate(self,process_step):
curr_time_factor_var = "incubation_duration_factor%i" % self.incubation_counter
self.proc_var_dict["incubation_duration"] = ["00:01:00", "Duration"]
self.proc_var_dict["curr_incubation_duration"] = ["00:01:00", "Duration"]
self.proc_var_dict[curr_time_factor_var] = [str(process_step.incubationDuration()), "Integer"]
self.momentum_process += "\t\tset curr_incubation_duration = 'incubation_duration * " + curr_time_factor_var + "' ;\n"
self.momentum_process += "\t\t" + process_step.device() +" [Incubate]\n\t\t\t(" \
+ "RunOnAbortedIteration = 'No', Duration = $curr_incubation_duration , MinDelay = '00:00:00', MaxDelaySpecified = 'No',"\
+ "Comments = '', Enabled = 'Yes')\n\t\t\t" \
+ process_step.container(0) + " 'Lidded';" \
+ "\n\n"
self.incubation_counter += 1
def genRunProtocol(self,process_step):
if process_step.device() == "Omega":
omega_outputfile_name = "<BARCODE>" #% self.current_user # C:\\\\tmp
self.proc_var_dict["omega_input_path"] = ["B:\\\\Program Files\\\\BMG\\\\Omega\\\\User\\\\Definit", "String"] # C:\\\\tmp
omega_out_path = "\\\\\\\\Thermo-pc\\\\bmg\\\\%s\\\\" % self.current_user
self.proc_var_dict["omega_output_path"] = [omega_out_path, "String"] # C:\\\\tmp
self.proc_var_dict["omega_outputfile"] = [omega_outputfile_name, "String"]
self.momentum_process += """\t\tOmega [Run Protocol]
\t\t\t(ProtocolName = '{protocol_name}',
\t\t\tInputPath = $omega_input_path,
\t\t\tOutputPath = $omega_output_path,
\t\t\tOutputName = $omega_outputfile,
\t\t\tRunOnAbortedIteration = 'No', Duration = '00:01:01',
\t\t\tComments = '', Enabled = 'Yes')
\t\t\t{container} 'Unlidded' in 'Nest' ;\n\n""".format(protocol_name=process_step.protocol(), container=process_step.container(0))
if process_step.device() == "Varioskan":
varioskan_outputfile_name = "<BC>" #% self.current_user C:\\\\tmp
self.proc_var_dict["varioskan_outputfile"] = [varioskan_outputfile_name, "String"]
self.momentum_process += """\t\tVarioskan [Run Protocol]
\t\t\t(ProtoName = '{protocol_name}', MaximumReadTime = '00:00:00',
\t\t\tOutputNameFormat = $varioskan_outputfile,
\t\t\tRunOnAbortedIteration = 'No', Duration = '00:01:01',
\t\t\tComments = '')
\t\t\t{container} 'Unlidded' in 'Nest' ;\n\n""".format(protocol_name=process_step.protocol(), container=process_step.container(0))
elif process_step.device() == "Bravo":
self.momentum_process += """\t\t\tBravo [RunProtocol]
\t\t\t(Protocol = '{protocol_name}', RunOnAbortedIteration = 'No',
\t\t\tDuration = '00:00:20', Comments = 'remove supernatant',
\t\t\tEnabled = 'Yes') ;\n\n""".format(protocol_name=process_step.protocol())
def genCentrifuge(self, process_step):
if not self.tool_container:
self.momentum_tool_containers += """\t\tplate Tool
(WithLidOffset = '-5', MoverLiddingGripOffset = '3',
WithLidHeight = '17', Thickness = '1', SealThickness = '0',
NumberOfWellRows = '8', NumberOfWellColumns = '12',
WellNumberingMethod = 'Rows', BarCodeRegularExpression = '',
BarCodeFile = '', BarCodeAutoExpression = '"NC" + Format(Now, "yyMMddHHmmss") + "_" + Format(WallClock, "fff")',
GripOffset = 'Identity', GripForce = '0', Height = '15',
StackHeight = '13.13', SetSize = '1', Attributes = '') ;\n"""
self.tool_container = True
curr_centr_time_factor_var = "centrifugation_time_factor%i" % self.centrifugation_counter
self.proc_var_dict["centrifugation_time"] = ["00:01:00", "Duration"]
self.proc_var_dict["curr_centr_time"] = ["00:01:00", "Duration"]
self.proc_var_dict[curr_centr_time_factor_var] = [str(process_step.duration()), "Integer"]
self.momentum_process += "\t\tset curr_centr_time = 'centrifugation_time * " + curr_centr_time_factor_var + "' ;\n"
self.momentum_process += """\t\t\tRotanta [Load to Rotor Nests]
\t\t\t\t(RunOnAbortedIteration = 'No', Duration = '00:03:01',
\t\t\t\tComments = '', Enabled = 'Yes')
\t\t\t\tTool 'Unlidded' in 'Tool Nest' ;\n
\t\t\tTool_Hotel [Load]
\t\t\t\t(Comments = '', Enabled = 'Yes')
\t\t\t\tTool ;\n
\t\t\tRotanta [Run]
\t\t\t\t(ProgramRunMode = 'User Defined Run Parameters',
\t\t\t\tSpinDuration = $curr_centr_time, RPM = '{centr_speed}',
\t\t\t\tTemperature = '{centr_temp}', AccelerationLevel = '9',
\t\t\t\tDecelerationLevel = '9', RunOnAbortedIteration = 'No',
\t\t\t\tDuration = '00:01:00', Comments = '', Enabled = 'Yes') ;\n
\t\t\tRotanta [Unload from Rotor Nests]
\t\t\t\t(RunOnAbortedIteration = 'No', Duration = '00:02:01',
\t\t\t\tComments = '', Enabled = 'Yes')
\t\t\t\tTool 'Unlidded' in 'Tool Nest' ;\n
\t\t\tTool_Hotel [Load]
\t\t\t\t(Comments = '', Enabled = 'Yes')
\t\t\t\tTool ;\n\n""".format(centr_speed=process_step.speed(), centr_temp=int(process_step.temperature()))
self.centrifugation_counter += 1
# now filling the rest of the process
def profileHead(self):
self.momentum_profileHead = """// Generated: {curr_date} by LARA code generator written by mark doerr ([email protected])\n\nprofile UniversityGreifswaldProfile1\n{{\n
\t// Runtime settings\n
runtime
(Mode = 'Normal', IsAccelerated = 'Yes', AuditOnSimulate = 'Yes',
LogOnSimulate = 'Yes', HibernateOnSimulate = 'No', EnableFixedStartTime = 'Yes',
SimulationStartTime = '11/07/2013 12:00 AM', AllowNewIterationsOnDeviceError = 'No',
EnableCongestionDetection = 'Yes', CongestionClearQueueTimeThreshold = '00:02:00',
MaxQueueTimeThreshold = '00:05:00', EnableVerboseLogging = 'Yes') ;\n\n""".format(curr_date=str(datetime.datetime.now()))
def profileDevices(self):
"""This is the Greifswald automation system - it needs to be adjusted to the particular system"""
self.momentum_profileDevices = """\t// Devices and settings\n
\tdevices
\t{
BarCodeReaderMS3 BCR
(Mode = 'Normal', Color = '255, 255, 128') ;
Beacon Beacon
(Mode = 'Normal', Color = '128, 0, 255') ;
Bravo Bravo
(ParkProtocol = 'Head_Left.pro', ProtocolPath = 'B:\\\\VWorks Workspace\\\\Protocol Files',
UseAccessProtocol = 'Yes', ParkDuration = '00:00:10',
NestAccessProtocol = 'B:\\\\VWorks Workspace\\\\Protocol Files',
Mode = 'Normal', Color = '192, 192, 192') ;
Hotel BufferNestsLBravo
(Mode = 'Normal', Color = '128, 128, 0') ;
Hotel BufferNestsRBravo
(Mode = 'Normal', Color = '128, 128, 64') ;
Dim4Carousel Carousel
(Speed = '50', Acceleration = '5', Mode = 'Normal',
Color = '154, 205, 50') ;
MultidropCombi Combi
(ValvePortsUI = '6', PrimeWhenIdle = 'Yes', PrimeOnInitialization = 'No',
PrimeVolumeWhenIdle = '10', PrimeIntervalWhenIdle = '5',
CassetteUI = '1', Mode = 'Normal', Color = '0, 128, 255') ;
Biomek CombiHotel
(Project = 'BiomekNXP Span', ParkMethod = 'sim', Mode = 'Simulation',
Color = '255, 105, 180') ;
ContainerDataDriver Container
(SummaryFormat = 'CSV', SummaryFilename = '', SummaryColumns = 'DateTime,Location',
Mode = 'Normal', Color = '0, 128, 128') ;
Cytomat2C4 Cytomat_2
(CO2Deadband = '2', CO2Enable = 'No', CO2HiHiLimit = '55',
CO2HiLimit = '50', CO2LoLimit = '40', CO2LoLoLimit = '35',
HumidityDeadband = '2', HumidityEnable = 'No', HumidityHiHiLimit = '75',
HumidityHiLimit = '70', HumidityLoLimit = '30', HumidityLoLoLimit = '25',
O2Deadband = '2', O2Enable = 'No', O2HiHiLimit = '75',
O2HiLimit = '70', O2LoLimit = '60', O2LoLoLimit = '55',
TemperatureDeadband = '2', TemperatureEnable = 'No',
TemperatureHiHiLimit = '45', TemperatureHiLimit = '39',
TemperatureLoLimit = '35', TemperatureLoLoLimit = '30',
ShakeDuringIncubate = 'No', RPMT1 = '100', RPMT2 = '100',
FAMModeEnabled = 'Yes', SearchMode = 'Allowed Nests Only',
HotelsOccupancyLabel = '<Click to Edit ...>', ContainersParticipationLabel = '<Click to Edit ...>',
Mode = 'Normal', Color = '0, 255, 255') ;
Cytomat2C4 Cytomat1550_1
(CO2Deadband = '2', CO2Enable = 'No', CO2HiHiLimit = '55',
CO2HiLimit = '50', CO2LoLimit = '40', CO2LoLoLimit = '35',
HumidityDeadband = '2', HumidityEnable = 'No', HumidityHiHiLimit = '75',
HumidityHiLimit = '70', HumidityLoLimit = '30', HumidityLoLoLimit = '25',
O2Deadband = '2', O2Enable = 'No', O2HiHiLimit = '75',
O2HiLimit = '70', O2LoLimit = '60', O2LoLoLimit = '55',
TemperatureDeadband = '2', TemperatureEnable = 'No',
TemperatureHiHiLimit = '45', TemperatureHiLimit = '39',
TemperatureLoLimit = '35', TemperatureLoLoLimit = '30',
ShakeDuringIncubate = 'Yes', RPMT1 = '700', RPMT2 = '700',
Mode = 'Normal', Color = '255, 0, 0') ;
Cytomat2C4 Cytomat1550_2
(CO2Deadband = '2', CO2Enable = 'No', CO2HiHiLimit = '55',
CO2HiLimit = '50', CO2LoLimit = '40', CO2LoLoLimit = '35',
HumidityDeadband = '2', HumidityEnable = 'No', HumidityHiHiLimit = '75',
HumidityHiLimit = '70', HumidityLoLimit = '30', HumidityLoLoLimit = '25',
O2Deadband = '2', O2Enable = 'No', O2HiHiLimit = '75',
O2HiLimit = '70', O2LoLimit = '60', O2LoLoLimit = '55',
TemperatureDeadband = '2', TemperatureEnable = 'No',
TemperatureHiHiLimit = '45', TemperatureHiLimit = '39',
TemperatureLoLimit = '35', TemperatureLoLoLimit = '30',
ShakeDuringIncubate = 'Yes', RPMT1 = '720', RPMT2 = '720',
Mode = 'Normal', Color = '255, 128, 0') ;
Cytomat2C4 Cytomat470
(CO2Deadband = '2', CO2Enable = 'No', CO2HiHiLimit = '55',
CO2HiLimit = '50', CO2LoLimit = '40', CO2LoLoLimit = '35',
HumidityDeadband = '2', HumidityEnable = 'No', HumidityHiHiLimit = '75',
HumidityHiLimit = '70', HumidityLoLimit = '30', HumidityLoLoLimit = '25',
O2Deadband = '2', O2Enable = 'No', O2HiHiLimit = '75',
O2HiLimit = '70', O2LoLimit = '60', O2LoLoLimit = '55',
TemperatureDeadband = '2', TemperatureEnable = 'No',
TemperatureHiHiLimit = '45', TemperatureHiLimit = '39',
TemperatureLoLimit = '35', TemperatureLoLoLimit = '30',
ShakeDuringIncubate = 'Yes', RPMT1 = '730', RPMT2 = '730',
Mode = 'Normal', Color = '255, 128, 64') ;
GenericMover F5T
(ParkLocation = 'STDloc:safe', ParkMoverAtEndOfRun = 'Yes',
MotionSettings = 'Velocity: 20%, Acceleration: 15%, Jerk: 10%',
Mode = 'Normal', Color = '221, 160, 221') ;
FileManager File_Mgr
(Mode = 'Normal', Color = '95, 158, 160') ;
Wtio GPSYSIO_1
(Mode = 'Normal', Color = '255, 218, 185') ;
Wtio GPSYSIO_2
(Mode = 'Normal', Color = '255, 160, 122') ;
Hotel Hotel_1
(Mode = 'Normal', Color = '135, 206, 235') ;
InputMonitoring Inout_Monitor
(Mode = 'Normal', Color = '50, 205, 50') ;
DataMiner Miner
(Mode = 'Normal', Color = '186, 85, 211') ;
Wtio MultiWay
(Mode = 'Normal', Color = '240, 128, 128') ;
Omega Omega
(ProtocolPathListUI = '<Click Button to Edit>', Mode = 'Simulation',
Color = '70, 130, 180') ;
MomentumOperator Operator
(Mode = 'Normal', Color = '154, 205, 50') ;
FreeNest Recovery
(Mode = 'Normal', Color = '255, 105, 180') ;
Regrip Regrip
(Mode = 'Normal', Color = '240, 230, 140') ;
Centrifuge Rotanta
(Mode = 'Normal', Color = '210, 180, 140') ;
Hotel Tip_Hotel
(Mode = 'Normal', Color = '143, 188, 139') ;
SmartStorage Tip_Storage
(FAMModeEnabled = 'Yes', SearchMode = 'Entire Device',
HotelsOccupancyLabel = '<Click to Edit ...>', ContainersParticipationLabel = '<Click to Edit ...>',
Mode = 'Normal', Color = '100, 149, 237') ;
Hotel Tool_Hotel
(Mode = 'Normal', Color = '221, 160, 221') ;
Varioskan Varioskan
(Mode = 'Normal', Color = '95, 158, 160') ;
GenericMover virtualMover
(ParkLocation = 'STDloc:safe', ParkMoverAtEndOfRun = 'Yes',
MotionSettings = 'Velocity: 20%, Acceleration: 20%, Jerk: 100%',
Mode = 'Simulation', Color = '255, 218, 185') ;
Hotel virtualStorage
(Mode = 'Normal', Color = '255, 160, 122') ;
Waste virtualWaste
(Mode = 'Simulation', Color = '135, 206, 235') ;
Waste Waste
(Mode = 'Normal', Color = '50, 205, 50') ;
\n\t}\n\n"""
def profileDevicePools(self):
"""There are no device pools in the Greifswald automation system """
self.momentum_profileDevices = """\t// Device Pools\n\tpools\n\t{\n\t}\n\n"""
def profileVariables(self):
logging.debug("profile variables")
self.momentum_profileVariables = """\t// Profile variables\n\tvariables\n\t{"""
# the lock variables - it is important that they are on profile level
for lock_name in self.lockset_global :
self.momentum_profileVariables += \
"""\tBoolean {lock_name}
\t\t\t(DefaultValue = 'No', PromptForValue = 'No', Persist = 'No',
\t\t\tComments = '') ;\n\t""".format(lock_name=lock_name)
# now some useful variables
self.momentum_profileVariables += """\t\t\n\t}\n\n"""
def processHead(self):
self.momentum_processHead = """\t// ****************** Version 1 ******************\n
\tprocess {process_name}\n\t{{\n""".format(process_name="P_" + self.experiment.name())
return()
def processContainers(self):
self.momentum_processContainers = "\t\t// Containers\n\t\tcontainers\n\t\t{\n"
#self.container_db.data(self.container_db.index(i,0)).toString()
for i in range(self.container_db.rowCount()):
cont = lam.StandardContainer(container_name=str(self.container_db.data(self.container_db.index(i,0)).toString()),
container_type=str(self.container_db.data(self.container_db.index(i,3)).toString()),
lidding_state=str(self.container_db.data(self.container_db.index(i,4)).toString()) )
logging.debug(" processing %s" % cont.name())
if cont.contClass() == "lid":
self.momentum_processContainers += """\t\t\t{cont_class} {cont_name}
\t\t\t\t( BarCodeRegularExpression = '', BarCodeFile = '',
\t\t\t\tBarCodeAutoExpression = '"AC" + Format(Now, "yyMMdd_HHmmss")',
\t\t\t\tGripOffset = '{{[0, 0, -6], ([0, 0, 0], 1)}}', GripForce = '32',
\t\t\t\tHeight ='{cont_height}', StackHeight = '{cont_stackHeight}',
\t\t\t\tAttributes = '');\n\n""".format( cont_class=cont.contClass(),
cont_name=cont.name(), cont_height=cont.height(),
cont_stackHeight = cont.stackHeight() )
logging.debug("mcg - containers: processing %s done" % cont.name() )
else:
if cont.liddingState() == "L":
cont_name_lid = " uses_lid %sLid" % cont.name()
else:
cont_name_lid = ""
self.momentum_processContainers += """\t\t\t {cont_class} {cont_name} {cont_name_lid}
\t\t\t\t( NumberOfWellRows = '{cont_rowsNum}', NumberOfWellColumns ='{cont_colsNum}',
\t\t\t\tWellNumberingMethod = 'Rows', BarCodeRegularExpression = '', BarCodeFile = '',
\t\t\t\tBarCodeAutoExpression = '{cont_stdBarcodeTemplate}',
\t\t\t\tGripOffset = 'Identity', GripForce = '0', MoverLiddingGripOffset = '{cont_movLidGripOffset}',
\t\t\t\tHeight ='{cont_height}', StackHeight = '{cont_stackHeight}', WithLidOffset = '-5', WithLidHeight = '17',
\t\t\t\tThickness = '1', SealThickness = '0', SetSize = '1',
\t\t\t\tAttributes = '' );\n\n""".format(cont_class=cont.contClass(), cont_name=cont.name(),
cont_name_lid=cont_name_lid, cont_rowsNum = cont.rowsNum(),
cont_colsNum = cont.colsNum(), cont_stdBarcodeTemplate=cont.stdBarcodeTemplate(),
cont_movLidGripOffset=3 + cont.height() - cont.stdContainerHeight('Greiner96NwFb'),
cont_height=cont.height(), cont_stackHeight=cont.stackHeight() )
cont_name_lid = ""
self.momentum_processContainers += self.momentum_loop_containers
self.momentum_processContainers += self.momentum_tool_containers
self.momentum_processContainers += "\t\t}\n\n"
def process_variables(self):
self.momentum_process_variables = """\t\t// Process variables\n\t\tvariables\n\t\t{\n"""
for var_name, var_parm in self.proc_var_dict.iteritems() :
self.momentum_process_variables += \
"""\t\t\t{var_type} {var_name}
\t\t\t\t(DefaultValue = '{def_value}', PromptForValue = 'No', Persist = 'No',
\t\t\t\tComments = '') ;\n""".format(var_type=var_parm[1], var_name=var_name, def_value=var_parm[0])
# the lock variables - special loop lock variables
for lock_name in self.lockset_local :
self.momentum_process_variables += \
"""\tBoolean {lock_name}
\t\t\t(DefaultValue = 'No', PromptForValue = 'No', Persist = 'No',
\t\t\tComments = '') ;\n\t""".format(lock_name=lock_name)
self.momentum_process_variables += """\t\t\tString uid
\t\t\t\t(DefaultValue = '""', PromptForValue = 'No', Comments = '') ;
\t\t\tString log_entry\n\t\t\t\t(DefaultValue = '', PromptForValue = 'No', Comments = '') ;
\t\t\tString log_filename\n\t\t\t\t(DefaultValue = '', PromptForValue = 'No', Comments = '') ;
\t\t\tString status_filename\n\t\t\t\t(DefaultValue = 'D:\\\\robot_data\\\\momentum_status\\\\current_momentum_status.csv', PromptForValue = 'No', Comments = '') ;
\t\t\tString summary_file\n\t\t\t\t(DefaultValue = '', PromptForValue = 'No', Comments = '') ;
\t\t\tString curr_user\n\t\t\t\t(DefaultValue = '{current_user}', PromptForValue = 'No', Comments = '') ;
\n\t\t}}\n\n""".format(current_user=self.current_user)
# experiment generation
def experiment_head(self):
self.momentum_experiment_head = """// Generated: {curr_date}\n\nexperiment E_{momentum_process}\n{{
\t// Experiment settings\n\tsettings
(Process = 'P_{momentum_process}',
Iterations = '1', IterationsLockedForWorkUnits = 'No',
MinimumDelayBetweenIterations = '0', Priority = '10', ValidForUse = 'Yes',
EstimatedDuration = '05:55:14') ;\n\n""".format(curr_date=str(datetime.datetime.now()), momentum_process=self.experiment.name())
return()
def experiment_start_end(self):
self.momentum_experiment_start_end = """\t// Start/End Instrument\n\n\tstartend\n\t{\n"""
logging.debug("experiment start end ------> ")
for plate in self.container_location_db.iterkeys() :
curr_device = self.container_location_db[plate][0]
try:
curr_device_name = curr_device.name()
print(curr_device)
print(curr_device_name)
self.momentum_experiment_start_end += """\t{plate}\n\t\t(start='{device}') ;\n""".format(plate=plate, device=curr_device_name)
except AttributeError:
pass
self.momentum_experiment_start_end += "\n\t}\n\n"
def experiment_nest_restrictions(self):
self.momentum_nest_restrictions = """\t// Nest Restrictions\n\n\tNests\n\t{"""+ "\n\t}\n"
#~ + "MasterPlate1 ('Carousel.Column1_Hotel:Nest 1'); \n" \
#~ + "ExpressionPlate1Dw ('Carousel.Column2_Hotel:Nest 1'); \n" \
#~ + "InductorTrough1 ('Cytomat_2.Stack 1:Nest 1'); \n" \
#~
def experimentTail(self):
self.momentum_experiment_tail = "}\n"
return()
def writeProcess(self):
try:
with open('P_'+ self.experiment.name() + ".cxx", 'w') as file_output:
file_output.write(self.momentum_profileHead)
file_output.write(self.momentum_profileDevices)
file_output.write(self.momentum_profileVariables)
file_output.write(self.momentum_processHead)
file_output.write(self.momentum_processContainers)
file_output.write(self.momentum_process_variables)
file_output.write(self.momentum_process)
file_output.close()
logging.debug("mcg: outputfiel P_%s.cxx written" % self.experiment.name())
except IOError:
            logging.error("Cannot write momentum code file %s !!!" % self.experiment.name())
def writeExperiment(self):
try:
with open('E_'+ self.experiment.name() + ".cxx", 'w') as file_output:
#logging.debug(self.momentum_experiment_head)
file_output.write(self.momentum_experiment_head)
#logging.debug(self.momentum_experiment_head)
file_output.write(self.momentum_experiment_start_end)
file_output.write(self.momentum_nest_restrictions)
file_output.write(self.momentum_experiment_tail)
file_output.close()
logging.debug("mcg: Experiment outputfiel E_%s.cxx written" % self.experiment.name())
except IOError:
            logging.error("Cannot write momentum code file %s !!!" % self.experiment.name())
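# Added usage note (a sketch, not part of the original module): constructing
# the generator runs the whole pipeline from __init__ (experiment head,
# profile, process traversal) and writes P_<experiment>.cxx and
# E_<experiment>.cxx as side effects, so typical use is simply
#
#     LA_GenerateMomentumCode(experiment, automation_sys,
#                             container_db, container_location_db)
#
# where the four arguments are the LARA experiment, automation system and
# container database objects expected by __init__ above.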
|
gpl-2.0
|
BackupGGCode/python-for-android
|
python-modules/twisted/twisted/test/test_twistd.py
|
49
|
41163
|
# Copyright (c) 2007-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.application.app} and L{twisted.scripts.twistd}.
"""
import signal, inspect, errno
import os, sys, cPickle, StringIO
try:
import pwd, grp
except ImportError:
pwd = grp = None
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial import unittest
from twisted.application import service, app
from twisted.scripts import twistd
from twisted.python import log
from twisted.python.usage import UsageError
from twisted.python.log import ILogObserver
from twisted.python.versions import Version
from twisted.python.components import Componentized
from twisted.internet.defer import Deferred
from twisted.python.fakepwd import UserDatabase
try:
from twisted.python import syslog
except ImportError:
syslog = None
try:
from twisted.scripts import _twistd_unix
except ImportError:
_twistd_unix = None
else:
from twisted.scripts._twistd_unix import UnixApplicationRunner
from twisted.scripts._twistd_unix import UnixAppLogger
try:
import profile
except ImportError:
profile = None
try:
import hotshot
import hotshot.stats
except (ImportError, SystemExit):
    # For some reason, hotshot.stats seems to raise SystemExit on some
# distributions, probably when considered non-free. See the import of
# this module in twisted.application.app for more details.
hotshot = None
try:
import pstats
import cProfile
except ImportError:
cProfile = None
if getattr(os, 'setuid', None) is None:
setuidSkip = "Platform does not support --uid/--gid twistd options."
else:
setuidSkip = None
def patchUserDatabase(patch, user, uid, group, gid):
"""
Patch L{pwd.getpwnam} so that it behaves as though only one user exists
and patch L{grp.getgrnam} so that it behaves as though only one group
exists.
@param patch: A function like L{TestCase.patch} which will be used to
install the fake implementations.
@type user: C{str}
@param user: The name of the single user which will exist.
@type uid: C{int}
@param uid: The UID of the single user which will exist.
@type group: C{str}
    @param group: The name of the single group which will exist.
@type gid: C{int}
@param gid: The GID of the single group which will exist.
"""
# Try not to be an unverified fake, but try not to depend on quirks of
# the system either (eg, run as a process with a uid and gid which
# equal each other, and so doesn't reliably test that uid is used where
# uid should be used and gid is used where gid should be used). -exarkun
pwent = pwd.getpwuid(os.getuid())
grent = grp.getgrgid(os.getgid())
database = UserDatabase()
database.addUser(
user, pwent.pw_passwd, uid, pwent.pw_gid,
pwent.pw_gecos, pwent.pw_dir, pwent.pw_shell)
def getgrnam(name):
result = list(grent)
result[result.index(grent.gr_name)] = group
result[result.index(grent.gr_gid)] = gid
result = tuple(result)
return {group: result}[name]
patch(pwd, "getpwnam", database.getpwnam)
patch(grp, "getgrnam", getgrnam)
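# Added usage sketch (not part of the original Twisted tests): a TestCase can
# call the helper above as
#
#     patchUserDatabase(self.patch, "foo", 1234, "bar", 4321)
#
# after which pwd.getpwnam("foo") resolves to a record with UID 1234 and
# grp.getgrnam("bar") to a group entry carrying GID 4321, as exercised by
# ApplicationRunnerTest.test_applicationStartsWithConfiguredNameIDs below.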
class MockServiceMaker(object):
"""
A non-implementation of L{twisted.application.service.IServiceMaker}.
"""
tapname = 'ueoa'
def makeService(self, options):
"""
Take a L{usage.Options} instance and return a
L{service.IService} provider.
"""
self.options = options
self.service = service.Service()
return self.service
class CrippledAppLogger(app.AppLogger):
"""
@see: CrippledApplicationRunner.
"""
def start(self, application):
pass
class CrippledApplicationRunner(twistd._SomeApplicationRunner):
"""
An application runner that cripples the platform-specific runner and
nasty side-effect-having code so that we can use it without actually
running any environment-affecting code.
"""
loggerFactory = CrippledAppLogger
def preApplication(self):
pass
def postApplication(self):
pass
class ServerOptionsTest(unittest.TestCase):
"""
    Non-platform-specific tests for the platform-specific ServerOptions class.
"""
def test_postOptionsSubCommandCausesNoSave(self):
"""
postOptions should set no_save to True when a subcommand is used.
"""
config = twistd.ServerOptions()
config.subCommand = 'ueoa'
config.postOptions()
self.assertEquals(config['no_save'], True)
def test_postOptionsNoSubCommandSavesAsUsual(self):
"""
If no sub command is used, postOptions should not touch no_save.
"""
config = twistd.ServerOptions()
config.postOptions()
self.assertEquals(config['no_save'], False)
def test_reportProfileDeprecation(self):
"""
Check that the --report-profile option prints a C{DeprecationWarning}.
"""
config = twistd.ServerOptions()
self.assertWarns(
DeprecationWarning, "--report-profile option is deprecated and "
"a no-op since Twisted 8.0.", app.__file__,
config.parseOptions, ["--report-profile", "foo"])
def test_listAllProfilers(self):
"""
All the profilers that can be used in L{app.AppProfiler} are listed in
the help output.
"""
config = twistd.ServerOptions()
helpOutput = str(config)
for profiler in app.AppProfiler.profilers:
self.assertIn(profiler, helpOutput)
def test_defaultUmask(self):
"""
The default value for the C{umask} option is C{None}.
"""
config = twistd.ServerOptions()
self.assertEqual(config['umask'], None)
def test_umask(self):
"""
The value given for the C{umask} option is parsed as an octal integer
literal.
"""
config = twistd.ServerOptions()
config.parseOptions(['--umask', '123'])
self.assertEqual(config['umask'], 83)
config.parseOptions(['--umask', '0123'])
self.assertEqual(config['umask'], 83)
def test_invalidUmask(self):
"""
If a value is given for the C{umask} option which cannot be parsed as
an integer, L{UsageError} is raised by L{ServerOptions.parseOptions}.
"""
config = twistd.ServerOptions()
self.assertRaises(UsageError, config.parseOptions, ['--umask', 'abcdef'])
if _twistd_unix is None:
msg = "twistd unix not available"
test_defaultUmask.skip = test_umask.skip = test_invalidUmask.skip = msg
class TapFileTest(unittest.TestCase):
"""
Test twistd-related functionality that requires a tap file on disk.
"""
def setUp(self):
"""
Create a trivial Application and put it in a tap file on disk.
"""
self.tapfile = self.mktemp()
f = file(self.tapfile, 'wb')
cPickle.dump(service.Application("Hi!"), f)
f.close()
def test_createOrGetApplicationWithTapFile(self):
"""
Ensure that the createOrGetApplication call that 'twistd -f foo.tap'
makes will load the Application out of foo.tap.
"""
config = twistd.ServerOptions()
config.parseOptions(['-f', self.tapfile])
application = CrippledApplicationRunner(config).createOrGetApplication()
self.assertEquals(service.IService(application).name, 'Hi!')
class TestLoggerFactory(object):
"""
A logger factory for L{TestApplicationRunner}.
"""
def __init__(self, runner):
self.runner = runner
def start(self, application):
"""
Save the logging start on the C{runner} instance.
"""
self.runner.order.append("log")
self.runner.hadApplicationLogObserver = hasattr(self.runner,
'application')
def stop(self):
"""
Don't log anything.
"""
class TestApplicationRunner(app.ApplicationRunner):
"""
An ApplicationRunner which tracks the environment in which its methods are
called.
"""
def __init__(self, options):
app.ApplicationRunner.__init__(self, options)
self.order = []
self.logger = TestLoggerFactory(self)
def preApplication(self):
self.order.append("pre")
self.hadApplicationPreApplication = hasattr(self, 'application')
def postApplication(self):
self.order.append("post")
self.hadApplicationPostApplication = hasattr(self, 'application')
class ApplicationRunnerTest(unittest.TestCase):
"""
Non-platform-specific tests for the platform-specific ApplicationRunner.
"""
def setUp(self):
config = twistd.ServerOptions()
self.serviceMaker = MockServiceMaker()
# Set up a config object like it's been parsed with a subcommand
config.loadedPlugins = {'test_command': self.serviceMaker}
config.subOptions = object()
config.subCommand = 'test_command'
self.config = config
def test_applicationRunnerGetsCorrectApplication(self):
"""
Ensure that a twistd plugin gets used in appropriate ways: it
is passed its Options instance, and the service it returns is
added to the application.
"""
arunner = CrippledApplicationRunner(self.config)
arunner.run()
self.assertIdentical(
self.serviceMaker.options, self.config.subOptions,
"ServiceMaker.makeService needs to be passed the correct "
"sub Command object.")
self.assertIdentical(
self.serviceMaker.service,
service.IService(arunner.application).services[0],
"ServiceMaker.makeService's result needs to be set as a child "
"of the Application.")
def test_preAndPostApplication(self):
"""
        Test that the preApplication and postApplication methods are
called by ApplicationRunner.run() when appropriate.
"""
s = TestApplicationRunner(self.config)
s.run()
self.assertFalse(s.hadApplicationPreApplication)
self.assertTrue(s.hadApplicationPostApplication)
self.assertTrue(s.hadApplicationLogObserver)
self.assertEquals(s.order, ["pre", "log", "post"])
def _applicationStartsWithConfiguredID(self, argv, uid, gid):
"""
Assert that given a particular command line, an application is started
as a particular UID/GID.
@param argv: A list of strings giving the options to parse.
@param uid: An integer giving the expected UID.
@param gid: An integer giving the expected GID.
"""
self.config.parseOptions(argv)
events = []
class FakeUnixApplicationRunner(twistd._SomeApplicationRunner):
def setupEnvironment(self, chroot, rundir, nodaemon, umask,
pidfile):
events.append('environment')
def shedPrivileges(self, euid, uid, gid):
events.append(('privileges', euid, uid, gid))
def startReactor(self, reactor, oldstdout, oldstderr):
events.append('reactor')
def removePID(self, pidfile):
pass
class FakeService(object):
implements(service.IService, service.IProcess)
processName = None
uid = None
gid = None
def setName(self, name):
pass
def setServiceParent(self, parent):
pass
def disownServiceParent(self):
pass
def privilegedStartService(self):
events.append('privilegedStartService')
def startService(self):
events.append('startService')
def stopService(self):
pass
application = FakeService()
verifyObject(service.IService, application)
verifyObject(service.IProcess, application)
runner = FakeUnixApplicationRunner(self.config)
runner.preApplication()
runner.application = application
runner.postApplication()
self.assertEqual(
events,
['environment', 'privilegedStartService',
('privileges', False, uid, gid), 'startService', 'reactor'])
def test_applicationStartsWithConfiguredNumericIDs(self):
"""
L{postApplication} should change the UID and GID to the values
specified as numeric strings by the configuration after running
L{service.IService.privilegedStartService} and before running
L{service.IService.startService}.
"""
uid = 1234
gid = 4321
self._applicationStartsWithConfiguredID(
["--uid", str(uid), "--gid", str(gid)], uid, gid)
test_applicationStartsWithConfiguredNumericIDs.skip = setuidSkip
def test_applicationStartsWithConfiguredNameIDs(self):
"""
L{postApplication} should change the UID and GID to the values
specified as user and group names by the configuration after running
L{service.IService.privilegedStartService} and before running
L{service.IService.startService}.
"""
user = "foo"
uid = 1234
group = "bar"
gid = 4321
patchUserDatabase(self.patch, user, uid, group, gid)
self._applicationStartsWithConfiguredID(
["--uid", user, "--gid", group], uid, gid)
test_applicationStartsWithConfiguredNameIDs.skip = setuidSkip
def test_startReactorRunsTheReactor(self):
"""
L{startReactor} calls L{reactor.run}.
"""
reactor = DummyReactor()
runner = app.ApplicationRunner({
"profile": False,
"profiler": "profile",
"debug": False})
runner.startReactor(reactor, None, None)
self.assertTrue(
reactor.called, "startReactor did not call reactor.run()")
class UnixApplicationRunnerSetupEnvironmentTests(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.setupEnvironment}.
@ivar root: The root of the filesystem, or C{unset} if none has been
specified with a call to L{os.chroot} (patched for this TestCase with
L{UnixApplicationRunnerSetupEnvironmentTests.chroot ).
@ivar cwd: The current working directory of the process, or C{unset} if
none has been specified with a call to L{os.chdir} (patched for this
TestCase with L{UnixApplicationRunnerSetupEnvironmentTests.chdir).
@ivar mask: The current file creation mask of the process, or C{unset} if
none has been specified with a call to L{os.umask} (patched for this
TestCase with L{UnixApplicationRunnerSetupEnvironmentTests.umask).
@ivar daemon: A boolean indicating whether daemonization has been performed
by a call to L{_twistd_unix.daemonize} (patched for this TestCase with
        L{UnixApplicationRunnerSetupEnvironmentTests.daemonize}).
"""
if _twistd_unix is None:
skip = "twistd unix not available"
unset = object()
def setUp(self):
self.root = self.unset
self.cwd = self.unset
self.mask = self.unset
self.daemon = False
self.pid = os.getpid()
self.patch(os, 'chroot', lambda path: setattr(self, 'root', path))
self.patch(os, 'chdir', lambda path: setattr(self, 'cwd', path))
self.patch(os, 'umask', lambda mask: setattr(self, 'mask', mask))
self.patch(_twistd_unix, "daemonize", self.daemonize)
self.runner = UnixApplicationRunner({})
def daemonize(self):
"""
Indicate that daemonization has happened and change the PID so that the
value written to the pidfile can be tested in the daemonization case.
"""
self.daemon = True
self.patch(os, 'getpid', lambda: self.pid + 1)
def test_chroot(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the root of the
filesystem if passed a non-C{None} value for the C{chroot} parameter.
"""
self.runner.setupEnvironment("/foo/bar", ".", True, None, None)
self.assertEqual(self.root, "/foo/bar")
def test_noChroot(self):
"""
L{UnixApplicationRunner.setupEnvironment} does not change the root of
the filesystem if passed C{None} for the C{chroot} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertIdentical(self.root, self.unset)
def test_changeWorkingDirectory(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the working directory
of the process to the path given for the C{rundir} parameter.
"""
self.runner.setupEnvironment(None, "/foo/bar", True, None, None)
self.assertEqual(self.cwd, "/foo/bar")
def test_daemonize(self):
"""
L{UnixApplicationRunner.setupEnvironment} daemonizes the process if
C{False} is passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", False, None, None)
self.assertTrue(self.daemon)
def test_noDaemonize(self):
"""
L{UnixApplicationRunner.setupEnvironment} does not daemonize the
process if C{True} is passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertFalse(self.daemon)
def test_nonDaemonPIDFile(self):
"""
L{UnixApplicationRunner.setupEnvironment} writes the process's PID to
the file specified by the C{pidfile} parameter.
"""
pidfile = self.mktemp()
self.runner.setupEnvironment(None, ".", True, None, pidfile)
fObj = file(pidfile)
pid = int(fObj.read())
fObj.close()
self.assertEqual(pid, self.pid)
def test_daemonPIDFile(self):
"""
L{UnixApplicationRunner.setupEnvironment} writes the daemonized
process's PID to the file specified by the C{pidfile} parameter if
C{nodaemon} is C{False}.
"""
pidfile = self.mktemp()
self.runner.setupEnvironment(None, ".", False, None, pidfile)
fObj = file(pidfile)
pid = int(fObj.read())
fObj.close()
self.assertEqual(pid, self.pid + 1)
def test_umask(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the process umask to
the value specified by the C{umask} parameter.
"""
self.runner.setupEnvironment(None, ".", False, 123, None)
self.assertEqual(self.mask, 123)
def test_noDaemonizeNoUmask(self):
"""
L{UnixApplicationRunner.setupEnvironment} doesn't change the process
umask if C{None} is passed for the C{umask} parameter and C{True} is
passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", True, None, None)
self.assertIdentical(self.mask, self.unset)
def test_daemonizedNoUmask(self):
"""
L{UnixApplicationRunner.setupEnvironment} changes the process umask to
C{0077} if C{None} is passed for the C{umask} parameter and C{False} is
passed for the C{nodaemon} parameter.
"""
self.runner.setupEnvironment(None, ".", False, None, None)
self.assertEqual(self.mask, 0077)
class UnixApplicationRunnerStartApplicationTests(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.startApplication}.
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def test_setupEnvironment(self):
"""
L{UnixApplicationRunner.startApplication} calls
L{UnixApplicationRunner.setupEnvironment} with the chroot, rundir,
nodaemon, umask, and pidfile parameters from the configuration it is
constructed with.
"""
options = twistd.ServerOptions()
options.parseOptions([
'--nodaemon',
'--umask', '0070',
'--chroot', '/foo/chroot',
'--rundir', '/foo/rundir',
'--pidfile', '/foo/pidfile'])
application = service.Application("test_setupEnvironment")
self.runner = UnixApplicationRunner(options)
args = []
def fakeSetupEnvironment(self, chroot, rundir, nodaemon, umask, pidfile):
args.extend((chroot, rundir, nodaemon, umask, pidfile))
# Sanity check
self.assertEqual(
inspect.getargspec(self.runner.setupEnvironment),
inspect.getargspec(fakeSetupEnvironment))
self.patch(UnixApplicationRunner, 'setupEnvironment', fakeSetupEnvironment)
self.patch(UnixApplicationRunner, 'shedPrivileges', lambda *a, **kw: None)
self.patch(app, 'startApplication', lambda *a, **kw: None)
self.runner.startApplication(application)
self.assertEqual(
args,
['/foo/chroot', '/foo/rundir', True, 56, '/foo/pidfile'])
class UnixApplicationRunnerRemovePID(unittest.TestCase):
"""
Tests for L{UnixApplicationRunner.removePID}.
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def test_removePID(self):
"""
L{UnixApplicationRunner.removePID} deletes the file the name of
which is passed to it.
"""
runner = UnixApplicationRunner({})
path = self.mktemp()
os.makedirs(path)
pidfile = os.path.join(path, "foo.pid")
file(pidfile, "w").close()
runner.removePID(pidfile)
self.assertFalse(os.path.exists(pidfile))
def test_removePIDErrors(self):
"""
Calling L{UnixApplicationRunner.removePID} with a non-existent filename logs
an OSError.
"""
runner = UnixApplicationRunner({})
runner.removePID("fakepid")
errors = self.flushLoggedErrors(OSError)
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].value.errno, errno.ENOENT)
class DummyReactor(object):
"""
A dummy reactor, only providing a C{run} method and checking that it
has been called.
@ivar called: if C{run} has been called or not.
@type called: C{bool}
"""
called = False
def run(self):
"""
        A fake run method, checking that it's been called once and only once.
"""
if self.called:
raise RuntimeError("Already called")
self.called = True
class AppProfilingTestCase(unittest.TestCase):
"""
Tests for L{app.AppProfiler}.
"""
def test_profile(self):
"""
L{app.ProfileRunner.run} should call the C{run} method of the reactor
and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
data = file(config["profile"]).read()
self.assertIn("DummyReactor.run", data)
self.assertIn("function calls", data)
if profile is None:
test_profile.skip = "profile module not available"
def _testStats(self, statsClass, profile):
out = StringIO.StringIO()
# Patch before creating the pstats, because pstats binds self.stream to
# sys.stdout early in 2.5 and newer.
stdout = self.patch(sys, 'stdout', out)
# If pstats.Stats can load the data and then reformat it, then the
# right thing probably happened.
stats = statsClass(profile)
stats.print_stats()
stdout.restore()
data = out.getvalue()
self.assertIn("function calls", data)
self.assertIn("(run)", data)
def test_profileSaveStats(self):
"""
With the C{savestats} option specified, L{app.ProfileRunner.run}
should save the raw stats object instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(pstats.Stats, config['profile'])
if profile is None:
test_profileSaveStats.skip = "profile module not available"
def test_withoutProfile(self):
"""
When the C{profile} module is not present, L{app.ProfilerRunner.run}
should raise a C{SystemExit} exception.
"""
savedModules = sys.modules.copy()
config = twistd.ServerOptions()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
sys.modules["profile"] = None
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_profilePrintStatsError(self):
"""
When an error happens during the print of the stats, C{sys.stdout}
should be restored to its initial value.
"""
class ErroneousProfile(profile.Profile):
def print_stats(self):
raise RuntimeError("Boom")
self.patch(profile, "Profile", ErroneousProfile)
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "profile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
oldStdout = sys.stdout
self.assertRaises(RuntimeError, profiler.run, reactor)
self.assertIdentical(sys.stdout, oldStdout)
if profile is None:
test_profilePrintStatsError.skip = "profile module not available"
def test_hotshot(self):
"""
L{app.HotshotRunner.run} should call the C{run} method of the reactor
and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "hotshot"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
data = file(config["profile"]).read()
self.assertIn("run", data)
self.assertIn("function calls", data)
if hotshot is None:
test_hotshot.skip = "hotshot module not available"
def test_hotshotSaveStats(self):
"""
With the C{savestats} option specified, L{app.HotshotRunner.run} should
save the raw stats object instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "hotshot"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(hotshot.stats.load, config['profile'])
if hotshot is None:
test_hotshotSaveStats.skip = "hotshot module not available"
def test_withoutHotshot(self):
"""
When the C{hotshot} module is not present, L{app.HotshotRunner.run}
should raise a C{SystemExit} exception and log the C{ImportError}.
"""
savedModules = sys.modules.copy()
sys.modules["hotshot"] = None
config = twistd.ServerOptions()
config["profiler"] = "hotshot"
profiler = app.AppProfiler(config)
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_hotshotPrintStatsError(self):
"""
When an error happens while printing the stats, C{sys.stdout}
should be restored to its initial value.
"""
class ErroneousStats(pstats.Stats):
def print_stats(self):
raise RuntimeError("Boom")
self.patch(pstats, "Stats", ErroneousStats)
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "hotshot"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
oldStdout = sys.stdout
self.assertRaises(RuntimeError, profiler.run, reactor)
self.assertIdentical(sys.stdout, oldStdout)
if hotshot is None:
test_hotshotPrintStatsError.skip = "hotshot module not available"
def test_cProfile(self):
"""
L{app.CProfileRunner.run} should call the C{run} method of the
reactor and save profile data in the specified file.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "cProfile"
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
data = file(config["profile"]).read()
self.assertIn("run", data)
self.assertIn("function calls", data)
if cProfile is None:
test_cProfile.skip = "cProfile module not available"
def test_cProfileSaveStats(self):
"""
With the C{savestats} option specified,
L{app.CProfileRunner.run} should save the raw stats object
instead of a summary output.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "cProfile"
config["savestats"] = True
profiler = app.AppProfiler(config)
reactor = DummyReactor()
profiler.run(reactor)
self.assertTrue(reactor.called)
self._testStats(pstats.Stats, config['profile'])
if cProfile is None:
test_cProfileSaveStats.skip = "cProfile module not available"
def test_withoutCProfile(self):
"""
When the C{cProfile} module is not present,
L{app.CProfileRunner.run} should raise a C{SystemExit}
exception and log the C{ImportError}.
"""
savedModules = sys.modules.copy()
sys.modules["cProfile"] = None
config = twistd.ServerOptions()
config["profiler"] = "cProfile"
profiler = app.AppProfiler(config)
try:
self.assertRaises(SystemExit, profiler.run, None)
finally:
sys.modules.clear()
sys.modules.update(savedModules)
def test_unknownProfiler(self):
"""
Check that L{app.AppProfiler} raises L{SystemExit} when given an
unknown profiler name.
"""
config = twistd.ServerOptions()
config["profile"] = self.mktemp()
config["profiler"] = "foobar"
error = self.assertRaises(SystemExit, app.AppProfiler, config)
self.assertEquals(str(error), "Unsupported profiler name: foobar")
def test_defaultProfiler(self):
"""
L{app.Profiler} defaults to the hotshot profiler if not specified.
"""
profiler = app.AppProfiler({})
self.assertEquals(profiler.profiler, "hotshot")
def test_profilerNameCaseInsentive(self):
"""
The case of the profiler name passed to L{app.AppProfiler} is not
relevant.
"""
profiler = app.AppProfiler({"profiler": "HotShot"})
self.assertEquals(profiler.profiler, "hotshot")
def _patchFileLogObserver(patch):
"""
Patch L{log.FileLogObserver} to record every call and keep a reference to
the passed log file for tests.
@param patch: a callback for patching (usually L{unittest.TestCase.patch}).
@return: the list that keeps track of the log files.
@rtype: C{list}
"""
logFiles = []
oldFileLobObserver = log.FileLogObserver
def FileLogObserver(logFile):
logFiles.append(logFile)
return oldFileLobObserver(logFile)
patch(log, 'FileLogObserver', FileLogObserver)
return logFiles
class AppLoggerTestCase(unittest.TestCase):
"""
Tests for L{app.AppLogger}.
@ivar observers: list of observers installed during the tests.
@type observers: C{list}
"""
def setUp(self):
"""
Override L{log.addObserver} so that we can trace the observers
installed in C{self.observers}.
"""
self.observers = []
def startLoggingWithObserver(observer):
self.observers.append(observer)
log.addObserver(observer)
self.patch(log, 'startLoggingWithObserver', startLoggingWithObserver)
def tearDown(self):
"""
Remove all installed observers.
"""
for observer in self.observers:
log.removeObserver(observer)
def _checkObserver(self, logs):
"""
Ensure that initial C{twistd} logs are written to the given list.
@type logs: C{list}
@param logs: The list whose C{append} method was specified as the
initial log observer.
"""
self.assertEquals(self.observers, [logs.append])
self.assertIn("starting up", logs[0]["message"][0])
self.assertIn("reactor class", logs[1]["message"][0])
def test_start(self):
"""
L{app.AppLogger.start} calls L{log.addObserver}, and then writes some
messages about twistd and the reactor.
"""
logger = app.AppLogger({})
observer = []
logger._getLogObserver = lambda: observer.append
logger.start(Componentized())
self._checkObserver(observer)
def test_startUsesApplicationLogObserver(self):
"""
When the L{ILogObserver} component is available on the application,
that object will be used as the log observer instead of constructing a
new one.
"""
application = Componentized()
logs = []
application.setComponent(ILogObserver, logs.append)
logger = app.AppLogger({})
logger.start(application)
self._checkObserver(logs)
def test_getLogObserverStdout(self):
"""
When logfile is empty or set to C{-}, L{app.AppLogger._getLogObserver}
returns a log observer pointing at C{sys.stdout}.
"""
logger = app.AppLogger({"logfile": "-"})
logFiles = _patchFileLogObserver(self.patch)
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertIdentical(logFiles[0], sys.stdout)
logger = app.AppLogger({"logfile": ""})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 2)
self.assertIdentical(logFiles[1], sys.stdout)
def test_getLogObserverFile(self):
"""
When passing the C{logfile} option, L{app.AppLogger._getLogObserver}
returns a log observer pointing at the specified path.
"""
logFiles = _patchFileLogObserver(self.patch)
filename = self.mktemp()
logger = app.AppLogger({"logfile": filename})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertEquals(logFiles[0].path,
os.path.abspath(filename))
def test_stop(self):
"""
        L{app.AppLogger.stop} removes the observer created in C{start}, and
        reinitializes its C{_observer} so that calling C{stop} several times
        doesn't break.
"""
removed = []
observer = object()
def remove(observer):
removed.append(observer)
self.patch(log, 'removeObserver', remove)
logger = app.AppLogger({})
logger._observer = observer
logger.stop()
self.assertEquals(removed, [observer])
logger.stop()
self.assertEquals(removed, [observer])
self.assertIdentical(logger._observer, None)
class UnixAppLoggerTestCase(unittest.TestCase):
"""
Tests for L{UnixAppLogger}.
@ivar signals: list of signal handlers installed.
@type signals: C{list}
"""
if _twistd_unix is None:
skip = "twistd unix not available"
def setUp(self):
"""
        Fake C{signal.signal} so that handlers are not actually installed but
        are recorded in C{self.signals} instead.
"""
self.signals = []
def fakeSignal(sig, f):
self.signals.append((sig, f))
self.patch(signal, "signal", fakeSignal)
def test_getLogObserverStdout(self):
"""
When non-daemonized and C{logfile} is empty or set to C{-},
L{UnixAppLogger._getLogObserver} returns a log observer pointing at
C{sys.stdout}.
"""
logFiles = _patchFileLogObserver(self.patch)
logger = UnixAppLogger({"logfile": "-", "nodaemon": True})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertIdentical(logFiles[0], sys.stdout)
logger = UnixAppLogger({"logfile": "", "nodaemon": True})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 2)
self.assertIdentical(logFiles[1], sys.stdout)
def test_getLogObserverStdoutDaemon(self):
"""
When daemonized and C{logfile} is set to C{-},
L{UnixAppLogger._getLogObserver} raises C{SystemExit}.
"""
logger = UnixAppLogger({"logfile": "-", "nodaemon": False})
error = self.assertRaises(SystemExit, logger._getLogObserver)
self.assertEquals(str(error), "Daemons cannot log to stdout, exiting!")
def test_getLogObserverFile(self):
"""
When C{logfile} contains a file name, L{app.AppLogger._getLogObserver}
returns a log observer pointing at the specified path, and a signal
handler rotating the log is installed.
"""
logFiles = _patchFileLogObserver(self.patch)
filename = self.mktemp()
logger = UnixAppLogger({"logfile": filename})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertEquals(logFiles[0].path,
os.path.abspath(filename))
self.assertEquals(len(self.signals), 1)
self.assertEquals(self.signals[0][0], signal.SIGUSR1)
d = Deferred()
def rotate():
d.callback(None)
logFiles[0].rotate = rotate
rotateLog = self.signals[0][1]
rotateLog(None, None)
return d
def test_getLogObserverDontOverrideSignalHandler(self):
"""
If a signal handler is already installed,
L{UnixAppLogger._getLogObserver} doesn't override it.
"""
def fakeGetSignal(sig):
self.assertEquals(sig, signal.SIGUSR1)
return object()
self.patch(signal, "getsignal", fakeGetSignal)
filename = self.mktemp()
logger = UnixAppLogger({"logfile": filename})
observer = logger._getLogObserver()
self.assertEquals(self.signals, [])
def test_getLogObserverDefaultFile(self):
"""
When daemonized and C{logfile} is empty, the observer returned by
L{UnixAppLogger._getLogObserver} points at C{twistd.log} in the current
directory.
"""
logFiles = _patchFileLogObserver(self.patch)
logger = UnixAppLogger({"logfile": "", "nodaemon": False})
observer = logger._getLogObserver()
self.assertEquals(len(logFiles), 1)
self.assertEquals(logFiles[0].path,
os.path.abspath("twistd.log"))
def test_getLogObserverSyslog(self):
"""
If C{syslog} is set to C{True}, L{UnixAppLogger._getLogObserver} starts
a L{syslog.SyslogObserver} with given C{prefix}.
"""
class fakesyslogobserver(object):
def __init__(self, prefix):
fakesyslogobserver.prefix = prefix
def emit(self, eventDict):
pass
self.patch(syslog, "SyslogObserver", fakesyslogobserver)
logger = UnixAppLogger({"syslog": True, "prefix": "test-prefix"})
observer = logger._getLogObserver()
self.assertEquals(fakesyslogobserver.prefix, "test-prefix")
if syslog is None:
test_getLogObserverSyslog.skip = "Syslog not available"
class DeprecationTests(unittest.TestCase):
"""
Tests for deprecated features.
"""
def test_initialLog(self):
"""
L{app.initialLog} is deprecated.
"""
logs = []
log.addObserver(logs.append)
self.addCleanup(log.removeObserver, logs.append)
self.callDeprecated(Version("Twisted", 8, 2, 0), app.initialLog)
self.assertEquals(len(logs), 2)
self.assertIn("starting up", logs[0]["message"][0])
self.assertIn("reactor class", logs[1]["message"][0])
|
apache-2.0
|
OMFGBKANG/nk2
|
scripts/build-all.py
|
282
|
8889
|
#! /usr/bin/env python
# Copyright (c) 2009, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/msm[0-9]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
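# Illustration of the mapping scan_configs() builds (the defconfig names here
# are hypothetical; the real set depends on the tree being built):
#
#   scan_configs()
#   # -> {'msm7630_surf': 'arch/arm/configs/msm7630_surf_defconfig',
#   #     'qsd8250_surf': 'arch/arm/configs/qsd8250_surf_defconfig'}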
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
shutil.copyfile(dotconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
global make_command
make_command = ["oldconfig"]
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
|
gpl-2.0
|
ychen820/microblog
|
src/lib/flask/templating.py
|
783
|
4707
|
# -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import posixpath
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered
from .module import blueprint_is_module
from ._compat import itervalues, iteritems
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
for loader, local_name in self._iter_loaders(template):
try:
return loader.get_source(environment, local_name)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
def _iter_loaders(self, template):
loader = self.app.jinja_loader
if loader is not None:
yield loader, template
# old style module based loaders in case we are dealing with a
# blueprint that is an old style module
try:
module, local_name = posixpath.normpath(template).split('/', 1)
blueprint = self.app.blueprints[module]
if blueprint_is_module(blueprint):
loader = blueprint.jinja_loader
if loader is not None:
yield loader, local_name
except (ValueError, KeyError):
pass
for blueprint in itervalues(self.app.blueprints):
if blueprint_is_module(blueprint):
continue
loader = blueprint.jinja_loader
if loader is not None:
yield loader, template
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for name, blueprint in iteritems(self.app.blueprints):
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
prefix = ''
if blueprint_is_module(blueprint):
prefix = name + '/'
result.add(prefix + template)
return list(result)
def _render(template, context, app):
"""Renders the template and fires the signal"""
rv = template.render(context)
template_rendered.send(app, template=template, context=context)
return rv
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable with template names
the first one existing will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context, ctx.app)
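# A minimal usage sketch for render_template() (the route, template name and
# variable below are hypothetical and assume a "hello.html" file exists in the
# application's template folder):
#
#     from flask import Flask, render_template
#
#     app = Flask(__name__)
#
#     @app.route('/hello/<name>')
#     def hello(name):
#         return render_template('hello.html', name=name)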
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app)
|
bsd-3-clause
|
pombredanne/django-allauth-api
|
src/allauth_api/account/rest_framework/authentication.py
|
1
|
4631
|
from django.contrib.auth import logout as auth_logout
from rest_framework.status import HTTP_401_UNAUTHORIZED, HTTP_204_NO_CONTENT
from rest_framework.response import Response
from rest_framework.authentication import BaseAuthentication, BasicAuthentication
from rest_framework.exceptions import AuthenticationFailed
from allauth.account import app_settings
from .utils import perform_login, RestFrameworkTokenGenerator, serializer_error_string
from .serializers import UserPassSerializer
from oauth2_provider.views.base import TokenView # , RevokeTokenView
import logging
logger = logging.getLogger(__name__)
class AllAuthMixin(object):
def authenticate(self, request):
input_data = self.get_input_data(request)
serializer = self.serializer_class(data=input_data)
if serializer.is_valid():
return (serializer.validated_data['user'], None)
else:
raise AuthenticationFailed(serializer_error_string(serializer.errors))
class HeaderDataAuthentication(AllAuthMixin, BaseAuthentication):
"""
    An authentication method that receives credentials in HTTP headers
"""
def get_input_data(self, request):
return request.META
class PostDataAuthentication(AllAuthMixin, BaseAuthentication):
"""
An authentication method that looks for user credentials in the request data
"""
serializer_class = UserPassSerializer
def get_input_data(self, request):
return request.DATA
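# Usage sketch: these classes plug into Django REST framework's standard
# per-view authentication hook; the view below is hypothetical:
#
#     from rest_framework.views import APIView
#     from rest_framework.response import Response
#
#     class WhoAmIView(APIView):
#         authentication_classes = (PostDataAuthentication,)
#
#         def post(self, request):
#             return Response({'user': str(request.user)})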
class UserPassAuthentication(BaseAuthentication):
"""
An authentication method that looks for username/password combination like basic HTTP
authentication or as simple post parameters
"""
    def authenticate(self, request):
        result = None
        try:
            result = BasicAuthentication().authenticate(request)
        except AuthenticationFailed:
            pass
        if result is None:
            result = PostDataAuthentication().authenticate(request)
        return result
class BaseLogin(object):
"""
Base class for a login handler. All Login handlers should subclass this class
"""
auth_class = BaseAuthentication
def login(self, request, *args, **kwargs):
logger.debug("BaseLogin")
user = None
try:
user, _ = self.authenticate(request)
except AuthenticationFailed as err:
return Response({'message': err.detail}, err.status_code)
if user is not None:
return perform_login(request, user, email_verification=app_settings.EMAIL_VERIFICATION,
return_data=self.get_return_data(request, user),
signal_kwargs=self.get_signal_kwargs(request, user))
return Response({'message': 'User authentication failed'}, HTTP_401_UNAUTHORIZED)
def logout(self, request, **kwargs):
auth_logout(request)
return Response(None, HTTP_204_NO_CONTENT)
def authenticate(self, request, **kwargs):
return self.auth_class().authenticate(request)
def get_signal_kwargs(self, request, user):
return {}
def get_return_data(self, request, user):
return {}
class BasicLogin(BaseLogin):
"""
A login class that just uses the standard Django authentication
"""
auth_class = UserPassAuthentication
class TokenLogin(BasicLogin):
"""
A login class that accepts user/pass combinations in header or post data and returns a user
    authentication token. This method, in its default configuration, is only available if
rest_framework.authtoken is in installed_apps
"""
token_generator_class = RestFrameworkTokenGenerator
def get_return_data(self, request, user):
return {'token': self.token_generator_class().get_token(user).key}
def logout(self, request, **kwargs):
self.token_generator_class().revoke_token(request)
return Response(None, HTTP_204_NO_CONTENT)
class OAuth2Login(BaseLogin):
"""
A login class that accepts oauth2 authentication requests and returns the appropriate
    access tokens. This login method, in its default configuration, is only available if
oauth2_provider is in installed_apps
"""
def login(self, request, *args, **kwargs):
logger.debug("OAuth2Login")
view = TokenView.as_view()
return view(request._request, *args, **kwargs)
def logout(self, request, **kwargs):
# TODO: uncomment when update django-oauth-toolkit (only repo has revoke token right now)
# return RevokeTokenView(request, *args, **kwargs)
        return super(OAuth2Login, self).logout(request, **kwargs)
|
bsd-2-clause
|
dbbhattacharya/kitsune
|
vendor/packages/logilab-common/deprecation.py
|
6
|
4472
|
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Deprecation utilities.
"""
__docformat__ = "restructuredtext en"
import sys
from warnings import warn
class class_deprecated(type):
"""metaclass to print a warning on instantiation of a deprecated class"""
def __call__(cls, *args, **kwargs):
msg = getattr(cls, "__deprecation_warning__",
"%s is deprecated" % cls.__name__)
warn(msg, DeprecationWarning, stacklevel=2)
return type.__call__(cls, *args, **kwargs)
def class_renamed(old_name, new_class, message=None):
"""automatically creates a class which fires a DeprecationWarning
when instantiated.
>>> Set = class_renamed('Set', set, 'Set is now replaced by set')
>>> s = Set()
sample.py:57: DeprecationWarning: Set is now replaced by set
s = Set()
>>>
"""
clsdict = {}
if message is None:
message = '%s is deprecated, use %s' % (old_name, new_class.__name__)
clsdict['__deprecation_warning__'] = message
try:
# new-style class
return class_deprecated(old_name, (new_class,), clsdict)
except (NameError, TypeError):
# old-style class
class DeprecatedClass(new_class):
"""FIXME: There might be a better way to handle old/new-style class
"""
def __init__(self, *args, **kwargs):
warn(message, DeprecationWarning, stacklevel=2)
new_class.__init__(self, *args, **kwargs)
return DeprecatedClass
def class_moved(new_class, old_name=None, message=None):
"""nice wrapper around class_renamed when a class has been moved into
another module
"""
if old_name is None:
old_name = new_class.__name__
if message is None:
message = 'class %s is now available as %s.%s' % (
old_name, new_class.__module__, new_class.__name__)
return class_renamed(old_name, new_class, message)
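# Usage sketch for class_moved (class and module names are hypothetical): keep
# ``OldTable`` importable from its historical location after the implementation
# moved elsewhere:
#
#     from mypackage.newmod import Table
#     OldTable = class_moved(Table, old_name='OldTable')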
def deprecated(reason=None, stacklevel=2):
"""Decorator that raises a DeprecationWarning to print a message
when the decorated function is called.
"""
def deprecated_decorator(func):
message = reason or 'this function is deprecated, use %s instead'
if '%s' in message:
message = message % func.func_name
def wrapped(*args, **kwargs):
warn(message, DeprecationWarning, stacklevel=stacklevel)
return func(*args, **kwargs)
wrapped.__name__ = func.__name__
wrapped.__doc__ = func.__doc__
return wrapped
return deprecated_decorator
@deprecated('replace deprecated_function(f,m) with deprecated(m)(f)')
def deprecated_function(func, message=None):
return deprecated(message)(func)
def moved(modpath, objname):
"""use to tell that a callable has been moved to a new module.
It returns a callable wrapper, so that when its called a warning is printed
telling where the object can be found, import is done (and not before) and
the actual object is called.
NOTE: the usage is somewhat limited on classes since it will fail if the
wrapper is use in a class ancestors list, use the `class_moved` function
instead (which has no lazy import feature though).
"""
def callnew(*args, **kwargs):
from logilab.common.modutils import load_module_from_name
message = "object %s has been moved to module %s" % (objname, modpath)
warn(message, DeprecationWarning, stacklevel=2)
m = load_module_from_name(modpath)
return getattr(m, objname)(*args, **kwargs)
return callnew
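# Usage sketch for moved() (module and attribute names are hypothetical):
#
#     get_table = moved('mypackage.newmod', 'get_table')
#
# Calling get_table(...) warns that the object now lives in mypackage.newmod,
# imports that module lazily and delegates the call to the real function.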
obsolete = deprecated('obsolete is deprecated, use deprecated instead')(deprecated)
|
bsd-3-clause
|
gauribhoite/personfinder
|
env/google_appengine/lib/django-1.4/django/utils/dates.py
|
488
|
2237
|
"Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
WEEKDAYS = {
0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
5:_('Saturday'), 6:_('Sunday')
}
WEEKDAYS_ABBR = {
0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
5:_('Sat'), 6:_('Sun')
}
WEEKDAYS_REV = {
'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
'saturday':5, 'sunday':6
}
MONTHS = {
1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
12:_('December')
}
MONTHS_3 = {
1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
MONTHS_3_REV = {
'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
1: pgettext_lazy('abbrev. month', 'Jan.'),
2: pgettext_lazy('abbrev. month', 'Feb.'),
3: pgettext_lazy('abbrev. month', 'March'),
4: pgettext_lazy('abbrev. month', 'April'),
5: pgettext_lazy('abbrev. month', 'May'),
6: pgettext_lazy('abbrev. month', 'June'),
7: pgettext_lazy('abbrev. month', 'July'),
8: pgettext_lazy('abbrev. month', 'Aug.'),
9: pgettext_lazy('abbrev. month', 'Sept.'),
10: pgettext_lazy('abbrev. month', 'Oct.'),
11: pgettext_lazy('abbrev. month', 'Nov.'),
12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
|
apache-2.0
|
astrikov-d/dartcms
|
dartcms/apps/users/urls.py
|
1
|
1621
|
# -*- coding: utf-8 -*-
from dartcms.utils.config import DartCMSConfig
from dartcms.views import DeleteObjectView, GridView
from django.conf.urls import url
from django.utils.translation import ugettext_lazy as _
from .forms import UserForm
from .models import CMSUser
from .views import ChangePasswordView, CMSUserInsertView, CMSUserUpdateView
app_name = "users"
config = DartCMSConfig({
'model': CMSUser,
'grid': {
'grid_columns': [
{'field': CMSUser.USERNAME_FIELD, 'width': '60%'},
{'field': 'last_login', 'width': '20%'},
{'field': 'is_staff', 'width': '10%'},
{'field': 'is_active', 'width': '10%'},
],
'search': [
CMSUser.USERNAME_FIELD, 'email'
],
'additional_grid_actions': [
{
'url': 'change-password', 'label': _('Change Password'), 'icon': 'edit',
'required_permissions': '__all__', 'kwarg_name': 'pk'
}
],
'model_properties': [CMSUser.USERNAME_FIELD]
},
'form': {
'form_class': UserForm
}
})
urlpatterns = config.get_urls(exclude=['addition', 'insert', 'update', 'delete']) + [
url(r'^$', GridView.as_view(**config.grid), name='index'),
url(r'^insert/$', CMSUserInsertView.as_view(**config.form), name='insert'),
url(r'^update/(?P<pk>\d+)/$', CMSUserUpdateView.as_view(**config.form), name='update'),
url(r'^delete/(?P<pk>\d+)/$', DeleteObjectView.as_view(**config.base), name='delete'),
url(r'^change-password/(?P<pk>\d+)/$', ChangePasswordView.as_view(), name='change_password'),
]
|
mit
|
mulkieran/pyblk
|
src/pyblk/_utils.py
|
1
|
2609
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <[email protected]>
"""
pyblk._utils
============
Generic utilities.
.. moduleauthor:: mulhern <[email protected]>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import networkx as nx
class GraphUtils(object):
"""
    Generic utilities for graphs.
"""
# pylint: disable=too-few-public-methods
@staticmethod
def get_roots(graph):
"""
Get the roots of a graph.
:param `DiGraph` graph: the graph
:returns: the roots of the graph
:rtype: list of `Node`
"""
return [n for n in graph if not nx.ancestors(graph, n)]
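# Small illustration (hypothetical graph): with edges a -> b and a -> c, only
# 'a' has no ancestors, so it is the sole root.
#
#     g = nx.DiGraph()
#     g.add_edges_from([('a', 'b'), ('a', 'c')])
#     GraphUtils.get_roots(g)   # -> ['a']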
class SortingUtils(object):
"""
Utilities helpful for sorting.
"""
# pylint: disable=too-few-public-methods
@staticmethod
def str_key_func_gen(func):
"""
A wrapper function that generates a function that yields a str
for all values.
:param func: a function that yields a result when applied to an arg
:type func: 'a -> *
"""
@functools.wraps(func)
def key_func(value):
"""
Transforms the result of func to a str type if it is not already.
None becomes '', so that its value will appear first, all other
non-str values are converted to str.
            :param value: a value to pass to func
"""
res = func(value)
return '' if res is None else str(res)
return key_func
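# Usage sketch (hypothetical objects): sort devices by a possibly-None name,
# with the None entries sorting first because they map to '':
#
#     key = SortingUtils.str_key_func_gen(lambda dev: getattr(dev, 'name', None))
#     devices.sort(key=key)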
|
gpl-2.0
|
ThinkingBridge/platform_external_chromium_org
|
tools/json_to_struct/json_to_struct.py
|
69
|
6904
|
#!/usr/bin/env python
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Format for the JSON schema file:
# {
# "type_name": "DesiredCStructName",
# "headers": [ // Optional list of headers to be included by the .h.
# "path/to/header.h"
# ],
# "schema": [ // Fields of the generated structure.
# {
# "field": "my_enum_field",
# "type": "enum", // Either: int, string, string16, enum, array.
# "default": "RED", // Optional. Cannot be used for array.
# "ctype": "Color" // Only for enum, specify the C type.
# },
# {
# "field": "my_int_array_field", // my_int_array_field_size will also
# "type": "array", // be generated.
# "contents": {
# "type": "int" // Either: int, string, string16, enum, array.
# }
# },
# ...
# ]
# }
#
# Format for the JSON description file:
# {
# "int_variables": { // An optional list of constant int variables.
# "kDesiredConstantName": 45
# },
# "elements": { // All the elements for which to create static
# // initialization code in the .cc file.
# "my_const_variable": {
# "my_int_field": 10,
# "my_string_field": "foo bar",
# "my_enum_field": "BLACK",
# "my_int_array_field": [ 1, 2, 3, 5, 7 ],
# },
# "my_other_const_variable": {
# ...
# }
# }
# }
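# As a rough illustration, a schema/description pair along these lines (the
# type, field and variable names are made up for the example):
#
# schema.json:
# {
#   "type_name": "ColorInfo",
#   "schema": [
#     { "field": "name", "type": "string" },
#     { "field": "red", "type": "int" }
#   ]
# }
#
# description.json:
# {
#   "elements": {
#     "kBlue": { "name": "blue", "red": 0 }
#   }
# }
#
# would yield a ColorInfo struct definition in the generated .h, an
# "extern const ColorInfo kBlue;" declaration, and the corresponding static
# initializer in the generated .cc.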
import json
from datetime import datetime
import os.path
import sys
import optparse
import re
_script_path = os.path.realpath(__file__)
sys.path.insert(0, os.path.normpath(_script_path + "/../../json_comment_eater"))
try:
import json_comment_eater
finally:
sys.path.pop(0)
import struct_generator
import element_generator
HEAD = """// Copyright %d The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// GENERATED FROM THE SCHEMA DEFINITION AND DESCRIPTION IN
// %s
// %s
// DO NOT EDIT.
"""
def _GenerateHeaderGuard(h_filename):
"""Generates the string used in #ifndef guarding the header file.
"""
result = re.sub('[%s\\\\.]' % os.sep, '_', h_filename.upper())
return re.sub('^_*', '', result) + '_' # Remove leading underscores.
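# Worked example (hypothetical filename, assuming a POSIX os.sep):
#
#   _GenerateHeaderGuard('chrome/browser/foo_bar.h')
#   # -> 'CHROME_BROWSER_FOO_BAR_H_'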
def _GenerateH(basepath, fileroot, head, namespace, schema, description):
"""Generates the .h file containing the definition of the structure specified
by the schema.
Args:
basepath: The base directory in which files are generated.
fileroot: The filename and path, relative to basepath, of the file to
create, without an extension.
head: The string to output as the header of the .h file.
namespace: A string corresponding to the C++ namespace to use.
schema: A dict containing the schema. See comment at the top of this file.
description: A dict containing the description. See comment at the top of
this file.
"""
h_filename = fileroot + '.h'
with open(os.path.join(basepath, h_filename), 'w') as f:
f.write(head)
f.write('#include <cstddef>\n')
f.write('\n')
header_guard = _GenerateHeaderGuard(h_filename)
f.write('#ifndef %s\n' % header_guard)
f.write('#define %s\n' % header_guard)
f.write('\n')
for header in schema.get('headers', []):
f.write('#include "%s"\n' % header)
f.write('\n')
if namespace:
f.write('namespace %s {\n' % namespace)
f.write('\n')
f.write(struct_generator.GenerateStruct(
schema['type_name'], schema['schema']))
f.write('\n')
    for var_name, value in description.get('int_variables', {}).items():
f.write('extern const int %s;\n' % var_name)
f.write('\n')
for element_name, element in description['elements'].items():
f.write('extern const %s %s;\n' % (schema['type_name'], element_name))
if namespace:
f.write('\n')
f.write('} // namespace %s\n' % namespace)
f.write('\n')
f.write( '#endif // %s\n' % header_guard)
def _GenerateCC(basepath, fileroot, head, namespace, schema, description):
"""Generates the .cc file containing the static initializers for the
of the elements specified in the description.
Args:
basepath: The base directory in which files are generated.
fileroot: The filename and path, relative to basepath, of the file to
create, without an extension.
head: The string to output as the header of the .cc file.
namespace: A string corresponding to the C++ namespace to use.
schema: A dict containing the schema. See comment at the top of this file.
description: A dict containing the description. See comment at the top of
this file.
"""
with open(os.path.join(basepath, fileroot + '.cc'), 'w') as f:
f.write(head)
f.write('#include "%s"\n' % (fileroot + '.h'))
f.write('\n')
if namespace:
f.write('namespace %s {\n' % namespace)
f.write('\n')
f.write(element_generator.GenerateElements(schema['type_name'],
schema['schema'], description))
if namespace:
f.write('\n')
f.write('} // namespace %s\n' % namespace)
def _Load(filename):
"""Loads a JSON file int a Python object and return this object.
"""
# TODO(beaudoin): When moving to Python 2.7 use object_pairs_hook=OrderedDict.
with open(filename, 'r') as handle:
result = json.loads(json_comment_eater.Nom(handle.read()))
return result
if __name__ == '__main__':
parser = optparse.OptionParser(
      description='Generates a C++ array of structs from a JSON description.',
usage='usage: %prog [option] -s schema description')
parser.add_option('-b', '--destbase',
help='base directory of generated files.')
parser.add_option('-d', '--destdir',
help='directory to output generated files, relative to destbase.')
parser.add_option('-n', '--namespace',
help='C++ namespace for generated files. e.g search_providers.')
parser.add_option('-s', '--schema', help='path to the schema file, '
'mandatory.')
(opts, args) = parser.parse_args()
if not opts.schema:
parser.error('You must specify a --schema.')
description_filename = os.path.normpath(args[0])
root, ext = os.path.splitext(description_filename)
shortroot = os.path.split(root)[1]
if opts.destdir:
output_root = os.path.join(os.path.normpath(opts.destdir), shortroot)
else:
output_root = shortroot
if opts.destbase:
basepath = os.path.normpath(opts.destbase)
else:
basepath = ''
schema = _Load(opts.schema)
description = _Load(description_filename)
head = HEAD % (datetime.now().year, opts.schema, description_filename)
_GenerateH(basepath, output_root, head, opts.namespace, schema, description)
_GenerateCC(basepath, output_root, head, opts.namespace, schema, description)
|
bsd-3-clause
|
dvliman/jaikuengine
|
.google_appengine/lib/django-1.2/django/contrib/auth/decorators.py
|
63
|
1940
|
try:
from functools import update_wrapper, wraps
except ImportError:
from django.utils.functional import update_wrapper, wraps # Python 2.4 fallback.
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.http import HttpResponseRedirect
from django.utils.decorators import available_attrs
from django.utils.http import urlquote
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
"""
if not login_url:
from django.conf import settings
login_url = settings.LOGIN_URL
def decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
if test_func(request.user):
return view_func(request, *args, **kwargs)
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return HttpResponseRedirect('%s?%s=%s' % tup)
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view)
return decorator
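# Usage sketch (the view below is hypothetical): restrict a view to staff
# users, redirecting everyone else to the login page:
#
#     @user_passes_test(lambda u: u.is_staff)
#     def staff_dashboard(request):
#         return HttpResponseRedirect('/reports/')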
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME):
"""
Decorator for views that checks that the user is logged in, redirecting
to the log-in page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_authenticated(),
redirect_field_name=redirect_field_name
)
if function:
return actual_decorator(function)
return actual_decorator
def permission_required(perm, login_url=None):
"""
Decorator for views that checks whether a user has a particular permission
enabled, redirecting to the log-in page if necessary.
"""
return user_passes_test(lambda u: u.has_perm(perm), login_url=login_url)
|
apache-2.0
|
meejah/torperf
|
analyze_guards.py
|
2
|
2559
|
#!/usr/bin/python
#
# This script takes a list of extradata files and tells you some statistics
# about the guard selection used by checking against the current consensus.
#
# Use the script like this:
# ./analyze_guards.py slowratio50kb.extradata slowratio1mb50kb.extradata
#
# It should then print out ranking stats one per file. Use your brain to
# determine if these stats make sense for the run you selected.
import sys
import math
import TorCtl.TorCtl
import TorCtl.TorUtil
TorCtl.TorUtil.loglevel = "NOTICE"
HOST="127.0.0.1"
PORT=9051
def analyze_list(router_map, idhex_list):
min_rank = len(router_map)
tot_rank = 0
max_rank = 0
absent = 0
for idhex in idhex_list:
if idhex not in router_map:
absent += 1
continue
rank = router_map[idhex].list_rank
tot_rank += rank
if rank < min_rank: min_rank = rank
if rank > max_rank: max_rank = rank
avg = float(tot_rank)/(len(idhex_list)-absent)
varience = 0
for idhex in idhex_list:
if idhex not in router_map: continue
rank = router_map[idhex].list_rank
varience += (rank-avg)*(rank-avg)
return (min_rank, avg, math.sqrt(varience/(len(idhex_list)-absent-1)), max_rank, absent)
def process_file(router_map, file_name):
f = file(file_name, "r")
idhex_list = f.readlines()
guard_list = []
for i in xrange(len(idhex_list)):
line = idhex_list[i].split()
path = None
used = False
for word in line:
if word.startswith("PATH="): path = word[5:]
if word.startswith("USED_BY"): used = True
if path and used:
guard = path.split(",")
guard_list.append(guard[0])
print "Guard rank stats (min, avg, dev, total, absent): "
print file_name + ": " + str(analyze_list(router_map, guard_list))
def main():
c = TorCtl.TorCtl.connect(HOST, PORT)
sorted_rlist = filter(lambda r: r.desc_bw > 0, c.read_routers(c.get_network_status()))
router_map = {}
for r in sorted_rlist: router_map["$"+r.idhex] = r
if "ratio" in sys.argv[1]:
print "Using ratio rankings"
def ratio_cmp(r1, r2):
if r1.bw/float(r1.desc_bw) > r2.bw/float(r2.desc_bw):
return -1
elif r1.bw/float(r1.desc_bw) < r2.bw/float(r2.desc_bw):
return 1
else:
return 0
sorted_rlist.sort(ratio_cmp)
else:
print "Using consensus bw rankings"
sorted_rlist.sort(lambda x, y: cmp(y.bw, x.bw))
for i in xrange(len(sorted_rlist)): sorted_rlist[i].list_rank = i
for file_name in sys.argv[1:]:
process_file(router_map, file_name)
if __name__ == '__main__':
main()
|
bsd-3-clause
|
thinkopensolutions/account-fiscal-rule
|
account_fiscal_position_rule_sale/models/sale.py
|
2
|
1295
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009-TODAY Akretion <http://www.akretion.com>
# @author Sébastien BEAU <[email protected]>
# @author Renato Lima <[email protected]>
# @author Raphaël Valyi <[email protected]>
# Copyright 2012 Camptocamp SA
# @author: Guewen Baconnier
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
from openerp import models, api
class SaleOrder(models.Model):
_inherit = 'sale.order'
def _fiscal_position_map(self, **kwargs):
ctx = dict(self._context)
ctx.update({'use_domain': ('use_sale', '=', True)})
return self.env['account.fiscal.position.rule'].with_context(
ctx).apply_fiscal_mapping(**kwargs)
@api.onchange('partner_id', 'partner_invoice_id',
'partner_shipping_id', 'company_id')
def onchange_fiscal_position_map(self):
kwargs = {
'company_id': self.company_id,
'partner_id': self.partner_id,
'partner_invoice_id': self.partner_invoice_id,
'partner_shipping_id': self.partner_shipping_id,
}
obj_fiscal_position = self._fiscal_position_map(**kwargs)
if obj_fiscal_position:
self.fiscal_position_id = obj_fiscal_position.id
|
agpl-3.0
|
mith1979/ansible_automation
|
applied_python/applied_python/lib/python2.7/site-packages/pygal/graph/base.py
|
2
|
8663
|
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""Base for pygal charts"""
from __future__ import division
import os
from functools import reduce
from uuid import uuid4
from pygal._compat import is_list_like
from pygal.adapters import decimal_to_float, not_zero, positive
from pygal.config import Config, SerieConfig
from pygal.serie import Serie
from pygal.state import State
from pygal.svg import Svg
from pygal.util import compose, ident
from pygal.view import Box, Margin
class BaseGraph(object):
"""Chart internal behaviour related functions"""
_adapters = []
def __init__(self, config=None, **kwargs):
"""Config preparation and various initialization"""
if config:
if isinstance(config, type):
config = config()
else:
config = config.copy()
else:
config = Config()
config(**kwargs)
self.config = config
self.state = None
self.uuid = str(uuid4())
self.raw_series = []
self.raw_series2 = []
self.xml_filters = []
def __setattr__(self, name, value):
"""Set an attribute on the class or in the state if there is one"""
if name.startswith('__') or getattr(self, 'state', None) is None:
super(BaseGraph, self).__setattr__(name, value)
else:
setattr(self.state, name, value)
def __getattribute__(self, name):
"""Get an attribute from the class or from the state if there is one"""
if name.startswith('__') or name == 'state' or getattr(
self, 'state', None
) is None or name not in self.state.__dict__:
return super(BaseGraph, self).__getattribute__(name)
return getattr(self.state, name)
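# Hedged sketch of the attribute routing implemented by the two methods above
# (illustration only, not part of pygal; 'some_attr' is a made-up name):
#
#   g = BaseGraph()
#   g.some_attr = 1        # state is None -> stored on the instance itself
#   g.state = State(g)     # what setup() does before rendering
#   g.some_attr = 2        # __setattr__ redirects the write into g.state
#   g.some_attr            # __getattribute__ finds it in state.__dict__ -> 2
#   del g.state            # teardown() deletes and resets the state ...
#   g.state = None
#   g.some_attr            # ... so the instance value, 1, is visible again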
def prepare_values(self, raw, offset=0):
"""Prepare the values to start with sane values"""
from pygal.graph.map import BaseMap
from pygal import Histogram
if self.zero == 0 and isinstance(self, BaseMap):
self.zero = 1
if self.x_label_rotation:
self.x_label_rotation %= 360
if self.y_label_rotation:
self.y_label_rotation %= 360
for key in ('x_labels', 'y_labels'):
if getattr(self, key):
setattr(self, key, list(getattr(self, key)))
if not raw:
return
adapters = list(self._adapters) or [lambda x:x]
if self.logarithmic:
for fun in not_zero, positive:
if fun in adapters:
adapters.remove(fun)
adapters = adapters + [positive, not_zero]
adapters = adapters + [decimal_to_float]
self._adapt = reduce(compose, adapters) if not self.strict else ident
self._x_adapt = reduce(
compose, self._x_adapters) if not self.strict and getattr(
self, '_x_adapters', None) else ident
series = []
raw = [(
title,
list(raw_values) if not isinstance(
raw_values, dict) else raw_values,
serie_config_kwargs
) for title, raw_values, serie_config_kwargs in raw]
width = max([len(values) for _, values, _ in raw] +
[len(self.x_labels or [])])
for title, raw_values, serie_config_kwargs in raw:
metadata = {}
values = []
if isinstance(raw_values, dict):
if isinstance(self, BaseMap):
raw_values = list(raw_values.items())
else:
value_list = [None] * width
for k, v in raw_values.items():
if k in (self.x_labels or []):
value_list[self.x_labels.index(k)] = v
raw_values = value_list
for index, raw_value in enumerate(
raw_values + (
(width - len(raw_values)) * [None] # aligning values
if len(raw_values) < width else [])):
if isinstance(raw_value, dict):
raw_value = dict(raw_value)
value = raw_value.pop('value', None)
metadata[index] = raw_value
else:
value = raw_value
# Fix this by doing this in charts class methods
if isinstance(self, Histogram):
if value is None:
value = (None, None, None)
elif not is_list_like(value):
value = (value, self.zero, self.zero)
elif len(value) == 2:
value = (1, value[0], value[1])
value = list(map(self._adapt, value))
elif self._dual:
if value is None:
value = (None, None)
elif not is_list_like(value):
value = (value, self.zero)
if self._x_adapt:
value = (
self._x_adapt(value[0]),
self._adapt(value[1]))
if isinstance(self, BaseMap):
value = (self._adapt(value[0]), value[1])
else:
value = list(map(self._adapt, value))
else:
value = self._adapt(value)
values.append(value)
serie_config = SerieConfig()
serie_config(**dict((k, v) for k, v in self.state.__dict__.items()
if k in dir(serie_config)))
serie_config(**serie_config_kwargs)
series.append(
Serie(offset + len(series),
title, values, serie_config, metadata))
return series
def setup(self, **kwargs):
"""Set up the transient state prior rendering"""
# Keep labels in case of map
if getattr(self, 'x_labels', None) is not None:
self.x_labels = list(self.x_labels)
if getattr(self, 'y_labels', None) is not None:
self.y_labels = list(self.y_labels)
self.state = State(self, **kwargs)
if isinstance(self.style, type):
self.style = self.style()
self.series = self.prepare_values(
self.raw_series) or []
self.secondary_series = self.prepare_values(
self.raw_series2, len(self.series)) or []
self.horizontal = getattr(self, 'horizontal', False)
self.svg = Svg(self)
self._x_labels = None
self._y_labels = None
self._x_2nd_labels = None
self._y_2nd_labels = None
self.nodes = {}
self.margin_box = Margin(
self.margin_top or self.margin,
self.margin_right or self.margin,
self.margin_bottom or self.margin,
self.margin_left or self.margin)
self._box = Box()
self.view = None
if self.logarithmic and self.zero == 0:
# Explicit min to avoid interpolation dependency
positive_values = list(filter(
lambda x: x > 0,
[val[1] or 1 if self._dual else val
for serie in self.series for val in serie.safe_values]))
self.zero = min(positive_values or (1,)) or 1
if self._len < 3:
self.interpolate = None
self._draw()
self.svg.pre_render()
def teardown(self):
"""Remove the transient state after rendering"""
if os.getenv('PYGAL_KEEP_STATE'):
return
del self.state
self.state = None
def _repr_svg_(self):
"""Display svg in IPython notebook"""
return self.render(disable_xml_declaration=True)
def _repr_png_(self):
"""Display png in IPython notebook"""
return self.render_to_png()
|
apache-2.0
|
myarti/kivybits
|
Base/modules/screen.py
|
1
|
4333
|
'''Screen
======
This module changes some environment and configuration variables
to match the density / dpi / screen size of a specific device.
To see a list of the available screen ids, just run::
python main.py -m screen
To simulate a medium-density screen such as the Motorola Droid 2::
python main.py -m screen:droid2
To simulate a high-density screen such as HTC One X, in portrait::
python main.py -m screen:onex,portrait
To simulate the iPad 2 screen::
python main.py -m screen:ipad
If the generated window is too large, you can specify a scale::
python main.py -m screen:note2,portrait,scale=.75
Note that to display your contents correctly on a scaled window you
must consistently use units 'dp' and 'sp' throughout your app. See
:mod:`~kivy.metrics` for more details.
'''
import sys
from os import environ
from kivy.config import Config
from kivy.logger import Logger
# taken from http://en.wikipedia.org/wiki/List_of_displays_by_pixel_density
devices = {
# device: (name, width, height, dpi, density)
's7edge': ('Galaxy S7 Edge', 2560, 1440, 534, 4),
'onex': ('HTC One X', 1280, 720, 312, 2),
'one': ('HTC One', 1920, 1080, 468, 3),
'onesv': ('HTC One SV', 800, 480, 216, 1.5),
's3': ('Galaxy SIII', 1280, 720, 306, 2),
'note2': ('Galaxy Note II', 1280, 720, 267, 2),
'droid2': ('Motorola Droid 2', 854, 480, 240, 1.5),
'xoom': ('Motorola Xoom', 1280, 800, 149, 1),
'ipad': ('iPad (1 and 2)', 1024, 768, 132, 1),
'ipad3': ('iPad 3', 2048, 1536, 264, 2),
'iphone4': ('iPhone 4', 960, 640, 326, 2),
'iphone5': ('iPhone 5', 1136, 640, 326, 2),
'xperiae': ('Xperia E', 480, 320, 166, 1),
'nexus4': ('Nexus 4', 1280, 768, 320, 2),
'nexus7': ('Nexus 7 (2012 version)', 1280, 800, 216, 1.325),
'nexus7.2': ('Nexus 7 (2013 version)', 1920, 1200, 323, 2),
}
def start(win, ctx):
pass
def stop(win, ctx):
pass
def apply_device(device, scale, orientation):
name, width, height, dpi, density = devices[device]
if orientation == 'portrait':
width, height = height, width
Logger.info('Screen: Apply screen settings for {0}'.format(name))
Logger.info('Screen: size={0}x{1} dpi={2} density={3} '
'orientation={4}'.format(width, height, dpi, density,
orientation))
try:
scale = float(scale)
except:
scale = 1
environ['KIVY_METRICS_DENSITY'] = str(density * scale)
environ['KIVY_DPI'] = str(dpi * scale)
Config.set('graphics', 'width', str(int(width * scale)))
# simulate with the android bar
# FIXME should be configurable
Config.set('graphics', 'height', str(int(height * scale - 25 * density)))
Config.set('graphics', 'fullscreen', '0')
Config.set('graphics', 'show_mousecursor', '1')
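# Hedged worked example of the settings above, using the 'droid2' entry with
# scale=0.75 (note that the simulated 25 px android bar is multiplied by the
# unscaled device density):
#   KIVY_METRICS_DENSITY = str(1.5 * 0.75)             -> '1.125'
#   KIVY_DPI             = str(240 * 0.75)             -> '180.0'
#   graphics width       = int(854 * 0.75)             -> 640
#   graphics height      = int(480 * 0.75 - 25 * 1.5)  -> 322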
def usage(device=None):
if device:
Logger.error('Screen: The specified device ({0}) is unknown.'.format(device))
print('\nModule usage: python main.py -m screen:deviceid[,orientation]\n')
print('Available devices:\n')
print('{0:12} {1:<22} {2:<8} {3:<8} {4:<5} {5:<8}'.format(
'Device ID', 'Name', 'Width', 'Height', 'DPI', 'Density'))
for device, info in devices.items():
print('{0:12} {1:<22} {2:<8} {3:<8} {4:<5} {5:<8}'.format(
device, *info))
print('\n')
print('Simulate a medium-density screen such as Motorola Droid 2:\n')
print(' python main.py -m screen:droid2\n')
print('Simulate a high-density screen such as HTC One X, in portrait:\n')
print(' python main.py -m screen:onex,portrait\n')
print('Simulate the iPad 2 screen\n')
print(' python main.py -m screen:ipad\n')
print('If the generated window is too large, you can specify a scale:\n')
print(' python main.py -m screen:note2,portrait,scale=.75\n')
sys.exit(1)
def configure(ctx):
scale = ctx.pop('scale', None)
orientation = 'landscape'
ctx.pop('landscape', None)
if ctx.pop('portrait', None):
orientation = 'portrait'
if not ctx:
return usage(None)
device = list(ctx.keys())[0]
if device not in devices:
return usage(device)
apply_device(device, scale, orientation)
if __name__ == "__main__":
for n in devices.values():
assert n[1] > n[2]
|
mit
|
mscuthbert/abjad
|
abjad/tools/timespantools/test/test_timespantools_Timespan___or__.py
|
2
|
3746
|
# -*- encoding: utf-8 -*-
from abjad import *
def test_timespantools_Timespan___or___01():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(-10, -5)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(-10, -5),
timespantools.Timespan(0, 15)
])
def test_timespantools_Timespan___or___02():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(-10, 0)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(-10, 15)
])
def test_timespantools_Timespan___or___03():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(-10, 5)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(-10, 15)
])
def test_timespantools_Timespan___or___04():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(-10, 15)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(-10, 15)
])
def test_timespantools_Timespan___or___05():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(-10, 25)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(-10, 25)
])
def test_timespantools_Timespan___or___06():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(0, 10)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(0, 15)
])
def test_timespantools_Timespan___or___07():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(0, 15)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(0, 15)
])
def test_timespantools_Timespan___or___08():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(5, 10)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(0, 15)
])
def test_timespantools_Timespan___or___09():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(5, 15)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(0, 15)
])
def test_timespantools_Timespan___or___10():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(0, 25)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(0, 25)
])
def test_timespantools_Timespan___or___11():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(5, 25)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(0, 25)
])
def test_timespantools_Timespan___or___12():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(15, 25)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(0, 25)
])
def test_timespantools_Timespan___or___13():
timespan_1 = timespantools.Timespan(0, 15)
timespan_2 = timespantools.Timespan(20, 25)
result = timespan_1 | timespan_2
assert result == timespantools.TimespanInventory([
timespantools.Timespan(0, 15),
timespantools.Timespan(20, 25)
])
|
gpl-3.0
|
treetrnk/Tuxemon
|
tuxemon/core/components/event/actions/play_sound.py
|
2
|
1305
|
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (c) 2014-2017 William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
from core import tools
from core.components.event.eventaction import EventAction
class PlaySoundAction(EventAction):
"""Plays a sound from "resources/sounds/"
Valid Parameters: filename
"""
name = "play_sound"
valid_parameters = [
(str, "filename"),
]
def start(self):
filename = self.parameters.filename
sound = tools.load_sound("sounds/" + filename)
sound.play()
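# Hedged note (not part of the class): start() reads self.parameters.filename
# and prepends "sounds/", so a filename parameter of e.g. "splash.ogg" (a
# made-up name) is loaded via tools.load_sound("sounds/splash.ogg"), i.e. from
# "resources/sounds/splash.ogg" as the docstring states.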
|
gpl-3.0
|
TomBaxter/osf.io
|
addons/onedrive/migrations/0001_initial.py
|
22
|
2662
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 16:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import osf.models.base
import osf.utils.datetime_aware_jsonfield
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('osf', '0067_auto_20171121_1050'),
]
operations = [
migrations.CreateModel(
name='NodeSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('folder_id', models.TextField(blank=True, null=True)),
('folder_path', models.TextField(blank=True, null=True)),
('external_account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_onedrive_node_settings', to='osf.ExternalAccount')),
('owner', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_onedrive_node_settings', to='osf.AbstractNode')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='UserSettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
('deleted', models.BooleanField(default=False)),
('oauth_grants', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(blank=True, default=dict, encoder=osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONEncoder)),
('owner', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addons_onedrive_user_settings', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='nodesettings',
name='user_settings',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='addons_onedrive.UserSettings'),
),
]
|
apache-2.0
|
alfanugraha/LUMENS-repo
|
processing/gdal/ColorRelief.py
|
4
|
3811
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ColorRelief.py
---------------------
Date : October 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtGui import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterRaster import ParameterRaster
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.parameters.ParameterNumber import ParameterNumber
from processing.parameters.ParameterFile import ParameterFile
from processing.parameters.ParameterSelection import ParameterSelection
from processing.outputs.OutputRaster import OutputRaster
from processing.gdal.GdalUtils import GdalUtils
from processing.tools.system import *
class ColorRelief(GeoAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
COMPUTE_EDGES = 'COMPUTE_EDGES'
COLOR_TABLE = 'COLOR_TABLE'
MATCH_MODE = 'MATCH_MODE'
OUTPUT = 'OUTPUT'
MATCHING_MODES = ['"0,0,0,0" RGBA', 'Exact color', 'Nearest color']
#def getIcon(self):
# filepath = os.path.dirname(__file__) + '/icons/dem.png'
# return QIcon(filepath)
def defineCharacteristics(self):
self.name = 'Color relief'
self.group = '[GDAL] Analysis'
self.addParameter(ParameterRaster(self.INPUT, 'Input layer'))
self.addParameter(ParameterNumber(self.BAND, 'Band number', 1, 99, 1))
self.addParameter(ParameterBoolean(self.COMPUTE_EDGES, 'Compute edges',
False))
self.addParameter(ParameterFile(self.COLOR_TABLE,
'Color configuration file', optional=False))
self.addParameter(ParameterSelection(self.MATCH_MODE,
'Matching mode', self.MATCHING_MODES, 0))
self.addOutput(OutputRaster(self.OUTPUT, 'Output file'))
def processAlgorithm(self, progress):
arguments = ['color-relief']
arguments.append(unicode(self.getParameterValue(self.INPUT)))
arguments.append(unicode(self.getParameterValue(self.COLOR_TABLE)))
#filePath = unicode(self.getParameterValue(self.COLOR_TABLE))
#if filePath is None or filePath == '':
# filePath = os.path.join(os.path.dirname(__file__), 'terrain.txt')
#arguments.append(filePath)
arguments.append(unicode(self.getOutputValue(self.OUTPUT)))
arguments.append('-b')
arguments.append(str(self.getParameterValue(self.BAND)))
if self.getParameterValue(self.COMPUTE_EDGES):
arguments.append('-compute_edges')
mode = self.getParameterValue(self.MATCH_MODE)
if mode == 1:
arguments.append('-exact_color_entry')
elif mode == 2:
arguments.append('-nearest_color_entry')
GdalUtils.runGdal(['gdaldem',
GdalUtils.escapeAndJoin(arguments)], progress)
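# Hedged illustration (not part of the algorithm): with band 1, compute_edges
# enabled and "Nearest color" matching, processAlgorithm above ends up running
# something equivalent to
#   gdaldem color-relief input_dem.tif color_table.txt output.tif -b 1 \
#       -compute_edges -nearest_color_entry
# (the file names are placeholders).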
|
gpl-2.0
|
gusai-francelabs/datafari
|
windows/python/Lib/email/test/test_email_codecs_renamed.py
|
298
|
2842
|
# Copyright (C) 2002-2006 Python Software Foundation
# Contact: [email protected]
# email package unit tests for (optional) Asian codecs
import unittest
from test.test_support import run_unittest
from email.test.test_email import TestEmailBase
from email.charset import Charset
from email.header import Header, decode_header
from email.message import Message
# We're compatible with Python 2.3, but it doesn't have the built-in Asian
# codecs, so we have to skip all these tests.
try:
unicode('foo', 'euc-jp')
except LookupError:
raise unittest.SkipTest
class TestEmailAsianCodecs(TestEmailBase):
def test_japanese_codecs(self):
eq = self.ndiffAssertEqual
j = Charset("euc-jp")
g = Charset("iso-8859-1")
h = Header("Hello World!")
jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
ghello = 'Gr\xfc\xdf Gott!'
h.append(jhello, j)
h.append(ghello, g)
# BAW: This used to -- and maybe should -- fold the two iso-8859-1
# chunks into a single encoded word. However it doesn't violate the
# standard to have them as two encoded chunks and maybe it's
# reasonable <wink> for each .append() call to result in a separate
# encoded word.
eq(h.encode(), """\
Hello World! =?iso-2022-jp?b?GyRCJU8lbSE8JW8hPCVrJUkhKhsoQg==?=
=?iso-8859-1?q?Gr=FC=DF?= =?iso-8859-1?q?_Gott!?=""")
eq(decode_header(h.encode()),
[('Hello World!', None),
('\x1b$B%O%m!<%o!<%k%I!*\x1b(B', 'iso-2022-jp'),
('Gr\xfc\xdf Gott!', 'iso-8859-1')])
long = 'test-ja \xa4\xd8\xc5\xea\xb9\xc6\xa4\xb5\xa4\xec\xa4\xbf\xa5\xe1\xa1\xbc\xa5\xeb\xa4\xcf\xbb\xca\xb2\xf1\xbc\xd4\xa4\xce\xbe\xb5\xc7\xa7\xa4\xf2\xc2\xd4\xa4\xc3\xa4\xc6\xa4\xa4\xa4\xde\xa4\xb9'
h = Header(long, j, header_name="Subject")
# test a very long header
enc = h.encode()
# TK: splitting point may differ by codec design and/or Header encoding
eq(enc , """\
=?iso-2022-jp?b?dGVzdC1qYSAbJEIkWEVqOUYkNSRsJD8lYSE8JWskTztKGyhC?=
=?iso-2022-jp?b?GyRCMnE8VCROPjVHJyRyQlQkQyRGJCQkXiQ5GyhC?=""")
# TK: full decode comparison
eq(h.__unicode__().encode('euc-jp'), long)
def test_payload_encoding(self):
jhello = '\xa5\xcf\xa5\xed\xa1\xbc\xa5\xef\xa1\xbc\xa5\xeb\xa5\xc9\xa1\xaa'
jcode = 'euc-jp'
msg = Message()
msg.set_payload(jhello, jcode)
ustr = unicode(msg.get_payload(), msg.get_content_charset())
self.assertEqual(jhello, ustr.encode(jcode))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestEmailAsianCodecs))
return suite
def test_main():
run_unittest(TestEmailAsianCodecs)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
apache-2.0
|
Lujeni/ansible
|
lib/ansible/modules/system/filesystem.py
|
13
|
13388
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Alexander Bulimov <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
author:
- Alexander Bulimov (@abulimov)
module: filesystem
short_description: Makes a filesystem
description:
- This module creates a filesystem.
version_added: "1.2"
options:
fstype:
choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap ]
description:
- Filesystem type to be created.
- reiserfs support was added in 2.2.
- lvm support was added in 2.5.
- since 2.5, I(dev) can be an image file.
- vfat support was added in 2.5
- ocfs2 support was added in 2.6
- f2fs support was added in 2.7
- swap support was added in 2.8
required: yes
aliases: [type]
dev:
description:
- Target path to device or image file.
required: yes
aliases: [device]
force:
description:
- If C(yes), allows creating a new filesystem on a device that already has an existing filesystem.
type: bool
default: 'no'
resizefs:
description:
- If C(yes) and the block device and filesystem size differ, grow the filesystem to fill the space.
- Supported for C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(vfat), C(swap) filesystems.
- XFS will only grow if mounted.
- vFAT will likely fail if fatresize < 1.04.
type: bool
default: 'no'
version_added: "2.0"
opts:
description:
- List of options to be passed to mkfs command.
requirements:
- Uses tools related to the I(fstype) (C(mkfs)) and C(blkid) command. When I(resizefs) is enabled, C(blockdev) command is required too.
notes:
- A potential filesystem on I(dev) is checked using C(blkid); in case C(blkid) isn't able to detect an existing filesystem,
that filesystem is overwritten even if I(force) is C(no).
'''
EXAMPLES = '''
- name: Create a ext2 filesystem on /dev/sdb1
filesystem:
fstype: ext2
dev: /dev/sdb1
- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks
filesystem:
fstype: ext4
dev: /dev/sdb1
opts: -cc
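# Illustrative sketch of the resizefs option documented above (the device name is a placeholder)
- name: Grow the existing ext4 filesystem on /dev/sdb1 to fill the block device
  filesystem:
    fstype: ext4
    dev: /dev/sdb1
    resizefs: yes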
'''
from distutils.version import LooseVersion
import os
import platform
import re
import stat
from ansible.module_utils.basic import AnsibleModule
class Device(object):
def __init__(self, module, path):
self.module = module
self.path = path
def size(self):
""" Return size in bytes of device. Returns int """
statinfo = os.stat(self.path)
if stat.S_ISBLK(statinfo.st_mode):
blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
_, devsize_in_bytes, _ = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
return int(devsize_in_bytes)
elif os.path.isfile(self.path):
return os.path.getsize(self.path)
else:
self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)
def __str__(self):
return self.path
class Filesystem(object):
GROW = None
MKFS = None
MKFS_FORCE_FLAGS = ''
LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
def __init__(self, module):
self.module = module
@property
def fstype(self):
return type(self).__name__
def get_fs_size(self, dev):
""" Return size in bytes of filesystem on device. Returns int """
raise NotImplementedError()
def create(self, opts, dev):
if self.module.check_mode:
return
mkfs = self.module.get_bin_path(self.MKFS, required=True)
if opts is None:
cmd = "%s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, dev)
else:
cmd = "%s %s %s '%s'" % (mkfs, self.MKFS_FORCE_FLAGS, opts, dev)
self.module.run_command(cmd, check_rc=True)
def grow_cmd(self, dev):
cmd = self.module.get_bin_path(self.GROW, required=True)
return [cmd, str(dev)]
def grow(self, dev):
"""Get dev and fs size and compare. Returns stdout of used command."""
devsize_in_bytes = dev.size()
try:
fssize_in_bytes = self.get_fs_size(dev)
except NotImplementedError:
self.module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % self.fstype)
if not fssize_in_bytes < devsize_in_bytes:
self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
elif self.module.check_mode:
self.module.exit_json(changed=True, msg="Resizing filesystem %s on device %s" % (self.fstype, dev))
else:
_, out, _ = self.module.run_command(self.grow_cmd(dev), check_rc=True)
return out
class Ext(Filesystem):
MKFS_FORCE_FLAGS = '-F'
GROW = 'resize2fs'
def get_fs_size(self, dev):
cmd = self.module.get_bin_path('tune2fs', required=True)
# Get Block count and Block size
_, size, _ = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
for line in size.splitlines():
if 'Block count:' in line:
block_count = int(line.split(':')[1].strip())
elif 'Block size:' in line:
block_size = int(line.split(':')[1].strip())
return block_size * block_count
class Ext2(Ext):
MKFS = 'mkfs.ext2'
class Ext3(Ext):
MKFS = 'mkfs.ext3'
class Ext4(Ext):
MKFS = 'mkfs.ext4'
class XFS(Filesystem):
MKFS = 'mkfs.xfs'
MKFS_FORCE_FLAGS = '-f'
GROW = 'xfs_growfs'
def get_fs_size(self, dev):
cmd = self.module.get_bin_path('xfs_growfs', required=True)
_, size, _ = self.module.run_command([cmd, '-n', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
for line in size.splitlines():
col = line.split('=')
if col[0].strip() == 'data':
if col[1].strip() != 'bsize':
self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "bsize")')
if col[2].split()[1] != 'blocks':
self.module.fail_json(msg='Unexpected output format from xfs_growfs (could not locate "blocks")')
block_size = int(col[2].split()[0])
block_count = int(col[3].split(',')[0])
return block_size * block_count
class Reiserfs(Filesystem):
MKFS = 'mkfs.reiserfs'
MKFS_FORCE_FLAGS = '-f'
class Btrfs(Filesystem):
MKFS = 'mkfs.btrfs'
def __init__(self, module):
super(Btrfs, self).__init__(module)
_, stdout, stderr = self.module.run_command('%s --version' % self.MKFS, check_rc=True)
match = re.search(r" v([0-9.]+)", stdout)
if not match:
# v0.20-rc1 use stderr
match = re.search(r" v([0-9.]+)", stderr)
if match:
# v0.20-rc1 doesn't have the --force parameter; it was added later, in v3.12
if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
self.MKFS_FORCE_FLAGS = '-f'
else:
self.MKFS_FORCE_FLAGS = ''
else:
# assume version is greater or equal to 3.12
self.MKFS_FORCE_FLAGS = '-f'
self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
class Ocfs2(Filesystem):
MKFS = 'mkfs.ocfs2'
MKFS_FORCE_FLAGS = '-Fx'
class F2fs(Filesystem):
MKFS = 'mkfs.f2fs'
GROW = 'resize.f2fs'
@property
def MKFS_FORCE_FLAGS(self):
mkfs = self.module.get_bin_path(self.MKFS, required=True)
cmd = "%s %s" % (mkfs, os.devnull)
_, out, _ = self.module.run_command(cmd, check_rc=False, environ_update=self.LANG_ENV)
# Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
# mkfs.f2fs displays version since v1.2.0
match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
if match is not None:
# Since 1.9.0, mkfs.f2fs checks for an existing filesystem before formatting, so -f is needed to force overwrite;
# before that version the -f switch wasn't used
if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
return '-f'
return ''
def get_fs_size(self, dev):
cmd = self.module.get_bin_path('dump.f2fs', required=True)
# Get sector count and sector size
_, dump, _ = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
sector_size = None
sector_count = None
for line in dump.splitlines():
if 'Info: sector size = ' in line:
# expected: 'Info: sector size = 512'
sector_size = int(line.split()[4])
elif 'Info: total FS sectors = ' in line:
# expected: 'Info: total FS sectors = 102400 (50 MB)'
sector_count = int(line.split()[5])
if None not in (sector_size, sector_count):
break
else:
self.module.warn("Unable to process dump.f2fs output '%s'", '\n'.join(dump))
self.module.fail_json(msg="Unable to process dump.f2fs output for %s" % dev)
return sector_size * sector_count
class VFAT(Filesystem):
if platform.system() == 'FreeBSD':
MKFS = "newfs_msdos"
else:
MKFS = 'mkfs.vfat'
GROW = 'fatresize'
def get_fs_size(self, dev):
cmd = self.module.get_bin_path(self.GROW, required=True)
_, output, _ = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
for line in output.splitlines()[1:]:
param, value = line.split(':', 1)
if param.strip() == 'Size':
return int(value.strip())
self.module.fail_json(msg="fatresize failed to provide filesystem size for %s" % dev)
def grow_cmd(self, dev):
cmd = self.module.get_bin_path(self.GROW)
return [cmd, "-s", str(dev.size()), str(dev.path)]
class LVM(Filesystem):
MKFS = 'pvcreate'
MKFS_FORCE_FLAGS = '-f'
GROW = 'pvresize'
def get_fs_size(self, dev):
cmd = self.module.get_bin_path('pvs', required=True)
_, size, _ = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
block_count = int(size)
return block_count
class Swap(Filesystem):
MKFS = 'mkswap'
MKFS_FORCE_FLAGS = '-f'
FILESYSTEMS = {
'ext2': Ext2,
'ext3': Ext3,
'ext4': Ext4,
'ext4dev': Ext4,
'f2fs': F2fs,
'reiserfs': Reiserfs,
'xfs': XFS,
'btrfs': Btrfs,
'vfat': VFAT,
'ocfs2': Ocfs2,
'LVM2_member': LVM,
'swap': Swap,
}
def main():
friendly_names = {
'lvm': 'LVM2_member',
}
fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
# There is no "single command" to manipulate filesystems, so we map them all out and their options
module = AnsibleModule(
argument_spec=dict(
fstype=dict(required=True, aliases=['type'],
choices=list(fstypes)),
dev=dict(required=True, aliases=['device']),
opts=dict(),
force=dict(type='bool', default=False),
resizefs=dict(type='bool', default=False),
),
supports_check_mode=True,
)
dev = module.params['dev']
fstype = module.params['fstype']
opts = module.params['opts']
force = module.params['force']
resizefs = module.params['resizefs']
if fstype in friendly_names:
fstype = friendly_names[fstype]
changed = False
try:
klass = FILESYSTEMS[fstype]
except KeyError:
module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
if not os.path.exists(dev):
module.fail_json(msg="Device %s not found." % dev)
dev = Device(module, dev)
cmd = module.get_bin_path('blkid', required=True)
rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
# In case blkid isn't able to identify an existing filesystem, device is considered as empty,
# then this existing filesystem would be overwritten even if force isn't enabled.
fs = raw_fs.strip()
filesystem = klass(module)
same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
if same_fs and not resizefs and not force:
module.exit_json(changed=False)
elif same_fs and resizefs:
if not filesystem.GROW:
module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
out = filesystem.grow(dev)
module.exit_json(changed=True, msg=out)
elif fs and not force:
module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite" % (dev, fs), rc=rc, err=err)
# create fs
filesystem.create(opts, dev)
changed = True
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
gpl-3.0
|
1tush/reviewboard
|
reviewboard/webapi/resources/review_group.py
|
1
|
13050
|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.utils import six
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from reviewboard.reviews.models import Group
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import (GROUP_ALREADY_EXISTS,
INVALID_USER)
from reviewboard.webapi.resources import resources
class ReviewGroupResource(WebAPIResource):
"""Provides information on review groups.
Review groups are groups of users that can be listed as an intended
reviewer on a review request.
"""
model = Group
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the review group.',
},
'name': {
'type': six.text_type,
'description': 'The short name of the group, used in the '
'reviewer list and the Dashboard.',
},
'display_name': {
'type': six.text_type,
'description': 'The human-readable name of the group, sometimes '
'used as a short description.',
},
'invite_only': {
'type': bool,
'description': 'Whether or not the group is invite-only. An '
'invite-only group is only accessible by members '
'of the group.',
},
'mailing_list': {
'type': six.text_type,
'description': 'The e-mail address that all posts on a review '
'group are sent to.',
},
'url': {
'type': six.text_type,
'description': "The URL to the user's page on the site. "
"This is deprecated and will be removed in a "
"future version.",
'deprecated_in': '2.0',
},
'absolute_url': {
'type': six.text_type,
'description': "The absolute URL to the user's page on the site.",
'added_in': '2.0',
},
'visible': {
'type': bool,
'description': 'Whether or not the group is visible to users '
'who are not members. This does not prevent users '
'from accessing the group if they know it, though.',
},
'extra_data': {
'type': dict,
'description': 'Extra data as part of the review group. '
'This can be set by the API or extensions.',
},
}
item_child_resources = [
resources.review_group_user,
]
uri_object_key = 'group_name'
uri_object_key_regex = '[A-Za-z0-9_-]+'
model_object_key = 'name'
autogenerate_etags = True
mimetype_list_resource_name = 'review-groups'
mimetype_item_resource_name = 'review-group'
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
def has_delete_permissions(self, request, group, *args, **kwargs):
return group.is_mutable_by(request.user)
def has_modify_permissions(self, request, group):
return group.is_mutable_by(request.user)
def get_queryset(self, request, is_list=False, local_site_name=None,
*args, **kwargs):
search_q = request.GET.get('q', None)
local_site = self._get_local_site(local_site_name)
if is_list:
query = self.model.objects.accessible(request.user,
local_site=local_site)
else:
query = self.model.objects.filter(local_site=local_site)
if search_q:
q = Q(name__istartswith=search_q)
if request.GET.get('displayname', None):
q = q | Q(display_name__istartswith=search_q)
query = query.filter(q)
return query
def serialize_url_field(self, group, **kwargs):
return group.get_absolute_url()
def serialize_absolute_url_field(self, obj, request, **kwargs):
return request.build_absolute_uri(obj.get_absolute_url())
def has_access_permissions(self, request, group, *args, **kwargs):
return group.is_accessible_by(request.user)
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Retrieve information on a review group.
Some basic information on the review group is provided, including
the name, description, and mailing list (if any) that e-mails to
the group are sent to.
The group links to the list of users that are members of the group.
"""
pass
@webapi_check_local_site
@webapi_request_fields(
optional={
'q': {
'type': six.text_type,
'description': 'The string that the group name (or the '
'display name when using ``displayname``) '
'must start with in order to be included in '
'the list. This is case-insensitive.',
},
'displayname': {
'type': bool,
'description': 'Specifies whether ``q`` should also match '
'the beginning of the display name.'
},
},
allow_unknown=True
)
@augment_method_from(WebAPIResource)
def get_list(self, *args, **kwargs):
"""Retrieves the list of review groups on the site.
The list of review groups can be filtered down using the ``q`` and
``displayname`` parameters.
Setting ``q`` to a value will by default limit the results to
group names starting with that value. This is a case-insensitive
comparison.
If ``displayname`` is set to ``1``, the display names will also be
checked along with the group name. ``displayname`` is ignored if ``q``
is not set.
For example, accessing ``/api/groups/?q=dev&displayname=1`` will list
any groups with a name or display name starting with ``dev``.
"""
pass
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(GROUP_ALREADY_EXISTS, INVALID_FORM_DATA,
INVALID_USER, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
required={
'name': {
'type': six.text_type,
'description': 'The name of the group.',
},
'display_name': {
'type': six.text_type,
'description': 'The human-readable name of the group.',
},
},
optional={
'mailing_list': {
'type': six.text_type,
'description': 'The e-mail address that all posts on a review '
'group are sent to.',
},
'visible': {
'type': bool,
'description': 'Whether or not the group is visible to users '
'who are not members. The default is true.',
},
'invite_only': {
'type': bool,
'description': 'Whether or not the group is invite-only. '
'The default is false.',
},
},
allow_unknown=True
)
def create(self, request, name, display_name, mailing_list=None,
visible=True, invite_only=False, local_site_name=None,
extra_fields={}, *args, **kargs):
"""Creates a new review group.
This will create a brand new review group with the given name
and display name. The group will be public by default, unless
specified otherwise.
Extra data can be stored on the group for later lookup by passing
``extra_data.key_name=value``. The ``key_name`` and ``value`` can
be any valid strings. Passing a blank ``value`` will remove the key.
The ``extra_data.`` prefix is required.
"""
local_site = self._get_local_site(local_site_name)
if not self.model.objects.can_create(request.user, local_site):
return self._no_access_error(request.user)
group, is_new = self.model.objects.get_or_create(
name=name,
local_site=local_site,
defaults={
'display_name': display_name,
'mailing_list': mailing_list or '',
'visible': bool(visible),
'invite_only': bool(invite_only),
})
if not is_new:
return GROUP_ALREADY_EXISTS
if extra_fields:
self._import_extra_data(group.extra_data, extra_fields)
group.save(update_fields=['extra_data'])
return 201, {
self.item_result_key: group,
}
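# Hedged illustration of the ``extra_data.`` convention described in the
# docstring above (the ``chat_channel`` key is a made-up example):
#
#   POST /api/groups/ with
#       name=backend-devs
#       display_name=Backend Developers
#       extra_data.chat_channel=backend
#
#   stores group.extra_data['chat_channel'] = 'backend'; sending
#   extra_data.chat_channel= with a blank value later removes the key.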
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_FORM_DATA,
GROUP_ALREADY_EXISTS, NOT_LOGGED_IN,
PERMISSION_DENIED)
@webapi_request_fields(
optional={
'name': {
'type': six.text_type,
'description': 'The new name for the group.',
},
'display_name': {
'type': six.text_type,
'description': 'The human-readable name of the group.',
},
'mailing_list': {
'type': six.text_type,
'description': 'The e-mail address that all posts on a review '
'group are sent to.',
},
'visible': {
'type': bool,
'description': 'Whether or not the group is visible to users '
'who are not members.',
},
'invite_only': {
'type': bool,
'description': 'Whether or not the group is invite-only.'
},
},
allow_unknown=True
)
def update(self, request, name=None, extra_fields={}, *args, **kwargs):
"""Updates an existing review group.
All the fields of a review group can be modified, including the
name, so long as it doesn't conflict with another review group.
Extra data can be stored on the group for later lookup by passing
``extra_data.key_name=value``. The ``key_name`` and ``value`` can
be any valid strings. Passing a blank ``value`` will remove the key.
The ``extra_data.`` prefix is required.
"""
try:
group = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not self.has_modify_permissions(request, group):
return self._no_access_error(request.user)
if name is not None and name != group.name:
# If we're changing the group name, make sure that group doesn't
# exist.
local_site = self._get_local_site(kwargs.get('local_site_name'))
if self.model.objects.filter(name=name,
local_site=local_site).count():
return GROUP_ALREADY_EXISTS
group.name = name
for field in ("display_name", "mailing_list", "visible",
"invite_only"):
val = kwargs.get(field, None)
if val is not None:
setattr(group, field, val)
self._import_extra_data(group.extra_data, extra_fields)
group.save()
return 200, {
self.item_result_key: group,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
def delete(self, request, *args, **kwargs):
"""Deletes a review group.
This will disassociate the group from all review requests previously
targeting the group, and permanently delete the group.
It is best to only delete empty, unused groups, and to instead
change a group to not be visible if it's no longer needed.
"""
try:
group = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not self.has_delete_permissions(request, group):
return self._no_access_error(request.user)
group.delete()
return 204, {}
review_group_resource = ReviewGroupResource()
|
mit
|
merenlab/anvio
|
anvio/panops.py
|
1
|
49412
|
# -*- coding: utf-8
# pylint: disable=line-too-long
"""
Classes for pan operations.
anvi-pan-genome is the default client using this module
"""
import os
import json
import math
import copy
import multiprocessing
import pandas as pd
from itertools import chain
import anvio
import anvio.utils as utils
import anvio.dbops as dbops
import anvio.terminal as terminal
import anvio.constants as constants
import anvio.clustering as clustering
import anvio.filesnpaths as filesnpaths
import anvio.tables.miscdata as miscdata
from anvio.drivers.blast import BLAST
from anvio.drivers.diamond import Diamond
from anvio.drivers.mcl import MCL
from anvio.drivers import Aligners
from anvio.errors import ConfigError, FilesNPathsError
from anvio.genomestorage import GenomeStorage
from anvio.tables.geneclusters import TableForGeneClusters
from anvio.tables.views import TablesForViews
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "A. Murat Eren"
__email__ = "[email protected]"
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
aligners = Aligners()
class Pangenome(object):
def __init__(self, args=None, run=run, progress=progress):
self.args = args
self.run = run
self.progress = progress
self.max_num_gene_clusters_for_hierarchical_clustering = constants.max_num_items_for_hierarchical_clustering
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
self.genome_names_to_focus = A('genome_names')
self.genomes_storage_path = A('genomes_storage')
self.genomes = None
self.project_name = A('project_name')
self.output_dir = A('output_dir')
self.num_threads = A('num_threads')
self.skip_alignments = A('skip_alignments')
self.skip_homogeneity = A('skip_homogeneity')
self.quick_homogeneity = A('quick_homogeneity')
self.align_with = A('align_with')
self.overwrite_output_destinations = A('overwrite_output_destinations')
self.debug = anvio.DEBUG
self.min_percent_identity = A('min_percent_identity')
self.gene_cluster_min_occurrence = A('min_occurrence')
self.mcl_inflation = A('mcl_inflation')
self.sensitive = A('sensitive')
self.minbit = A('minbit')
self.use_ncbi_blast = A('use_ncbi_blast')
self.exclude_partial_gene_calls = A('exclude_partial_gene_calls')
self.description_file_path = A('description')
self.skip_hierarchical_clustering = A('skip_hierarchical_clustering')
self.enforce_hierarchical_clustering = A('enforce_hierarchical_clustering')
if not self.project_name:
raise ConfigError("Please set a project name using --project-name or -n.")
# when it is time to organize gene_clusters
self.linkage = A('linkage') or constants.linkage_method_default
self.distance = A('distance') or constants.distance_metric_default
self.log_file_path = None
# to be filled during init:
self.amino_acid_sequences_dict = {}
self.view_data = {}
self.view_data_presence_absence = {}
self.additional_view_data = {}
self.aligner = None
# we don't know what we are about
self.description = None
def load_genomes(self):
# genome_name parameter can be a file or comma-separated genome names.
if self.genome_names_to_focus:
if filesnpaths.is_file_exists(self.genome_names_to_focus, dont_raise=True):
self.genome_names_to_focus = utils.get_column_data_from_TAB_delim_file(self.genome_names_to_focus, column_indices=[0], expected_number_of_fields=1)[0]
else:
self.genome_names_to_focus = [g.strip() for g in self.genome_names_to_focus.split(',')]
self.run.warning("A subset of genome names is found, and anvi'o will focus only on to those.")
self.genomes_storage = GenomeStorage(self.genomes_storage_path, storage_hash=None, genome_names_to_focus=self.genome_names_to_focus)
self.genomes = self.genomes_storage.get_genomes_dict()
self.external_genome_names = [g for g in self.genomes if self.genomes[g]['external_genome']]
self.internal_genome_names = [g for g in self.genomes if not self.genomes[g]['external_genome']]
self.hash_to_genome_name = {}
for genome_name in self.genomes:
self.hash_to_genome_name[self.genomes[genome_name]['genome_hash']] = genome_name
def generate_pan_db(self):
meta_values = {'internal_genome_names': ','.join(self.internal_genome_names),
'external_genome_names': ','.join(self.external_genome_names),
'num_genomes': len(self.genomes),
'min_percent_identity': self.min_percent_identity,
'gene_cluster_min_occurrence': self.gene_cluster_min_occurrence,
'mcl_inflation': self.mcl_inflation,
'default_view': 'gene_cluster_presence_absence',
'use_ncbi_blast': self.use_ncbi_blast,
'diamond_sensitive': self.sensitive,
'minbit': self.minbit,
'exclude_partial_gene_calls': self.exclude_partial_gene_calls,
'gene_alignments_computed': False if self.skip_alignments else True,
'genomes_storage_hash': self.genomes_storage.get_storage_hash(),
'project_name': self.project_name,
'items_ordered': False,
'description': self.description if self.description else '_No description is provided_',
}
dbops.PanDatabase(self.pan_db_path, quiet=False).create(meta_values)
# know thyself.
self.args.pan_db = self.pan_db_path
def get_output_file_path(self, file_name, delete_if_exists=False):
output_file_path = os.path.join(self.output_dir, file_name)
if delete_if_exists:
if os.path.exists(output_file_path):
os.remove(output_file_path)
return output_file_path
def check_programs(self):
if self.use_ncbi_blast:
utils.is_program_exists('blastp')
else:
utils.is_program_exists('diamond')
utils.is_program_exists('mcl')
def check_project_name(self):
# check the project name:
if not self.project_name:
raise ConfigError("Please set a project name using the `--project-name` parameter, and be prepared to see "
"it around as anvi'o will use it for multiple things, such as setting the output directory "
"and naming various output files including the database file that will be generated at the "
"end of the process. If you set your own output directory name, you can have multiple "
"projects in it and all of those projects can use the same intermediate files whenever "
"possible.")
utils.is_this_name_OK_for_database('pan project name', self.project_name, stringent=False)
def check_params(self):
# if the user did not set a specific output directory name, use the project name
# for it:
self.output_dir = self.output_dir if self.output_dir else self.project_name
# deal with the output directory:
try:
filesnpaths.is_file_exists(self.output_dir)
except FilesNPathsError:
filesnpaths.gen_output_directory(self.output_dir, delete_if_exists=self.overwrite_output_destinations)
filesnpaths.is_output_dir_writable(self.output_dir)
self.output_dir = os.path.abspath(self.output_dir)
if not self.log_file_path:
self.log_file_path = self.get_output_file_path('log.txt')
filesnpaths.is_output_file_writable(self.log_file_path)
os.remove(self.log_file_path) if os.path.exists(self.log_file_path) else None
if not isinstance(self.minbit, float):
raise ConfigError("minbit value must be of type float :(")
if self.minbit < 0 or self.minbit > 1:
raise ConfigError("Well. minbit must be between 0 and 1. Yes. Very boring.")
if not isinstance(self.min_percent_identity, float):
raise ConfigError("Minimum percent identity value must be of type float :(")
if self.min_percent_identity < 0 or self.min_percent_identity > 100:
raise ConfigError("Minimum percent identity must be between 0%% and 100%%. Although your %.2f%% is "
"pretty cute, too." % self.min_percent_identity)
if len([c for c in list(self.genomes.values()) if 'genome_hash' not in c]):
raise ConfigError("self.genomes does not seem to be a properly formatted dictionary for "
"the anvi'o class Pangenome.")
if self.enforce_hierarchical_clustering and self.skip_hierarchical_clustering:
raise ConfigError("You are confusing anvi'o :/ You can't tell anvi'o to skip hierarchical clustering "
"while also asking it to enforce it.")
if self.description_file_path:
filesnpaths.is_file_plain_text(self.description_file_path)
self.description = open(os.path.abspath(self.description_file_path), 'rU').read()
self.pan_db_path = self.get_output_file_path(self.project_name + '-PAN.db')
def run_diamond(self, unique_AA_sequences_fasta_path, unique_AA_sequences_names_dict):
diamond = Diamond(unique_AA_sequences_fasta_path, run=self.run, progress=self.progress,
num_threads=self.num_threads, overwrite_output_destinations=self.overwrite_output_destinations)
diamond.names_dict = unique_AA_sequences_names_dict
diamond.search_output_path = self.get_output_file_path('diamond-search-results')
diamond.tabular_output_path = self.get_output_file_path('diamond-search-results.txt')
diamond.sensitive = self.sensitive
return diamond.get_blast_results()
def run_blast(self, unique_AA_sequences_fasta_path, unique_AA_sequences_names_dict):
self.run.warning("You elected to use NCBI's `blastp` for amino acid sequence search. Running blastp will "
"be significantly slower than DIAMOND, but in some cases, slightly more sensitive. "
"We are unsure about whether the slight increase in sensitivity may justify significant "
"increase in run time, but you are the boss.", lc="cyan")
blast = BLAST(unique_AA_sequences_fasta_path, run=self.run, progress=self.progress,
num_threads=self.num_threads, overwrite_output_destinations=self.overwrite_output_destinations)
blast.names_dict = unique_AA_sequences_names_dict
blast.log_file_path = self.log_file_path
blast.search_output_path = self.get_output_file_path('blast-search-results.txt')
return blast.get_blast_results()
def run_search(self, unique_AA_sequences_fasta_path, unique_AA_sequences_names_dict):
if self.use_ncbi_blast:
return self.run_blast(unique_AA_sequences_fasta_path, unique_AA_sequences_names_dict)
else:
return self.run_diamond(unique_AA_sequences_fasta_path, unique_AA_sequences_names_dict)
def run_mcl(self, mcl_input_file_path):
mcl = MCL(mcl_input_file_path, run=self.run, progress=self.progress, num_threads=self.num_threads)
mcl.inflation = self.mcl_inflation
mcl.clusters_file_path = self.get_output_file_path('mcl-clusters.txt')
mcl.log_file_path = self.log_file_path
return mcl.get_clusters_dict()
def gen_mcl_input(self, blastall_results):
self.run.warning(None, header="MCL INPUT", lc="green")
self.progress.new('Processing search results')
self.progress.update('...')
all_ids = set([])
# mapping for the fields in the blast output
mapping = [str, str, float, int, int, int, int, int, int, int, float, float]
# here we perform an initial pass on the blast results to fill the dict that will hold
# the bit score for each gene when it was blasted against itself. this dictionary
# will then be used to calculate the 'minbit' value between two genes, which I learned
# from ITEP (Benedict MN et al, doi:10.1186/1471-2164-15-8). ITEP defines minbit as
        # 'bit score between target and query / min(selfbit for query, selfbit for target)'. This
        # heuristic approach provides a means to set a cutoff to eliminate weak matches between
        # two genes. the minbit value approaches 1 for hits between two genes that are almost identical.
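        # a minimal illustration with made-up numbers: if gene X scores 500 bits against itself,
        # gene Y scores 420 bits against itself, and the X-vs-Y hit scores 380 bits, then
        # minbit = 380 / min(500, 420) = ~0.90, which would survive a minbit cutoff (self.minbit) of 0.5.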
self_bit_scores = {}
line_no = 1
self.progress.update('(initial pass of the search results to set the self bit scores ...)')
for line in open(blastall_results):
fields = line.strip().split('\t')
try:
query_id, subject_id, perc_id, aln_length, mismatches, gaps, q_start, q_end, s_start, s_end, e_val, bit_score = \
[mapping[i](fields[i]) for i in range(0, len(mapping))]
except Exception as e:
self.progress.end()
raise ConfigError("Something went wrong while processing the blastall output file in line %d. "
"Here is the error from the uppoer management: '''%s'''" % (line_no, e))
line_no += 1
all_ids.add(query_id)
all_ids.add(subject_id)
if query_id == subject_id:
self_bit_scores[query_id] = bit_score
self.progress.end()
ids_without_self_search = all_ids - set(self_bit_scores.keys())
if len(ids_without_self_search):
search_tool = 'BLAST' if self.use_ncbi_blast else 'DIAMOND'
self.run.warning("%s did not retun search results for %d of %d the amino acid sequences in your input FASTA file. "
"Anvi'o will do some heuristic magic to complete the missing data in the search output to recover "
"from this. But since you are a scientist, here are the amino acid sequence IDs for which %s "
"failed to report self search results: %s." \
% (search_tool, len(ids_without_self_search), len(all_ids), \
search_tool, ', '.join(ids_without_self_search)))
# HEURISTICS TO ADD MISSING SELF SEARCH RESULTS
# we are here, because amino acid sequences in ids_without_self_search did not have any hits in the search output
# although they were in the FASTA file the target database were built from. so we will make sure they are not
# missing from self_bit_scores dict, or mcl_input (additional mcl inputs will be stored in the following dict)
additional_mcl_input_lines = {}
for id_without_self_search in ids_without_self_search:
entry_hash, gene_caller_id = id_without_self_search.split('_')
try:
genome_name = self.hash_to_genome_name[entry_hash]
except KeyError:
raise ConfigError("Something horrible happened. This can only happend if you started a new analysis with "
"additional genomes without cleaning the previous work directory. Sounds familiar?")
            # the code below pulls the amino acid sequence of the gene (its length is roughly the DNA
            # length divided by three) and multiplies that length by two to approximate the bit score
            # that would have been recovered from a perfect self match
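            # a made-up illustration: a 300-residue protein is assigned a self bit score of 600 here,
            # serving as a rough stand-in for what a perfect self hit of that length would score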
gene_amino_acid_sequence_length = len(self.genomes_storage.get_gene_sequence(genome_name, int(gene_caller_id), report_DNA_sequences=False))
self_bit_scores[id_without_self_search] = gene_amino_acid_sequence_length * 2
# add this SOB into additional_mcl_input_lines dict.
additional_mcl_input_lines[id_without_self_search] = '%s\t%s\t1.0\n' % (id_without_self_search, id_without_self_search)
# CONTINUE AS IF NOTHING HAPPENED
self.run.info('Min percent identity', self.min_percent_identity)
self.run.info('Minbit', self.minbit)
self.progress.new('Processing search results')
mcl_input_file_path = self.get_output_file_path('mcl-input.txt')
mcl_input = open(mcl_input_file_path, 'w')
line_no = 1
num_edges_stored = 0
for line in open(blastall_results):
fields = line.strip().split('\t')
query_id, subject_id, perc_id, aln_length, mismatches, gaps, q_start, q_end, s_start, s_end, e_val, bit_score = \
[mapping[i](fields[i]) for i in range(0, len(mapping))]
line_no += 1
if line_no % 5000 == 0:
self.progress.update('Lines processed %s ...' % pp(line_no))
#
# FILTERING BASED ON PERCENT IDENTITY
#
if perc_id < self.min_percent_identity:
continue
#
# FILTERING BASED ON MINBIT
#
minbit = bit_score / min(self_bit_scores[query_id], self_bit_scores[subject_id])
if minbit < self.minbit:
continue
mcl_input.write('%s\t%s\t%f\n' % (query_id, subject_id, perc_id / 100.0))
num_edges_stored += 1
# add additional lines if there are any:
for line in list(additional_mcl_input_lines.values()):
mcl_input.write(line)
num_edges_stored += 1
mcl_input.close()
self.progress.end()
self.run.info('Filtered search results', '%s edges stored' % pp(num_edges_stored))
self.run.info('MCL input', '%s' % mcl_input_file_path)
return mcl_input_file_path
def process_gene_clusters(self, gene_clusters_dict):
self.progress.new('Generating view data')
self.progress.update('...')
gene_clusters = list(gene_clusters_dict.keys())
for genome_name in self.genomes:
self.genomes[genome_name]['singleton_gene_clusters'] = 0
self.genomes[genome_name]['num_gene_clusters_raw'] = 0
for gene_cluster in gene_clusters:
self.view_data[gene_cluster] = dict([(genome_name, 0) for genome_name in self.genomes])
self.view_data_presence_absence[gene_cluster] = dict([(genome_name, 0) for genome_name in self.genomes])
self.additional_view_data[gene_cluster] = {'num_genes_in_gene_cluster': 0, 'num_genomes_gene_cluster_has_hits': 0, 'SCG': 0, 'max_num_paralogs': 0}
for gene_entry in gene_clusters_dict[gene_cluster]:
genome_name = gene_entry['genome_name']
self.view_data[gene_cluster][genome_name] += 1
self.view_data_presence_absence[gene_cluster][genome_name] = 1
self.additional_view_data[gene_cluster]['num_genes_in_gene_cluster'] += 1
self.genomes[genome_name]['num_gene_clusters_raw'] += 1
genomes_contributing_to_gene_cluster = [t[0] for t in self.view_data_presence_absence[gene_cluster].items() if t[1]]
if len(genomes_contributing_to_gene_cluster) == 1:
self.genomes[genomes_contributing_to_gene_cluster[0]]['singleton_gene_clusters'] += 1
self.additional_view_data[gene_cluster]['SCG'] = 1 if set(self.view_data[gene_cluster].values()) == set([1]) else 0
self.additional_view_data[gene_cluster]['max_num_paralogs'] = max(self.view_data[gene_cluster].values())
self.additional_view_data[gene_cluster]['num_genomes_gene_cluster_has_hits'] = len([True for genome in self.view_data[gene_cluster] if self.view_data[gene_cluster][genome] > 0])
self.progress.end()
########################################################################################
# FILTERING BASED ON OCCURRENCE
########################################################################################
gene_clusters_of_interest = set([])
for gene_cluster in gene_clusters:
if self.additional_view_data[gene_cluster]['num_genomes_gene_cluster_has_hits'] >= self.gene_cluster_min_occurrence:
gene_clusters_of_interest.add(gene_cluster)
removed_gene_clusters = 0
for gene_cluster in gene_clusters:
if gene_cluster not in gene_clusters_of_interest:
self.view_data.pop(gene_cluster)
self.view_data_presence_absence.pop(gene_cluster)
self.additional_view_data.pop(gene_cluster)
gene_clusters_dict.pop(gene_cluster)
removed_gene_clusters += 1
if self.gene_cluster_min_occurrence > 1:
self.run.info('gene_clusters min occurrence', '%d (the filter removed %d gene_clusters)' % (self.gene_cluster_min_occurrence, removed_gene_clusters))
########################################################################################
# CAN WE CLUSTER THIS STUFF? DOES THE USER WANT US TO TRY REGARDLESS?
########################################################################################
if len(gene_clusters_dict) > self.max_num_gene_clusters_for_hierarchical_clustering:
if self.enforce_hierarchical_clustering:
self.run.warning("You have %s gene_clusters, which exceeds the number of gene_clusters anvi'o is comfortable to cluster. But "
"since you have used the flag `--enforce-hierarchical-clustering`, anvi'o will attempt "
"to create a hierarchical clustering of your gene_clusters anyway. It may take a bit of "
"time. Pour yourself a coffee. Or go to a nice vacation. See you in 10 mins, or next year "
"or never." % pp(len(gene_clusters_dict)))
else:
self.run.warning("It seems you have %s gene clusters in your pangenome. This exceeds the soft limit "
"of %s for anvi'o to attempt to create a hierarchical clustering of your gene clusters "
"(which becomes the center tree in all anvi'o displays). If you want a hierarchical "
"clustering to be done anyway, please see the flag `--enforce-hierarchical-clustering`." \
% (pp(len(gene_clusters_dict)), pp(self.max_num_gene_clusters_for_hierarchical_clustering)))
self.skip_hierarchical_clustering = True
########################################################################################
# STORING FILTERED DATA IN THE DB
########################################################################################
TablesForViews(self.pan_db_path).create_new_view(
view_data=self.view_data,
table_name='gene_cluster_frequencies',
view_name = 'gene_cluster_frequencies',
from_matrix_form=True)
TablesForViews(self.pan_db_path).create_new_view(
view_data=self.view_data_presence_absence,
table_name='gene_cluster_presence_absence',
view_name = 'gene_cluster_presence_absence',
from_matrix_form=True)
item_additional_data_table = miscdata.TableForItemAdditionalData(self.args, r=terminal.Run(verbose=False))
item_additional_data_keys = ['num_genomes_gene_cluster_has_hits', 'num_genes_in_gene_cluster', 'max_num_paralogs', 'SCG']
item_additional_data_table.add(self.additional_view_data, item_additional_data_keys, skip_check_names=True)
# ^^^^^^^^^^^^^^^^^^^^^
# /
        # here we say skip_check_names=True, simply because the gene_clusters table has not been
        # generated yet, but the check names functionality in dbops looks for the gene clusters table to
        # be certain. it is not a big deal here, since we absolutely know what gene cluster names we are
        # working with.
########################################################################################
# RETURN THE -LIKELY- UPDATED PROTEIN CLUSTERS DICT
########################################################################################
return gene_clusters_dict
def gen_synteny_based_ordering_of_gene_clusters(self, gene_clusters_dict):
"""Take the dictionary of gene_clusters, and order gene_clusters per genome based on synteny of genes.
This adds more orders to the pangenomic output so the user can enforce ordering of
gene_clusters based on the synteny of genes they contain in a given genome.
The synteny in this context is defined by the gene caller ids. Gene caller ids
follow a numerical order in anvi'o contigs databases for genes that are coming
        from the same contig. Of course, the synteny does not mean much for genes coming
        from genomes that are fragmented into many contigs.
"""
# yes. this is meren converting the gene_clusters_dict into a pandas data frame :/ if you are reading
# this line and if you are not evan, don't tell evan about this. everyone else: i don't know
# what you're talking about.
df = pd.DataFrame(list(chain.from_iterable(list(gene_clusters_dict.values()))))
df = df.sort_values(by=['genome_name', 'gene_caller_id'])
df = df.reset_index(drop=True)
# forced synteny
for genome_name in df.genome_name.unique():
gene_clusters_in_genome = df.loc[(df.genome_name == genome_name)].gene_cluster_id.unique()
gene_clusters_not_described = df.loc[~df.gene_cluster_id.isin(gene_clusters_in_genome)].gene_cluster_id.unique()
gene_clusters_order_based_on_genome_synteny = list(gene_clusters_in_genome) + list(gene_clusters_not_described)
order_name = 'Forced synteny <> %s' % genome_name
dbops.add_items_order_to_db(self.pan_db_path, order_name, ','.join(gene_clusters_order_based_on_genome_synteny), order_data_type_newick=False, run=terminal.Run(verbose=False))
gene_cluster_gene_cluster_edges = []
# network description of gene_cluster-gene_cluster relationships given the gene synteny.
gene_ordered_list_of_gene_clusters = list(zip(df.gene_caller_id, df.gene_cluster_id))
for index in range(1, len(gene_ordered_list_of_gene_clusters)):
(GENE_A, gene_cluster_A), (GENE_B, gene_cluster_B) = gene_ordered_list_of_gene_clusters[index-1], gene_ordered_list_of_gene_clusters[index]
if GENE_A == GENE_B - 1:
gene_cluster_gene_cluster_edges.append((gene_cluster_A, gene_cluster_B), )
# FIXME: Do something with gene_cluster_gene_cluster_edges.
def gen_hierarchical_clustering_of_gene_clusters(self):
"""Uses a clustering configuration to add hierarchical clustering of gene clusters into the pan db
        Note how this function cheats the system to create an enhanced clustering configuration:
        We want to use the clustering configurations for pangenomic analyses to order
        gene clusters. However, we want to add something into the clustering configuration
        file that depends on the number of genomes we have. This addition is the 'num_genomes_gene_cluster_has_hits'
        data, which pulls together gene clusters that are similarly distributed across genomes based
        on this extra bit of information. Because the clustering configurations framework in anvi'o
        does not allow us to have variable information in these recipes, we generate one
        on the fly to have a more capable one."""
if self.skip_hierarchical_clustering:
return
updated_clustering_configs = {}
for config_name in constants.clustering_configs['pan']:
config_path = constants.clustering_configs['pan'][config_name]
# now we have the config path. we first get a temporary file path:
enhanced_config_path = filesnpaths.get_temp_file_path()
# setup the additional section based on the number of genomes we have:
if config_name == 'presence-absence':
additional_config_section="""\n[AdditionalData !PAN.db::item_additional_data]\ntable_form=dataframe\ncolumns_to_use = %s\nnormalize = False\n""" \
% ','.join(['num_genomes_gene_cluster_has_hits'] * (int(round(len(self.genomes) / 2))))
elif config_name == 'frequency':
additional_config_section="""\n[AdditionalData !PAN.db::item_additional_data]\ntable_form=dataframe\ncolumns_to_use = %s\nnormalize = False\nlog=True\n""" \
% ','.join(['num_genes_in_gene_cluster'] * (int(round(math.sqrt(len(self.genomes))))))
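            # as an illustration only (exact numbers depend on your data): with 10 genomes, the
            # 'presence-absence' config gains a section whose columns_to_use repeats
            # 'num_genomes_gene_cluster_has_hits' 5 times (round(10 / 2)), and the 'frequency'
            # config repeats 'num_genes_in_gene_cluster' 3 times (round(sqrt(10)))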
            # write the content down into a file at the new path:
open(enhanced_config_path, 'w').write(open(config_path).read() + additional_config_section)
# update the clustering configs:
updated_clustering_configs[config_name] = enhanced_config_path
dbops.do_hierarchical_clustering_of_items(self.pan_db_path, updated_clustering_configs, database_paths={'PAN.db': self.pan_db_path},\
input_directory=self.output_dir, default_clustering_config=constants.pan_default,\
distance=self.distance, linkage=self.linkage, run=terminal.Run(verbose=False), progress=self.progress)
def populate_gene_cluster_homogeneity_index(self, gene_clusters_dict):
if self.skip_alignments:
            self.run.warning('Skipping homogeneity calculations because gene clusters are not aligned.')
return
if self.skip_homogeneity:
self.run.warning("Skipping homogeneity calculations per the '--skip-homogeneity' flag.")
return
pan = dbops.PanSuperclass(args=self.args, r=terminal.Run(verbose=False), p=self.progress)
gene_cluster_names = set(list(gene_clusters_dict.keys()))
d = pan.compute_homogeneity_indices_for_gene_clusters(gene_cluster_names=gene_cluster_names, num_threads=self.num_threads)
if d is None:
self.run.warning("Anvi'o received an empty dictionary for homogeneity indices. Not good :/ Returning empty handed,\
without updating anything in the pan database...")
return
miscdata.TableForItemAdditionalData(self.args, r=terminal.Run(verbose=False)).add(d, ['functional_homogeneity_index', 'geometric_homogeneity_index', 'combined_homogeneity_index'], skip_check_names=True)
def populate_layers_additional_data_and_orders(self):
self.progress.new('Layers additional data and orders')
        self.progress.update('Computing the hierarchical clustering of the (transposed) view data')
layer_orders_data_dict = {}
        for clustering_tuple in [('gene_cluster presence absence', self.view_data_presence_absence), ('gene_cluster frequencies', self.view_data)]:
v, d = clustering_tuple
newick = clustering.get_newick_tree_data_for_dict(d, transpose=True, distance = self.distance, linkage=self.linkage)
layer_orders_data_dict[v] = {'data_type': 'newick', 'data_value': newick}
self.progress.update('Generating layers additional data ..')
layers_additional_data_dict = {}
layers_additional_data_keys = ['total_length', 'gc_content']
for h in ['percent_completion', 'percent_redundancy']:
if h in list(self.genomes.values())[0]:
layers_additional_data_keys.append(h)
layers_additional_data_keys.extend(['num_genes', 'avg_gene_length', 'num_genes_per_kb',
'singleton_gene_clusters'])
if self.gene_cluster_min_occurrence > 1:
layers_additional_data_keys.extend(['num_gene_clusters_raw'])
for genome_name in self.genomes:
new_dict = {}
for key in layers_additional_data_keys:
new_dict[key] = self.genomes[genome_name][key]
layers_additional_data_dict[genome_name] = new_dict
# summarize gene cluster stats across genomes
layers_additional_data_keys.extend(['num_gene_clusters'])
for genome_name in self.genomes:
layers_additional_data_dict[genome_name]['num_gene_clusters'] = 0
for gene_cluster in self.view_data_presence_absence:
# tracking the total number of gene clusters
if self.view_data_presence_absence[gene_cluster][genome_name]:
layers_additional_data_dict[genome_name]['num_gene_clusters'] += 1
self.progress.end()
miscdata.TableForLayerOrders(self.args, r=terminal.Run(verbose=False)).add(layer_orders_data_dict)
miscdata.TableForLayerAdditionalData(self.args, r=terminal.Run(verbose=False)).add(layers_additional_data_dict, layers_additional_data_keys)
def sanity_check(self):
self.check_project_name()
self.check_programs()
if not isinstance(self.mcl_inflation, float):
raise ConfigError("Well, MCL likes its inflation parameter in 'float' form...")
if self.mcl_inflation > 100 or self.mcl_inflation < 0.1:
raise ConfigError("MCL inflation parameter should have a reasonable value :/ Like between 0.1 and 100.0.")
if not isinstance(self.genomes, type({})):
raise ConfigError("self.genomes must be a dict. Anvi'o needs an adult :(")
if len(self.genomes) < 2:
raise ConfigError("There must be at least two genomes for this workflow to work. You have like '%d' of them :/" \
% len(self.genomes))
if self.skip_alignments and self.align_with:
raise ConfigError("You are asking anvi'o to skip aligning sequences within your gene clusters, and then you "
"are also asking it to use '%s' for aligning sequences within your gene clusters. It is easy "
"to ignore this and skip the alignment, but anvi'o gets nervous when it realizes her users are "
"being inconsistent. Please make up your mind, and come back as the explicit person you are" \
% self.align_with)
self.check_params()
self.run.log_file_path = self.log_file_path
self.run.info('Args', (str(self.args)), quiet=True)
def store_gene_clusters(self, gene_clusters_dict):
self.progress.new('Storing gene clusters in the database')
self.progress.update('...')
table_for_gene_clusters = TableForGeneClusters(self.pan_db_path, run=self.run, progress=self.progress)
num_genes_in_gene_clusters = 0
for gene_cluster_name in gene_clusters_dict:
for gene_entry in gene_clusters_dict[gene_cluster_name]:
table_for_gene_clusters.add(gene_entry)
num_genes_in_gene_clusters += 1
self.progress.end()
table_for_gene_clusters.store()
pan_db = dbops.PanDatabase(self.pan_db_path, quiet=True)
pan_db.db.set_meta_value('num_gene_clusters', len(gene_clusters_dict))
pan_db.db.set_meta_value('num_genes_in_gene_clusters', num_genes_in_gene_clusters)
pan_db.disconnect()
def gen_gene_clusters_dict_from_mcl_clusters(self, mcl_clusters):
self.progress.new('Generating the gene clusters dictionary from raw MCL clusters')
self.progress.update('...')
gene_clusters_dict = {}
for gene_cluster in mcl_clusters:
gene_clusters_dict[gene_cluster] = []
for entry_hash, gene_caller_id in [e.split('_') for e in mcl_clusters[gene_cluster]]:
try:
genome_name = self.hash_to_genome_name[entry_hash]
except KeyError:
self.progress.end()
raise ConfigError("Something horrible happened. This can only happen if you started a new analysis with "
"additional genomes without cleaning the previous work directory. Sounds familiar?")
gene_clusters_dict[gene_cluster].append({'gene_caller_id': int(gene_caller_id), 'gene_cluster_id': gene_cluster, 'genome_name': genome_name, 'alignment_summary': ''})
self.progress.end()
return gene_clusters_dict
def compute_alignments_for_gene_clusters(self, gene_clusters_dict):
if self.skip_alignments:
self.run.warning('Skipping gene alignments.')
return gene_clusters_dict
# we run "select aligner" to print the citation information (the actual selection is
# done in the `alignment_worker` down below)
aligners.select(self.align_with)
gene_cluster_names = list(gene_clusters_dict.keys())
# we only need to align gene clusters with more than one sequence
non_singleton_gene_cluster_names = [g for g in gene_cluster_names if len(gene_clusters_dict[g]) > 1]
num_non_singleton_gene_clusters = len(non_singleton_gene_cluster_names)
self.progress.new('Aligning amino acid sequences for genes in gene clusters', progress_total_items=num_non_singleton_gene_clusters)
self.progress.update('...')
manager = multiprocessing.Manager()
input_queue = manager.Queue()
output_queue = manager.Queue()
for gene_cluster_name in non_singleton_gene_cluster_names:
input_queue.put(gene_cluster_name)
workers = []
for i in range(0, self.num_threads):
worker = multiprocessing.Process(target=Pangenome.alignment_worker,
args=(input_queue, output_queue, gene_clusters_dict, self.genomes_storage, self.align_with, self.run))
workers.append(worker)
worker.start()
received_gene_clusters = 0
while received_gene_clusters < num_non_singleton_gene_clusters:
try:
gene_clusters_item = output_queue.get()
if gene_clusters_item:
                    # the worker returns None if there was nothing to align;
                    # in that case we do not need to overwrite anything in gene_clusters_dict
gene_clusters_dict[gene_clusters_item['name']] = gene_clusters_item['entry']
if self.debug:
print(json.dumps(gene_clusters_item, indent=2))
received_gene_clusters += 1
self.progress.increment()
self.progress.update("Processed %d of %d non-singleton GCs in %d threads." %
(received_gene_clusters, num_non_singleton_gene_clusters, self.num_threads))
except KeyboardInterrupt:
print("Anvi'o profiler recieved SIGINT, terminating all processes...")
break
for worker in workers:
worker.terminate()
self.progress.end()
return gene_clusters_dict
@staticmethod
def alignment_worker(input_queue, output_queue, gene_clusters_dict, genomes_storage, align_with, run):
# Note for future changes, this worker should not write anything to gene_clusters_dict
# or genome_storage, changes will not be reflected to main process or other processes.
aligner = aligners.select(align_with, quiet=True)
# this instance of Run is here because we don't want to create this over and over again
# in the loop down below. there also is another run instance the worker gets to make sure
# it can report its own messages .. don't be confused we-do-not-age-discriminate-here padawan.
r = terminal.Run()
r.verbose = False
# Main process needs to kill this worker after it receives all tasks because of this infinite loop
while True:
gene_cluster_name = input_queue.get(True)
if len(gene_clusters_dict[gene_cluster_name]) == 1:
# this sequence is a singleton and does not need alignment
output_queue.put(None)
continue
gene_sequences_in_gene_cluster = []
for gene_entry in gene_clusters_dict[gene_cluster_name]:
sequence = genomes_storage.get_gene_sequence(gene_entry['genome_name'], gene_entry['gene_caller_id'])
gene_sequences_in_gene_cluster.append(('%s_%d' % (gene_entry['genome_name'], gene_entry['gene_caller_id']), sequence),)
# sometimes alignments fail, and because pangenomic analyses can take forever,
# everything goes into the trash bin. to prevent that, here we have a try/except
# block with lots of warnings if something goes wrong.
try:
alignments = aligner(run=r).run_stdin(gene_sequences_in_gene_cluster)
except:
# realm of sad face. before we continue to spam the user with error messages,
                # we turn our gene sequences into 'alignments' without actually aligning them. this worker will
# report raw, unaligned sequences for this gene cluster as if they were aligned
# so things will continue working operationally, and it will be on the user to
# make sure they went through their results carefully.
alignments = dict(gene_sequences_in_gene_cluster)
# constructing our #sad:
if anvio.DEBUG:
temp_file_path = filesnpaths.get_temp_file_path(prefix='ANVIO_GC_%s' % (gene_cluster_name))
with open(temp_file_path, 'w') as output:
for tpl in gene_sequences_in_gene_cluster:
output.write('>%s\n%s\n' % (tpl[0], tpl[1]))
debug_info = "The %d sequences in gene cluster %s are stored in the temporary file '%s'" % \
(len(gene_sequences_in_gene_cluster), gene_cluster_name, temp_file_path)
else:
debug_info = "If you re-run your last command with a `--debug` flag, anvi'o will generate more\
                                              information for you about the contents of this gene cluster (but if you are seeing\
millions of these warnings, it may not be a good idea since with the `--debug` flag\
anvi'o will generate a FASTA file in a temporary directory with the contents of the\
gene cluster, and will not attempt to delete them later)."
run.warning("VERY BAD NEWS. The alignment of sequences with '%s' in the gene cluster '%s' failed "
"for some reason. Since the real answer to 'why' is too deep in the matrix, there is "
"no reliable solution for anvi'o to find it for you, BUT THIS WILL AFFECT YOUR SCIENCE "
"GOING FORWARD, SO YOU SHOULD CONSIDER ADDRESSING THIS ISSUE FIRST. %s" % \
(aligner.__name__, gene_cluster_name, debug_info), nl_before=1)
output = {'name': gene_cluster_name, 'entry': copy.deepcopy(gene_clusters_dict[gene_cluster_name])}
for gene_entry in output['entry']:
gene_entry['alignment_summary'] = utils.summarize_alignment(alignments['%s_%d' % (gene_entry['genome_name'], gene_entry['gene_caller_id'])])
output_queue.put(output)
def process(self):
# load genomes from genomes storage
self.load_genomes()
# check sanity
self.sanity_check()
# gen pan_db
self.generate_pan_db()
# get all amino acid sequences:
combined_aas_FASTA_path = self.get_output_file_path('combined-aas.fa')
self.genomes_storage.gen_combined_aa_sequences_FASTA(combined_aas_FASTA_path,
exclude_partial_gene_calls=self.exclude_partial_gene_calls)
# get unique amino acid sequences:
self.progress.new('Uniquing the output FASTA file')
self.progress.update('...')
unique_aas_FASTA_path, unique_aas_names_file_path, unique_aas_names_dict = utils.unique_FASTA_file(combined_aas_FASTA_path, store_frequencies_in_deflines=False)
self.progress.end()
self.run.info('Unique AA sequences FASTA', unique_aas_FASTA_path)
# run search
blastall_results = self.run_search(unique_aas_FASTA_path, unique_aas_names_dict)
# generate MCL input from filtered blastall_results
mcl_input_file_path = self.gen_mcl_input(blastall_results)
# get clusters from MCL
mcl_clusters = self.run_mcl(mcl_input_file_path)
# we have the raw gene clusters dict, but we need to re-format it for following steps
gene_clusters_dict = self.gen_gene_clusters_dict_from_mcl_clusters(mcl_clusters)
del mcl_clusters
# compute alignments for genes within each gene_cluster (or don't)
gene_clusters_dict = self.compute_alignments_for_gene_clusters(gene_clusters_dict)
# populate the pan db with results
gene_clusters_dict = self.process_gene_clusters(gene_clusters_dict)
# store gene clusters dict into the db
self.store_gene_clusters(gene_clusters_dict)
# generate a hierarchical clustering of gene clusters (or don't)
self.gen_hierarchical_clustering_of_gene_clusters()
# generate orderings of gene_clusters based on synteny of genes
self.gen_synteny_based_ordering_of_gene_clusters(gene_clusters_dict)
# populate layers additional data and orders
self.populate_layers_additional_data_and_orders()
# work with gene cluster homogeneity index
self.populate_gene_cluster_homogeneity_index(gene_clusters_dict)
# let people know if they have too much data for their own comfort
if len(gene_clusters_dict) > 20000 or len(self.genomes) > 150:
if len(gene_clusters_dict) > 20000 and len(self.genomes) > 150:
_ = "gene clusters and genomes"
elif len(gene_clusters_dict) > 20000:
_ = "gene clusters"
else:
_ = "genomes"
self.run.warning(f"It seems you have a lot of {_} in this pan database :) It is all good! But please be aware that you may "
f"run into performance issues when you try to interactively visaulize these data using `anvi-display-pan`. "
f"In some cases it may even be impossible to do it, in fact. This is largely because the part of the "
f"anvi'o workflow to offer interactive access to a pangenomes is not designed to accommodate very"
f" large number of {_}, but rather enable in-depth exploratory analyses of pangenomes interactively. You still "
f"can work with large pangenomes via the command line utilities and do a lot of science with them. If you "
f"are unable to work with the interactive interface and it is critical for you, you have multiple options, "
f"You can use the `--min-occurrence` flag to reduce the number of gene clusters, or use the program "
f"`anvi-dereplicate-genomes` in an attempt to reduce the number of redundant genomes in your analysis. "
f"If you are unsure what would be the best game plan for you, you can consider coming to the anvi'o Slack "
f"channel and consult the opinion of the anvi'o community. Despite all these, it is still a good idea to run "
f"`anvi-display-pan` and see what it says first.", lc="cyan", header="FRIENDLY WARNING")
# done
self.run.info_single(f"Your pangenome is ready with a total of {pp(len(gene_clusters_dict))} gene clusters across "
f"{len(self.genomes)} genomes 🎉", mc="green", nl_after=1)
self.run.quit()
|
gpl-3.0
|
koderiter/mineos
|
procfs_reader.py
|
4
|
4504
|
"""
A python script to get procfs info (/proc)
"""
__author__ = "William Dizon"
__license__ = "GNU GPL v3.0"
__version__ = "0.6.0"
__email__ = "[email protected]"
import os
from binascii import b2a_qp
_PROCFS_PATHS = ['/proc',
'/usr/compat/linux/proc']
for procfs in _PROCFS_PATHS:
try:
with open(os.path.join(procfs, 'uptime'), 'rb') as procdump:
_procfs = procfs
break
except IOError:
continue
else:
raise RuntimeError('No suitable procfs filesystem found')
def pids():
return set(int(pid) for pid in os.listdir(_procfs) if pid.isdigit())
def pid_cmdline():
"""
Generator: all processes' pids
"""
for pid in pids():
try:
with open(os.path.join(_procfs, str(pid), 'cmdline'), 'rb') as fh:
cmdline = b2a_qp(fh.read())
cmdline = cmdline.replace('=00', ' ').replace('=\n', '').strip()
yield (pid, cmdline)
except IOError:
continue
def entries(pid, page):
with open(os.path.join(_procfs, str(pid), page)) as proc_status:
for line in proc_status:
split = b2a_qp(line).partition(':')
yield (split[0].strip(), split[2].strip())
def path_owner(path):
from pwd import getpwuid
st = os.stat(path)
uid = st.st_uid
return getpwuid(uid).pw_name
def pid_owner(pid):
from pwd import getpwuid
try:
status_page = dict(entries(pid, 'status'))
except IOError:
raise IOError('Process %s does not exist' % pid)
else:
return getpwuid(int(status_page['Uid'].partition('\t')[0]))
def pid_group(pid):
from grp import getgrgid
try:
status_page = dict(entries(pid, 'status'))
except IOError:
raise IOError('Process %s does not exist' % pid)
else:
return getgrgid(int(status_page['Gid'].partition('\t')[0]))
def proc_uptime():
    raw = next(entries('', 'uptime'))[0]
return tuple(float(v) for v in raw.split())
def proc_loadavg():
    raw = next(entries('', 'loadavg'))[0]
return tuple(float(v) for v in raw.split()[:3])
def human_readable(n):
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols):
prefix[s] = 1 << (i+1)*10
for s in reversed(symbols):
if n >= prefix[s]:
value = float(n) / prefix[s]
return '%.1f%s' % (value, s)
return "%sB" % n
def disk_free(path):
"""
df stats of filesystem.
Keyword Arguments:
path -- path to filesystem to poll
Returns:
namedtuple (total, used, free)
Thank you, Giampaolo Rodola
http://code.activestate.com/recipes/577972-disk-usage/
"""
import collections
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(human_readable(total),
human_readable(used),
human_readable(free))
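# example output shape (values are made up): disk_free('/') -> usage(total='39.1G', used='4.2G', free='33.0G')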
def disk_usage(path):
return sum(os.path.getsize(os.path.join(dirpath,filename))
for dirpath, dirnames, filenames in os.walk(path)
for filename in filenames)
def tail(f, window=50):
"""
Returns the last `window` lines of file `f` as a list.
http://stackoverflow.com/a/7047765/1191579
"""
BUFSIZ = 1024
f.seek(0, 2)
bytes = f.tell()
size = window + 1
block = -1
data = []
while size > 0 and bytes > 0:
if bytes - BUFSIZ > 0:
# Seek back one whole BUFSIZ
f.seek(block * BUFSIZ, 2)
# read BUFFER
data.insert(0, f.read(BUFSIZ))
else:
            # file too small, start from beginning
f.seek(0,0)
# only read what was not read
data.insert(0, f.read(bytes))
linesFound = data[0].count('\n')
size -= linesFound
bytes -= BUFSIZ
block -= 1
return ''.join(data).splitlines()[-window:]
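# illustrative usage (the path is just an example):
#   with open('/var/log/syslog') as f:
#       last_ten_lines = tail(f, window=10)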
def git_hash(path):
"""Returns the tag or short commit hash of a git path"""
from distutils.spawn import find_executable
from subprocess import check_output
from shlex import split
try:
return check_output(split('%s describe --always' % find_executable('git')), cwd=path).strip()
except:
return ''
|
gpl-3.0
|
biocore/qiime
|
qiime/make_per_library_sff.py
|
15
|
3549
|
#!/usr/bin/env python
from __future__ import division
__author__ = "Kyle Bittinger"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Kyle Bittinger"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Kyle Bittinger"
__email__ = "[email protected]"
import itertools
import os
import subprocess
from qiime.process_sff import (
check_sfffile,
)
from cogent.parse.binary_sff import (
parse_binary_sff, write_binary_sff,
)
def filter_sff_reads(sff_data, ids_to_keep=None, ids_to_remove=None):
"""Retain reads where the ID is in ids_to_keep but not in ids_to_remove.
This function reproduces the behavior of the -i and -e options in
Roche's sfffile program.
"""
# TODO: Move to PyCogent
header, reads = sff_data
# Force evaluation of all reads. We have no choice, since we need
# the total number of reads to be returned with the header.
# Another design choice would be to go back and correct the header
# when we write the binary SFF file to disk -- maybe we'll switch
# to that strategy in the future.
if ids_to_keep is not None:
reads = [r for r in reads if r['Name'] in ids_to_keep]
if ids_to_remove is not None:
reads = [r for r in reads if r['Name'] not in ids_to_remove]
header['number_of_reads'] = len(reads)
return header, reads
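# a minimal usage sketch (the read name below is made up): given sff_data as a (header, reads) pair,
#   header, reads = filter_sff_reads(sff_data, ids_to_keep=set(['READ_001']))
# keeps only reads named 'READ_001' and updates header['number_of_reads'] accordingly.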
def parse_id_list(id_list_file):
ids = set()
for line in id_list_file:
words = line.split()
if words:
ids.add(words[0].lstrip('>'))
return ids
def combine_sff_data(*sff_datasets):
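    # merge several parsed SFF datasets: keep the metadata of the last header seen, accumulate
    # the total read count across all inputs, and lazily chain the read generators together.
    # index_offset and index_length are zeroed below because the combined data carries no index block.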
combined_header = {'number_of_reads': 0}
combined_reads = []
for header, reads in sff_datasets:
prev_num_reads = combined_header['number_of_reads']
combined_header = header.copy()
combined_header['number_of_reads'] += prev_num_reads
combined_reads = itertools.chain(combined_reads, reads)
combined_header['index_offset'] = 0
combined_header['index_length'] = 0
return combined_header, combined_reads
def make_per_library_sff(sff_fps, id_list_fp, debug=False):
id_list_basepath, _ = os.path.splitext(id_list_fp)
output_fp = id_list_basepath + '.sff'
sff_datasets = [parse_binary_sff(open(fp), True) for fp in sff_fps]
sff_data = combine_sff_data(*sff_datasets)
ids = parse_id_list(open(id_list_fp))
filtered_sff_data = filter_sff_reads(sff_data, ids_to_keep=ids)
if debug:
print 'Creating SFF file for %s' % id_list_fp
write_binary_sff(open(output_fp, 'w'), *filtered_sff_data)
def make_per_library_sff_with_sfffile(
sff_fps, id_list_fp, sfffile_path=None, debug=False):
id_list_basepath, _ = os.path.splitext(id_list_fp)
output_fp = id_list_basepath + '.sff'
check_sfffile()
args = ['sfffile', '-i', id_list_fp, '-o', output_fp] + sff_fps
if debug:
print args
subprocess.check_call(
args, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
def make_per_library_sffs(
sff_fps, id_list_dir, use_sfftools=False, sfffile_path=None, debug=False):
for dirpath, dirnames, filenames in os.walk(id_list_dir):
for filename in filenames:
if filename.startswith('.'):
continue
id_list_fp = os.path.join(dirpath, filename)
if use_sfftools:
make_per_library_sff_with_sfffile(
sff_fps, id_list_fp, sfffile_path, debug)
else:
make_per_library_sff(sff_fps, id_list_fp, debug)
|
gpl-2.0
|
hipnusleo/laserjet
|
resource/pypi/cryptography-1.7.1/tests/hazmat/primitives/test_rsa.py
|
1
|
82608
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import itertools
import math
import os
import pytest
from cryptography.exceptions import (
AlreadyFinalized, InvalidSignature, _Reasons
)
from cryptography.hazmat.backends.interfaces import (
PEMSerializationBackend, RSABackend
)
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
padding, rsa, utils as asym_utils
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateNumbers, RSAPublicNumbers
)
from .fixtures_rsa import (
RSA_KEY_1024, RSA_KEY_1025, RSA_KEY_1026, RSA_KEY_1027, RSA_KEY_1028,
RSA_KEY_1029, RSA_KEY_1030, RSA_KEY_1031, RSA_KEY_1536, RSA_KEY_2048,
RSA_KEY_2048_ALT, RSA_KEY_512, RSA_KEY_512_ALT, RSA_KEY_522, RSA_KEY_599,
RSA_KEY_745, RSA_KEY_768,
)
from .utils import (
_check_rsa_private_numbers, generate_rsa_verification_test
)
from ...doubles import (
DummyAsymmetricPadding, DummyHashAlgorithm, DummyKeySerializationEncryption
)
from ...utils import (
load_pkcs1_vectors, load_rsa_nist_vectors, load_vectors_from_file,
raises_unsupported_algorithm
)
class DummyMGF(object):
_salt_length = 0
def _check_rsa_private_numbers_if_serializable(key):
if isinstance(key, rsa.RSAPrivateKeyWithSerialization):
_check_rsa_private_numbers(key.private_numbers())
def test_check_rsa_private_numbers_if_serializable():
_check_rsa_private_numbers_if_serializable("notserializable")
def _flatten_pkcs1_examples(vectors):
flattened_vectors = []
for vector in vectors:
examples = vector[0].pop("examples")
for example in examples:
merged_vector = (vector[0], vector[1], example)
flattened_vectors.append(merged_vector)
return flattened_vectors
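# illustrative shape only: a vector whose private part carries 'examples': [e1, e2] is flattened
# into two tuples, (private_part, public_part, e1) and (private_part, public_part, e2).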
def _build_oaep_sha2_vectors():
base_path = os.path.join("asymmetric", "RSA", "oaep-custom")
vectors = []
hashalgs = [
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
]
for mgf1alg, oaepalg in itertools.product(hashalgs, hashalgs):
if mgf1alg.name == "sha1" and oaepalg.name == "sha1":
# We need to generate the cartesian product of the permutations
# of all the SHAs above, but SHA1/SHA1 is something we already
# tested previously and thus did not generate custom vectors for.
continue
examples = _flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join(
base_path,
"oaep-{0}-{1}.txt".format(
mgf1alg.name, oaepalg.name
)
),
load_pkcs1_vectors
)
)
# We've loaded the files, but the loaders don't give us any information
# about the mgf1 or oaep hash algorithms. We know this info so we'll
# just add that to the end of the tuple
for private, public, vector in examples:
vectors.append((private, public, vector, mgf1alg, oaepalg))
return vectors
def _skip_pss_hash_algorithm_unsupported(backend, hash_alg):
if not backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hash_alg),
salt_length=padding.PSS.MAX_LENGTH
)
):
pytest.skip(
"Does not support {0} in MGF1 using PSS.".format(hash_alg.name)
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
def test_skip_pss_hash_algorithm_unsupported(backend):
with pytest.raises(pytest.skip.Exception):
_skip_pss_hash_algorithm_unsupported(backend, DummyHashAlgorithm())
def test_modular_inverse():
p = int(
"d1f9f6c09fd3d38987f7970247b85a6da84907753d42ec52bc23b745093f4fff5cff3"
"617ce43d00121a9accc0051f519c76e08cf02fc18acfe4c9e6aea18da470a2b611d2e"
"56a7b35caa2c0239bc041a53cc5875ca0b668ae6377d4b23e932d8c995fd1e58ecfd8"
"c4b73259c0d8a54d691cca3f6fb85c8a5c1baf588e898d481", 16
)
q = int(
"d1519255eb8f678c86cfd06802d1fbef8b664441ac46b73d33d13a8404580a33a8e74"
"cb2ea2e2963125b3d454d7a922cef24dd13e55f989cbabf64255a736671f4629a47b5"
"b2347cfcd669133088d1c159518531025297c2d67c9da856a12e80222cd03b4c6ec0f"
"86c957cb7bb8de7a127b645ec9e820aa94581e4762e209f01", 16
)
assert rsa._modinv(q, p) == int(
"0275e06afa722999315f8f322275483e15e2fb46d827b17800f99110b269a6732748f"
"624a382fa2ed1ec68c99f7fc56fb60e76eea51614881f497ba7034c17dde955f92f15"
"772f8b2b41f3e56d88b1e096cdd293eba4eae1e82db815e0fadea0c4ec971bc6fd875"
"c20e67e48c31a611e98d32c6213ae4c4d7b53023b2f80c538", 16
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSA(object):
@pytest.mark.parametrize(
("public_exponent", "key_size"),
itertools.product(
(3, 5, 65537),
(1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1536, 2048)
)
)
def test_generate_rsa_keys(self, backend, public_exponent, key_size):
skey = rsa.generate_private_key(public_exponent, key_size, backend)
assert skey.key_size == key_size
_check_rsa_private_numbers_if_serializable(skey)
pkey = skey.public_key()
assert isinstance(pkey.public_numbers(), rsa.RSAPublicNumbers)
def test_generate_bad_public_exponent(self, backend):
with pytest.raises(ValueError):
rsa.generate_private_key(public_exponent=1,
key_size=2048,
backend=backend)
with pytest.raises(ValueError):
rsa.generate_private_key(public_exponent=4,
key_size=2048,
backend=backend)
def test_cant_generate_insecure_tiny_key(self, backend):
with pytest.raises(ValueError):
rsa.generate_private_key(public_exponent=65537,
key_size=511,
backend=backend)
with pytest.raises(ValueError):
rsa.generate_private_key(public_exponent=65537,
key_size=256,
backend=backend)
@pytest.mark.parametrize(
"pkcs1_example",
load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"),
load_pkcs1_vectors
)
)
def test_load_pss_vect_example_keys(self, pkcs1_example):
secret, public = pkcs1_example
private_num = rsa.RSAPrivateNumbers(
p=secret["p"],
q=secret["q"],
d=secret["private_exponent"],
dmp1=secret["dmp1"],
dmq1=secret["dmq1"],
iqmp=secret["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=secret["public_exponent"],
n=secret["modulus"]
)
)
_check_rsa_private_numbers(private_num)
public_num = rsa.RSAPublicNumbers(
e=public["public_exponent"],
n=public["modulus"]
)
assert public_num
public_num2 = private_num.public_numbers
assert public_num2
assert public_num.n == public_num2.n
assert public_num.e == public_num2.e
def test_rsa_generate_invalid_backend():
pretend_backend = object()
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
rsa.generate_private_key(65537, 2048, pretend_backend)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSASignature(object):
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs1v15sign-vectors.txt"),
load_pkcs1_vectors
))
)
def test_pkcs1v15_signing(self, pkcs1_example, backend):
private, public, example = pkcs1_example
private_key = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"],
n=private["modulus"]
)
).private_key(backend)
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1())
signer.update(binascii.unhexlify(example["message"]))
signature = signer.finalize()
assert binascii.hexlify(signature) == example["signature"]
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS."
)
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"),
load_pkcs1_vectors
))
)
def test_pss_signing(self, pkcs1_example, backend):
private, public, example = pkcs1_example
private_key = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"],
n=private["modulus"]
)
).private_key(backend)
public_key = rsa.RSAPublicNumbers(
e=public["public_exponent"],
n=public["modulus"]
).public_key(backend)
signer = private_key.signer(
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
signer.update(binascii.unhexlify(example["message"]))
signature = signer.finalize()
assert len(signature) == math.ceil(private_key.key_size / 8.0)
# PSS signatures contain randomness so we can't do an exact
# signature check. Instead we'll verify that the signature created
# successfully verifies.
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1(),
)
verifier.update(binascii.unhexlify(example["message"]))
verifier.verify()
@pytest.mark.parametrize(
"hash_alg",
[hashes.SHA224(), hashes.SHA256(), hashes.SHA384(), hashes.SHA512()]
)
def test_pss_signing_sha2(self, hash_alg, backend):
_skip_pss_hash_algorithm_unsupported(backend, hash_alg)
private_key = RSA_KEY_768.private_key(backend)
public_key = private_key.public_key()
pss = padding.PSS(
mgf=padding.MGF1(hash_alg),
salt_length=padding.PSS.MAX_LENGTH
)
signer = private_key.signer(pss, hash_alg)
signer.update(b"testing signature")
signature = signer.finalize()
verifier = public_key.verifier(signature, pss, hash_alg)
verifier.update(b"testing signature")
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA512()) and
backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
)
),
skip_message="Does not support SHA512."
)
def test_pss_minimum_key_size_for_digest(self, backend):
private_key = RSA_KEY_522.private_key(backend)
signer = private_key.signer(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA512()
)
signer.update(b"no failure")
signer.finalize()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS."
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512."
)
def test_pss_signing_digest_too_large_for_key_size(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.signer(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA512()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS."
)
def test_pss_signing_salt_length_too_long(self, backend):
private_key = RSA_KEY_512.private_key(backend)
signer = private_key.signer(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=1000000
),
hashes.SHA1()
)
signer.update(b"failure coming")
with pytest.raises(ValueError):
signer.finalize()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_use_after_finalize(self, backend):
private_key = RSA_KEY_512.private_key(backend)
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1())
signer.update(b"sign me")
signer.finalize()
with pytest.raises(AlreadyFinalized):
signer.finalize()
with pytest.raises(AlreadyFinalized):
signer.update(b"more data")
def test_unsupported_padding(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.signer(DummyAsymmetricPadding(), hashes.SHA1())
def test_padding_incorrect_type(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(TypeError):
private_key.signer("notpadding", hashes.SHA1())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS."
)
def test_unsupported_pss_mgf(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
private_key.signer(
padding.PSS(
mgf=DummyMGF(),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_pkcs1_digest_too_large_for_key_size(self, backend):
private_key = RSA_KEY_599.private_key(backend)
signer = private_key.signer(
padding.PKCS1v15(),
hashes.SHA512()
)
signer.update(b"failure coming")
with pytest.raises(ValueError):
signer.finalize()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_pkcs1_minimum_key_size(self, backend):
private_key = RSA_KEY_745.private_key(backend)
signer = private_key.signer(
padding.PKCS1v15(),
hashes.SHA512()
)
signer.update(b"no failure")
signer.finalize()
def test_sign(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
pkcs = padding.PKCS1v15()
algorithm = hashes.SHA1()
signature = private_key.sign(message, pkcs, algorithm)
public_key = private_key.public_key()
verifier = public_key.verifier(signature, pkcs, algorithm)
verifier.update(message)
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS."
)
def test_prehashed_sign(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
h = hashes.Hash(hashes.SHA1(), backend)
h.update(message)
digest = h.finalize()
pss = padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
prehashed_alg = asym_utils.Prehashed(hashes.SHA1())
signature = private_key.sign(digest, pss, prehashed_alg)
public_key = private_key.public_key()
verifier = public_key.verifier(signature, pss, hashes.SHA1())
verifier.update(message)
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS."
)
def test_prehashed_digest_mismatch(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
h = hashes.Hash(hashes.SHA512(), backend)
h.update(message)
digest = h.finalize()
pss = padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
prehashed_alg = asym_utils.Prehashed(hashes.SHA1())
with pytest.raises(ValueError):
private_key.sign(digest, pss, prehashed_alg)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAVerification(object):
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs1v15sign-vectors.txt"),
load_pkcs1_vectors
))
)
def test_pkcs1v15_verification(self, pkcs1_example, backend):
private, public, example = pkcs1_example
public_key = rsa.RSAPublicNumbers(
e=public["public_exponent"],
n=public["modulus"]
).public_key(backend)
verifier = public_key.verifier(
binascii.unhexlify(example["signature"]),
padding.PKCS1v15(),
hashes.SHA1()
)
verifier.update(binascii.unhexlify(example["message"]))
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_invalid_pkcs1v15_signature_wrong_data(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1())
signer.update(b"sign me")
signature = signer.finalize()
verifier = public_key.verifier(
signature,
padding.PKCS1v15(),
hashes.SHA1()
)
verifier.update(b"incorrect data")
with pytest.raises(InvalidSignature):
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_invalid_pkcs1v15_signature_wrong_key(self, backend):
private_key = RSA_KEY_512.private_key(backend)
private_key2 = RSA_KEY_512_ALT.private_key(backend)
public_key = private_key2.public_key()
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1())
signer.update(b"sign me")
signature = signer.finalize()
verifier = public_key.verifier(
signature,
padding.PKCS1v15(),
hashes.SHA1()
)
verifier.update(b"sign me")
with pytest.raises(InvalidSignature):
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=20
)
),
skip_message="Does not support PSS."
)
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"),
load_pkcs1_vectors
))
)
def test_pss_verification(self, pkcs1_example, backend):
private, public, example = pkcs1_example
public_key = rsa.RSAPublicNumbers(
e=public["public_exponent"],
n=public["modulus"]
).public_key(backend)
verifier = public_key.verifier(
binascii.unhexlify(example["signature"]),
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=20
),
hashes.SHA1()
)
verifier.update(binascii.unhexlify(example["message"]))
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS."
)
def test_invalid_pss_signature_wrong_data(self, backend):
public_key = rsa.RSAPublicNumbers(
n=int(
b"dffc2137d5e810cde9e4b4612f5796447218bab913b3fa98bdf7982e4fa6"
b"ec4d6653ef2b29fb1642b095befcbea6decc178fb4bed243d3c3592c6854"
b"6af2d3f3", 16
),
e=65537
).public_key(backend)
signature = binascii.unhexlify(
b"0e68c3649df91c5bc3665f96e157efa75b71934aaa514d91e94ca8418d100f45"
b"6f05288e58525f99666bab052adcffdf7186eb40f583bd38d98c97d3d524808b"
)
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
verifier.update(b"incorrect data")
with pytest.raises(InvalidSignature):
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS."
)
def test_invalid_pss_signature_wrong_key(self, backend):
signature = binascii.unhexlify(
b"3a1880165014ba6eb53cc1449d13e5132ebcc0cfd9ade6d7a2494a0503bd0826"
b"f8a46c431e0d7be0ca3e453f8b2b009e2733764da7927cc6dbe7a021437a242e"
)
public_key = rsa.RSAPublicNumbers(
n=int(
b"381201f4905d67dfeb3dec131a0fbea773489227ec7a1448c3109189ac68"
b"5a95441be90866a14c4d2e139cd16db540ec6c7abab13ffff91443fd46a8"
b"960cbb7658ded26a5c95c86f6e40384e1c1239c63e541ba221191c4dd303"
b"231b42e33c6dbddf5ec9a746f09bf0c25d0f8d27f93ee0ae5c0d723348f4"
b"030d3581e13522e1", 16
),
e=65537
).public_key(backend)
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
verifier.update(b"sign me")
with pytest.raises(InvalidSignature):
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS."
)
def test_invalid_pss_signature_data_too_large_for_modulus(self, backend):
signature = binascii.unhexlify(
b"cb43bde4f7ab89eb4a79c6e8dd67e0d1af60715da64429d90c716a490b799c29"
b"194cf8046509c6ed851052367a74e2e92d9b38947ed74332acb115a03fcc0222"
)
public_key = rsa.RSAPublicNumbers(
n=int(
b"381201f4905d67dfeb3dec131a0fbea773489227ec7a1448c3109189ac68"
b"5a95441be90866a14c4d2e139cd16db540ec6c7abab13ffff91443fd46a8"
b"960cbb7658ded26a5c95c86f6e40384e1c1239c63e541ba221191c4dd303"
b"231b42e33c6dbddf5ec9a746f09bf0c25d0f8d27f93ee0ae5c0d723348f4"
b"030d3581e13522", 16
),
e=65537
).public_key(backend)
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
verifier.update(b"sign me")
with pytest.raises(InvalidSignature):
verifier.verify()
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_use_after_finalize(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1())
signer.update(b"sign me")
signature = signer.finalize()
verifier = public_key.verifier(
signature,
padding.PKCS1v15(),
hashes.SHA1()
)
verifier.update(b"sign me")
verifier.verify()
with pytest.raises(AlreadyFinalized):
verifier.verify()
with pytest.raises(AlreadyFinalized):
verifier.update(b"more data")
def test_unsupported_padding(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
public_key.verifier(
b"sig", DummyAsymmetricPadding(), hashes.SHA1()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_signature_not_bytes(self, backend):
public_key = RSA_KEY_512.public_numbers.public_key(backend)
signature = 1234
with pytest.raises(TypeError):
public_key.verifier(
signature,
padding.PKCS1v15(),
hashes.SHA1()
)
def test_padding_incorrect_type(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with pytest.raises(TypeError):
public_key.verifier(b"sig", "notpadding", hashes.SHA1())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS."
)
def test_unsupported_pss_mgf(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
public_key.verifier(
b"sig",
padding.PSS(
mgf=DummyMGF(),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS."
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512."
)
def test_pss_verify_digest_too_large_for_key_size(self, backend):
private_key = RSA_KEY_512.private_key(backend)
signature = binascii.unhexlify(
b"8b9a3ae9fb3b64158f3476dd8d8a1f1425444e98940e0926378baa9944d219d8"
b"534c050ef6b19b1bdc6eb4da422e89161106a6f5b5cc16135b11eb6439b646bd"
)
public_key = private_key.public_key()
with pytest.raises(ValueError):
public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA512()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS."
)
def test_pss_verify_salt_length_too_long(self, backend):
signature = binascii.unhexlify(
b"8b9a3ae9fb3b64158f3476dd8d8a1f1425444e98940e0926378baa9944d219d8"
b"534c050ef6b19b1bdc6eb4da422e89161106a6f5b5cc16135b11eb6439b646bd"
)
public_key = rsa.RSAPublicNumbers(
n=int(
b"d309e4612809437548b747d7f9eb9cd3340f54fe42bb3f84a36933b0839c"
b"11b0c8b7f67e11f7252370161e31159c49c784d4bc41c42a78ce0f0b40a3"
b"ca8ffb91", 16
),
e=65537
).public_key(backend)
verifier = public_key.verifier(
signature,
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
),
salt_length=1000000
),
hashes.SHA1()
)
verifier.update(b"sign me")
with pytest.raises(InvalidSignature):
verifier.verify()
def test_verify(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
pkcs = padding.PKCS1v15()
algorithm = hashes.SHA1()
signer = private_key.signer(pkcs, algorithm)
signer.update(message)
signature = signer.finalize()
public_key = private_key.public_key()
public_key.verify(signature, message, pkcs, algorithm)
def test_prehashed_verify(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
h = hashes.Hash(hashes.SHA1(), backend)
h.update(message)
digest = h.finalize()
prehashed_alg = asym_utils.Prehashed(hashes.SHA1())
pkcs = padding.PKCS1v15()
signature = private_key.sign(message, pkcs, hashes.SHA1())
public_key = private_key.public_key()
public_key.verify(signature, digest, pkcs, prehashed_alg)
def test_prehashed_digest_mismatch(self, backend):
public_key = RSA_KEY_512.private_key(backend).public_key()
message = b"one little message"
h = hashes.Hash(hashes.SHA1(), backend)
h.update(message)
data = h.finalize()
prehashed_alg = asym_utils.Prehashed(hashes.SHA512())
pkcs = padding.PKCS1v15()
with pytest.raises(ValueError):
public_key.verify(b"\x00" * 64, data, pkcs, prehashed_alg)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAPSSMGF1Verification(object):
test_rsa_pss_mgf1_sha1 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS using MGF1 with SHA1."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA1(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"]
)
))
test_rsa_pss_mgf1_sha224 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA224()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS using MGF1 with SHA224."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA224(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"]
)
))
test_rsa_pss_mgf1_sha256 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS using MGF1 with SHA256."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA256(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"]
)
))
test_rsa_pss_mgf1_sha384 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA384()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS using MGF1 with SHA384."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA384(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"]
)
))
test_rsa_pss_mgf1_sha512 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA512()),
salt_length=padding.PSS.MAX_LENGTH
)
),
skip_message="Does not support PSS using MGF1 with SHA512."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA512(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"]
)
))
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAPKCS1Verification(object):
test_rsa_pkcs1v15_verify_sha1 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA1()) and
backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA1 and PKCS1v1.5."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA1(),
lambda params, hash_alg: padding.PKCS1v15()
))
test_rsa_pkcs1v15_verify_sha224 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA224()) and
backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA224 and PKCS1v1.5."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA224(),
lambda params, hash_alg: padding.PKCS1v15()
))
test_rsa_pkcs1v15_verify_sha256 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA256()) and
backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA256 and PKCS1v1.5."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA256(),
lambda params, hash_alg: padding.PKCS1v15()
))
test_rsa_pkcs1v15_verify_sha384 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA384()) and
backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA384 and PKCS1v1.5."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA384(),
lambda params, hash_alg: padding.PKCS1v15()
))
test_rsa_pkcs1v15_verify_sha512 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA512()) and
backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA512 and PKCS1v1.5."
)(generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA512(),
lambda params, hash_alg: padding.PKCS1v15()
))
class TestPSS(object):
def test_calculate_max_pss_salt_length(self):
with pytest.raises(TypeError):
padding.calculate_max_pss_salt_length(object(), hashes.SHA256())
def test_invalid_salt_length_not_integer(self):
with pytest.raises(TypeError):
padding.PSS(
mgf=padding.MGF1(
hashes.SHA1()
),
salt_length=b"not_a_length"
)
def test_invalid_salt_length_negative_integer(self):
with pytest.raises(ValueError):
padding.PSS(
mgf=padding.MGF1(
hashes.SHA1()
),
salt_length=-1
)
def test_valid_pss_parameters(self):
algorithm = hashes.SHA1()
salt_length = algorithm.digest_size
mgf = padding.MGF1(algorithm)
pss = padding.PSS(mgf=mgf, salt_length=salt_length)
assert pss._mgf == mgf
assert pss._salt_length == salt_length
def test_valid_pss_parameters_maximum(self):
algorithm = hashes.SHA1()
mgf = padding.MGF1(algorithm)
pss = padding.PSS(mgf=mgf, salt_length=padding.PSS.MAX_LENGTH)
assert pss._mgf == mgf
assert pss._salt_length == padding.PSS.MAX_LENGTH
class TestMGF1(object):
def test_invalid_hash_algorithm(self):
with pytest.raises(TypeError):
padding.MGF1(b"not_a_hash")
def test_valid_mgf1_parameters(self):
algorithm = hashes.SHA1()
mgf = padding.MGF1(algorithm)
assert mgf._algorithm == algorithm
class TestOAEP(object):
def test_invalid_algorithm(self):
mgf = padding.MGF1(hashes.SHA1())
with pytest.raises(TypeError):
padding.OAEP(
mgf=mgf,
algorithm=b"",
label=None
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSADecryption(object):
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
@pytest.mark.parametrize(
"vector",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs1v15crypt-vectors.txt"),
load_pkcs1_vectors
))
)
def test_decrypt_pkcs1v15_vectors(self, vector, backend):
private, public, example = vector
skey = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"],
n=private["modulus"]
)
).private_key(backend)
ciphertext = binascii.unhexlify(example["encryption"])
assert len(ciphertext) == math.ceil(skey.key_size / 8.0)
message = skey.decrypt(ciphertext, padding.PKCS1v15())
assert message == binascii.unhexlify(example["message"])
def test_unsupported_padding(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(b"0" * 64, DummyAsymmetricPadding())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_decrypt_invalid_decrypt(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.decrypt(
b"\x00" * 64,
padding.PKCS1v15()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_decrypt_ciphertext_too_large(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.decrypt(
b"\x00" * 65,
padding.PKCS1v15()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
def test_decrypt_ciphertext_too_small(self, backend):
private_key = RSA_KEY_512.private_key(backend)
ct = binascii.unhexlify(
b"50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b80804f1"
b"69d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d8ea0"
)
with pytest.raises(ValueError):
private_key.decrypt(
ct,
padding.PKCS1v15()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
),
skip_message="Does not support OAEP."
)
@pytest.mark.parametrize(
"vector",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "oaep-vect.txt"),
load_pkcs1_vectors
))
)
def test_decrypt_oaep_vectors(self, vector, backend):
private, public, example = vector
skey = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"],
n=private["modulus"]
)
).private_key(backend)
message = skey.decrypt(
binascii.unhexlify(example["encryption"]),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
assert message == binascii.unhexlify(example["message"])
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA224()),
algorithm=hashes.SHA224(),
label=None
)
),
skip_message="Does not support OAEP using SHA224 MGF1 and SHA224 hash."
)
@pytest.mark.parametrize(
"vector",
_build_oaep_sha2_vectors()
)
def test_decrypt_oaep_sha2_vectors(self, vector, backend):
private, public, example, mgf1_alg, hash_alg = vector
skey = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"],
n=private["modulus"]
)
).private_key(backend)
message = skey.decrypt(
binascii.unhexlify(example["encryption"]),
padding.OAEP(
mgf=padding.MGF1(algorithm=mgf1_alg),
algorithm=hash_alg,
label=None
)
)
assert message == binascii.unhexlify(example["message"])
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
),
skip_message="Does not support OAEP."
)
def test_invalid_oaep_decryption(self, backend):
# More recent versions of OpenSSL may raise RSA_R_OAEP_DECODING_ERROR
# This test triggers it and confirms that we properly handle it. Other
# backends should also return the proper ValueError.
private_key = RSA_KEY_512.private_key(backend)
ciphertext = private_key.public_key().encrypt(
b'secure data',
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
private_key_alt = RSA_KEY_512_ALT.private_key(backend)
with pytest.raises(ValueError):
private_key_alt.decrypt(
ciphertext,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
),
skip_message="Does not support OAEP."
)
    def test_invalid_oaep_decryption_data_too_large_for_modulus(self, backend):
key = RSA_KEY_2048_ALT.private_key(backend)
ciphertext = (
b'\xb1ph\xc0\x0b\x1a|\xe6\xda\xea\xb5\xd7%\x94\x07\xf96\xfb\x96'
b'\x11\x9b\xdc4\xea.-\x91\x80\x13S\x94\x04m\xe9\xc5/F\x1b\x9b:\\'
b'\x1d\x04\x16ML\xae\xb32J\x01yuA\xbb\x83\x1c\x8f\xf6\xa5\xdbp\xcd'
b'\nx\xc7\xf6\x15\xb2/\xdcH\xae\xe7\x13\x13by\r4t\x99\x0fc\x1f\xc1'
b'\x1c\xb1\xdd\xc5\x08\xd1\xee\xa1XQ\xb8H@L5v\xc3\xaf\xf2\r\x97'
b'\xed\xaa\xe7\xf1\xd4xai\xd3\x83\xd9\xaa9\xbfx\xe1\x87F \x01\xff'
b'L\xccv}ae\xb3\xfa\xf2B\xb8\xf9\x04H\x94\x85\xcb\x86\xbb\\ghx!W31'
b'\xc7;t\na_E\xc2\x16\xb0;\xa1\x18\t\x1b\xe1\xdb\x80>)\x15\xc6\x12'
b'\xcb\xeeg`\x8b\x9b\x1b\x05y4\xb0\x84M6\xcd\xa1\x827o\xfd\x96\xba'
b'Z#\x8d\xae\x01\xc9\xf2\xb6\xde\x89{8&eQ\x1e8\x03\x01#?\xb66\\'
b'\xad.\xe9\xfa!\x95 c{\xcaz\xe0*\tP\r\x91\x9a)B\xb5\xadN\xf4$\x83'
b'\t\xb5u\xab\x19\x99'
)
with pytest.raises(ValueError):
key.decrypt(
ciphertext,
padding.OAEP(
algorithm=hashes.SHA1(),
mgf=padding.MGF1(hashes.SHA1()),
label=None
)
)
def test_unsupported_oaep_mgf(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=DummyMGF(),
algorithm=hashes.SHA1(),
label=None
)
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAEncryption(object):
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
),
skip_message="Does not support OAEP."
)
@pytest.mark.parametrize(
("key_data", "pad"),
itertools.product(
(RSA_KEY_1024, RSA_KEY_1025, RSA_KEY_1026, RSA_KEY_1027,
RSA_KEY_1028, RSA_KEY_1029, RSA_KEY_1030, RSA_KEY_1031,
RSA_KEY_1536, RSA_KEY_2048),
[
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
]
)
)
def test_rsa_encrypt_oaep(self, key_data, pad, backend):
private_key = key_data.private_key(backend)
pt = b"encrypt me!"
public_key = private_key.public_key()
ct = public_key.encrypt(pt, pad)
assert ct != pt
assert len(ct) == math.ceil(public_key.key_size / 8.0)
recovered_pt = private_key.decrypt(ct, pad)
assert recovered_pt == pt
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA512(),
label=None
)
),
skip_message="Does not support OAEP using SHA256 MGF1 and SHA512 hash."
)
@pytest.mark.parametrize(
("mgf1hash", "oaephash"),
itertools.product([
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
], [
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
])
)
def test_rsa_encrypt_oaep_sha2(self, mgf1hash, oaephash, backend):
pad = padding.OAEP(
mgf=padding.MGF1(algorithm=mgf1hash),
algorithm=oaephash,
label=None
)
private_key = RSA_KEY_2048.private_key(backend)
pt = b"encrypt me using sha2 hashes!"
public_key = private_key.public_key()
ct = public_key.encrypt(pt, pad)
assert ct != pt
assert len(ct) == math.ceil(public_key.key_size / 8.0)
recovered_pt = private_key.decrypt(ct, pad)
assert recovered_pt == pt
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5."
)
@pytest.mark.parametrize(
("key_data", "pad"),
itertools.product(
(RSA_KEY_1024, RSA_KEY_1025, RSA_KEY_1026, RSA_KEY_1027,
RSA_KEY_1028, RSA_KEY_1029, RSA_KEY_1030, RSA_KEY_1031,
RSA_KEY_1536, RSA_KEY_2048),
[padding.PKCS1v15()]
)
)
def test_rsa_encrypt_pkcs1v15(self, key_data, pad, backend):
private_key = key_data.private_key(backend)
pt = b"encrypt me!"
public_key = private_key.public_key()
ct = public_key.encrypt(pt, pad)
assert ct != pt
assert len(ct) == math.ceil(public_key.key_size / 8.0)
recovered_pt = private_key.decrypt(ct, pad)
assert recovered_pt == pt
@pytest.mark.parametrize(
("key_data", "pad"),
itertools.product(
(RSA_KEY_1024, RSA_KEY_1025, RSA_KEY_1026, RSA_KEY_1027,
RSA_KEY_1028, RSA_KEY_1029, RSA_KEY_1030, RSA_KEY_1031,
RSA_KEY_1536, RSA_KEY_2048),
(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
),
padding.PKCS1v15()
)
)
)
def test_rsa_encrypt_key_too_small(self, key_data, pad, backend):
private_key = key_data.private_key(backend)
public_key = private_key.public_key()
# Slightly smaller than the key size but not enough for padding.
with pytest.raises(ValueError):
public_key.encrypt(
b"\x00" * (private_key.key_size // 8 - 1),
pad
)
# Larger than the key size.
with pytest.raises(ValueError):
public_key.encrypt(
b"\x00" * (private_key.key_size // 8 + 5),
pad
)
def test_unsupported_padding(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
public_key.encrypt(b"somedata", DummyAsymmetricPadding())
with pytest.raises(TypeError):
public_key.encrypt(b"somedata", padding=object())
def test_unsupported_oaep_mgf(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
public_key.encrypt(
b"ciphertext",
padding.OAEP(
mgf=DummyMGF(),
algorithm=hashes.SHA1(),
label=None
)
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSANumbers(object):
def test_rsa_public_numbers(self):
public_numbers = rsa.RSAPublicNumbers(e=1, n=15)
assert public_numbers.e == 1
assert public_numbers.n == 15
def test_rsa_private_numbers(self):
public_numbers = rsa.RSAPublicNumbers(e=1, n=15)
private_numbers = rsa.RSAPrivateNumbers(
p=3,
q=5,
d=1,
dmp1=1,
dmq1=1,
iqmp=2,
public_numbers=public_numbers
)
assert private_numbers.p == 3
assert private_numbers.q == 5
assert private_numbers.d == 1
assert private_numbers.dmp1 == 1
assert private_numbers.dmq1 == 1
assert private_numbers.iqmp == 2
assert private_numbers.public_numbers == public_numbers
def test_rsa_private_numbers_create_key(self, backend):
private_key = RSA_KEY_1024.private_key(backend)
assert private_key
def test_rsa_public_numbers_create_key(self, backend):
public_key = RSA_KEY_1024.public_numbers.public_key(backend)
assert public_key
def test_public_numbers_invalid_types(self):
with pytest.raises(TypeError):
rsa.RSAPublicNumbers(e=None, n=15)
with pytest.raises(TypeError):
rsa.RSAPublicNumbers(e=1, n=None)
def test_private_numbers_invalid_types(self):
public_numbers = rsa.RSAPublicNumbers(e=1, n=15)
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=None,
q=5,
d=1,
dmp1=1,
dmq1=1,
iqmp=2,
public_numbers=public_numbers
)
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=3,
q=None,
d=1,
dmp1=1,
dmq1=1,
iqmp=2,
public_numbers=public_numbers
)
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=3,
q=5,
d=None,
dmp1=1,
dmq1=1,
iqmp=2,
public_numbers=public_numbers
)
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=3,
q=5,
d=1,
dmp1=None,
dmq1=1,
iqmp=2,
public_numbers=public_numbers
)
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=3,
q=5,
d=1,
dmp1=1,
dmq1=None,
iqmp=2,
public_numbers=public_numbers
)
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=3,
q=5,
d=1,
dmp1=1,
dmq1=1,
iqmp=None,
public_numbers=public_numbers
)
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=3,
q=5,
d=1,
dmp1=1,
dmq1=1,
iqmp=2,
public_numbers=None
)
def test_invalid_public_numbers_argument_values(self, backend):
# Start with public_exponent=7, modulus=15. Then change one value at a
# time to test the bounds.
# Test a modulus < 3.
with pytest.raises(ValueError):
rsa.RSAPublicNumbers(e=7, n=2).public_key(backend)
# Test a public_exponent < 3
with pytest.raises(ValueError):
rsa.RSAPublicNumbers(e=1, n=15).public_key(backend)
# Test a public_exponent > modulus
with pytest.raises(ValueError):
rsa.RSAPublicNumbers(e=17, n=15).public_key(backend)
# Test a public_exponent that is not odd.
with pytest.raises(ValueError):
rsa.RSAPublicNumbers(e=14, n=15).public_key(backend)
def test_invalid_private_numbers_argument_values(self, backend):
# Start with p=3, q=11, private_exponent=3, public_exponent=7,
# modulus=33, dmp1=1, dmq1=3, iqmp=2. Then change one value at
# a time to test the bounds.
# Test a modulus < 3.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=2
)
).private_key(backend)
# Test a modulus != p * q.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=35
)
).private_key(backend)
# Test a p > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=37,
q=11,
d=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=33
)
).private_key(backend)
# Test a q > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=37,
d=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=33
)
).private_key(backend)
# Test a dmp1 > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=35,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=33
)
).private_key(backend)
# Test a dmq1 > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=1,
dmq1=35,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=33
)
).private_key(backend)
# Test an iqmp > modulus.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=1,
dmq1=3,
iqmp=35,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=33
)
).private_key(backend)
# Test a private_exponent > modulus
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=37,
dmp1=1,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=33
)
).private_key(backend)
# Test a public_exponent < 3
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=1,
n=33
)
).private_key(backend)
# Test a public_exponent > modulus
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=1,
dmq1=3,
iqmp=35,
public_numbers=rsa.RSAPublicNumbers(
e=65537,
n=33
)
).private_key(backend)
# Test a public_exponent that is not odd.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=1,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=6,
n=33
)
).private_key(backend)
# Test a dmp1 that is not odd.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=2,
dmq1=3,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=33
)
).private_key(backend)
# Test a dmq1 that is not odd.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=3,
q=11,
d=3,
dmp1=1,
dmq1=4,
iqmp=2,
public_numbers=rsa.RSAPublicNumbers(
e=7,
n=33
)
).private_key(backend)
def test_public_number_repr(self):
num = RSAPublicNumbers(1, 1)
assert repr(num) == "<RSAPublicNumbers(e=1, n=1)>"
class TestRSANumbersEquality(object):
def test_public_numbers_eq(self):
num = RSAPublicNumbers(1, 2)
num2 = RSAPublicNumbers(1, 2)
assert num == num2
def test_public_numbers_ne(self):
num = RSAPublicNumbers(1, 2)
assert num != RSAPublicNumbers(2, 2)
assert num != RSAPublicNumbers(1, 3)
assert num != object()
def test_private_numbers_eq(self):
pub = RSAPublicNumbers(1, 2)
num = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, pub)
pub2 = RSAPublicNumbers(1, 2)
num2 = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, pub2)
assert num == num2
def test_private_numbers_ne(self):
pub = RSAPublicNumbers(1, 2)
num = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, pub)
assert num != RSAPrivateNumbers(
1, 2, 3, 4, 5, 7, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 3, 4, 4, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 3, 5, 5, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 4, 4, 5, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 3, 3, 4, 5, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
2, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 3, 4, 5, 6, RSAPublicNumbers(2, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 3)
)
assert num != object()
def test_public_numbers_hash(self):
pub1 = RSAPublicNumbers(3, 17)
pub2 = RSAPublicNumbers(3, 17)
pub3 = RSAPublicNumbers(7, 21)
assert hash(pub1) == hash(pub2)
assert hash(pub1) != hash(pub3)
def test_private_numbers_hash(self):
priv1 = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 2))
priv2 = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 2))
priv3 = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 3))
assert hash(priv1) == hash(priv2)
assert hash(priv1) != hash(priv3)
class TestRSAPrimeFactorRecovery(object):
@pytest.mark.parametrize(
"vector",
_flatten_pkcs1_examples(load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs1v15crypt-vectors.txt"),
load_pkcs1_vectors
))
)
def test_recover_prime_factors(self, vector):
private, public, example = vector
p, q = rsa.rsa_recover_prime_factors(
private["modulus"],
private["public_exponent"],
private["private_exponent"]
)
# Unfortunately there is no convention on which prime should be p
# and which one q. The function we use always makes p > q, but the
# NIST vectors are not so consistent. Accordingly, we verify we've
# recovered the proper (p, q) by sorting them and asserting on that.
assert sorted([p, q]) == sorted([private["p"], private["q"]])
assert p > q
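    # Illustrative sketch (not part of the original test suite): the toy
    # textbook key p=61, q=53 gives n=3233, e=17, d=2753, and
    # rsa_recover_prime_factors is expected to return the larger prime first.
    # Real keys are of course far larger.
    #
    #   p, q = rsa.rsa_recover_prime_factors(n=3233, e=17, d=2753)
    #   assert (p, q) == (61, 53)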
def test_invalid_recover_prime_factors(self):
with pytest.raises(ValueError):
rsa.rsa_recover_prime_factors(34, 3, 7)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=PEMSerializationBackend)
class TestRSAPrivateKeySerialization(object):
@pytest.mark.parametrize(
("fmt", "password"),
itertools.product(
[
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.PrivateFormat.PKCS8
],
[
b"s",
b"longerpassword",
b"!*$&(@#$*&($T@%_somesymbols",
b"\x01" * 1000,
]
)
)
def test_private_bytes_encrypted_pem(self, backend, fmt, password):
key = RSA_KEY_2048.private_key(backend)
serialized = key.private_bytes(
serialization.Encoding.PEM,
fmt,
serialization.BestAvailableEncryption(password)
)
loaded_key = serialization.load_pem_private_key(
serialized, password, backend
)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("fmt", "password"),
[
[serialization.PrivateFormat.PKCS8, b"s"],
[serialization.PrivateFormat.PKCS8, b"longerpassword"],
[serialization.PrivateFormat.PKCS8, b"!*$&(@#$*&($T@%_somesymbol"],
[serialization.PrivateFormat.PKCS8, b"\x01" * 1000]
]
)
def test_private_bytes_encrypted_der(self, backend, fmt, password):
key = RSA_KEY_2048.private_key(backend)
serialized = key.private_bytes(
serialization.Encoding.DER,
fmt,
serialization.BestAvailableEncryption(password)
)
loaded_key = serialization.load_der_private_key(
serialized, password, backend
)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("encoding", "fmt", "loader_func"),
[
[
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_pem_private_key
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_der_private_key
],
[
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.load_pem_private_key
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.load_der_private_key
],
]
)
def test_private_bytes_unencrypted(self, backend, encoding, fmt,
loader_func):
key = RSA_KEY_2048.private_key(backend)
serialized = key.private_bytes(
encoding, fmt, serialization.NoEncryption()
)
loaded_key = loader_func(serialized, None, backend)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("key_path", "encoding", "loader_func"),
[
[
os.path.join(
"asymmetric",
"Traditional_OpenSSL_Serialization",
"testrsa.pem"
),
serialization.Encoding.PEM,
serialization.load_pem_private_key
],
[
os.path.join("asymmetric", "DER_Serialization", "testrsa.der"),
serialization.Encoding.DER,
serialization.load_der_private_key
],
]
)
def test_private_bytes_traditional_openssl_unencrypted(
self, backend, key_path, encoding, loader_func
):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
key = loader_func(key_bytes, None, backend)
serialized = key.private_bytes(
encoding,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption()
)
assert serialized == key_bytes
def test_private_bytes_traditional_der_encrypted_invalid(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.BestAvailableEncryption(b"password")
)
def test_private_bytes_invalid_encoding(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
key.private_bytes(
"notencoding",
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption()
)
def test_private_bytes_invalid_format(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
"invalidformat",
serialization.NoEncryption()
)
def test_private_bytes_invalid_encryption_algorithm(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
"notanencalg"
)
def test_private_bytes_unsupported_encryption_type(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
DummyKeySerializationEncryption()
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=PEMSerializationBackend)
class TestRSAPEMPublicKeySerialization(object):
@pytest.mark.parametrize(
("key_path", "loader_func", "encoding", "format"),
[
(
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.pem"),
serialization.load_pem_public_key,
serialization.Encoding.PEM,
serialization.PublicFormat.PKCS1,
), (
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.der"),
serialization.load_der_public_key,
serialization.Encoding.DER,
serialization.PublicFormat.PKCS1,
), (
os.path.join("asymmetric", "PKCS8", "unenc-rsa-pkcs8.pub.pem"),
serialization.load_pem_public_key,
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo,
), (
os.path.join(
"asymmetric",
"DER_Serialization",
"unenc-rsa-pkcs8.pub.der"
),
serialization.load_der_public_key,
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
]
)
def test_public_bytes_match(self, key_path, loader_func, encoding, format,
backend):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
key = loader_func(key_bytes, backend)
serialized = key.public_bytes(encoding, format)
assert serialized == key_bytes
def test_public_bytes_openssh(self, backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.pem"),
lambda pemfile: pemfile.read(), mode="rb"
)
key = serialization.load_pem_public_key(key_bytes, backend)
ssh_bytes = key.public_bytes(
serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH
)
assert ssh_bytes == (
b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC7JHoJfg6yNzLMOWet8Z49a4KD"
b"0dCspMAYvo2YAMB7/wdEycocujbhJ2n/seONi+5XqTqqFkM5VBl8rmkkFPZk/7x0"
b"xmdsTPECSWnHK+HhoaNDFPR3j8jQhVo1laxiqcEhAHegi5cwtFosuJAvSKAFKEvy"
b"D43si00DQnXWrYHAEQ=="
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.OpenSSH
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.DER, serialization.PublicFormat.OpenSSH
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.PKCS1,
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
def test_public_bytes_invalid_encoding(self, backend):
key = RSA_KEY_2048.private_key(backend).public_key()
with pytest.raises(TypeError):
key.public_bytes("notencoding", serialization.PublicFormat.PKCS1)
def test_public_bytes_invalid_format(self, backend):
key = RSA_KEY_2048.private_key(backend).public_key()
with pytest.raises(TypeError):
key.public_bytes(serialization.Encoding.PEM, "invalidformat")
|
apache-2.0
|
wandec/grr
|
lib/hunts/implementation.py
|
1
|
36869
|
#!/usr/bin/env python
"""The implementation of hunts.
A hunt is a mechanism for automatically scheduling flows on a selective subset
of clients, managing these flows, and collecting and presenting their combined
results.
In essence a hunt is just another flow which schedules child flows using
CallFlow(). Replies from these child flows are then collected and stored in the
hunt's AFF4 representation. The main difference between a hunt and a regular
flow is that in hunts responses are processed concurrently and not necessarily
in request order. A hunt processes many responses concurrently, while a flow
processes responses in strict request order (in a single thread).
For this reason a hunt has its own runner - the HuntRunner.
"""
import re
import threading
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import flow_runner
from grr.lib import queue_manager
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import type_info
from grr.lib import utils
from grr.proto import flows_pb2
class HuntRunnerArgs(rdfvalue.RDFProtoStruct):
protobuf = flows_pb2.HuntRunnerArgs
def Validate(self):
if self.HasField("regex_rules"):
self.regex_rules.Validate()
if self.HasField("integer_rules"):
self.integer_rules.Validate()
class HuntRunner(flow_runner.FlowRunner):
"""The runner for hunts.
  This runner implements some slight differences from the regular flows:
  1) Responses are not processed in strict request order. Instead they are
processed concurrently on a thread pool.
2) Hunt Errors are not fatal and do not generally terminate the hunt. The hunt
continues running.
3) Resources are tallied for each client and as a hunt total.
"""
schedule_kill_notifications = False
process_requests_in_order = False
def _AddClient(self, client_id):
next_client_due = self.flow_obj.state.context.next_client_due
if self.args.client_rate > 0:
self.flow_obj.state.context.next_client_due = (
next_client_due + 60 / self.args.client_rate)
self.CallState(messages=[client_id], next_state="RegisterClient",
client_id=client_id, start_time=next_client_due)
else:
self._RegisterAndRunClient(client_id)
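  # Worked example of the pacing above (illustrative, not from the original
  # source): client_rate is interpreted as clients per minute, so with
  # client_rate=20 each client's "RegisterClient" state is scheduled
  # 60 / 20 = 3 seconds after the previous one, while client_rate=0 registers
  # and runs the client immediately.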
def _RegisterAndRunClient(self, client_id):
self.flow_obj.RegisterClient(client_id)
self.RunStateMethod("RunClient", direct_response=[client_id])
def _Process(self, request, responses, thread_pool=None, events=None):
"""Hunts process all responses concurrently in a threadpool."""
# This function is called and runs within the main processing thread. We do
# not need to lock the hunt object while running in this method.
if request.next_state == "AddClient":
if not self.IsHuntStarted():
logging.debug(
"Unable to start client %s on hunt %s which is in state %s",
request.client_id, self.session_id,
self.flow_obj.Get(self.flow_obj.Schema.STATE))
return
      # Read the current client count.
client_count = int(
self.flow_obj.Get(self.flow_obj.Schema.CLIENT_COUNT, 0))
# Stop the hunt if we exceed the client limit.
if 0 < self.args.client_limit <= client_count:
        # Remove our rules from the foreman so we don't get more clients sent
        # to this hunt. The hunt will be paused.
self.Pause()
        # Ignore this client since the limit has already been reached.
return
# Update the client count.
self.flow_obj.Set(self.flow_obj.Schema.CLIENT_COUNT(client_count + 1))
# Add client to list of clients and optionally run it
# (if client_rate == 0).
self._AddClient(request.client_id)
return
if request.next_state == "RegisterClient":
if self.IsHuntStarted():
self._RegisterAndRunClient(request.client_id)
else:
logging.debug(
"Not starting client %s on hunt %s which is not running: %s",
request.client_id, self.session_id,
self.flow_obj.Get(self.flow_obj.Schema.STATE))
return
event = threading.Event()
events.append(event)
# In a hunt, all requests are independent and can be processed
# in separate threads.
thread_pool.AddTask(target=self.RunStateMethod,
args=(request.next_state, request, responses, event),
name="Hunt processing")
def Log(self, format_str, *args):
"""Logs the message using the flow's standard logging.
Args:
format_str: Format string
*args: arguments to the format string
Raises:
RuntimeError: on parent missing logs_collection
"""
format_str = utils.SmartUnicode(format_str)
try:
# The status message is always in unicode
status = format_str % args
except TypeError:
logging.error("Tried to log a format string with the wrong number "
"of arguments: %s", format_str)
status = format_str
logging.info("%s: %s", self.session_id, status)
self.SetStatus(utils.SmartUnicode(status))
logs_collection = self.OpenLogsCollection(self.args.logs_collection_urn)
logs_collection.Add(
rdfvalue.FlowLog(client_id=None, urn=self.session_id,
flow_name=self.flow_obj.__class__.__name__,
log_message=status))
logs_collection.Flush()
def Error(self, backtrace, client_id=None):
"""Logs an error for a client but does not terminate the hunt."""
logging.error("Hunt Error: %s", backtrace)
self.flow_obj.LogClientError(client_id, backtrace=backtrace)
def SaveResourceUsage(self, request, responses):
"""Update the resource usage of the hunt."""
self.flow_obj.ProcessClientResourcesStats(request.client_id,
responses.status)
# Do this last since it may raise "CPU quota exceeded".
self.UpdateProtoResources(responses.status)
def InitializeContext(self, args):
"""Initializes the context of this hunt."""
if args is None:
args = HuntRunnerArgs()
    # For large hunts, checking client limits creates a high load on the
    # foreman since it needs to read the hunt object's client list. We
    # therefore don't allow a client_limit above 1000. Note that a client_limit
    # of 0 means unlimited, which is allowed (the foreman then does not need to
    # check the client list).
if args.client_limit > 1000:
raise RuntimeError("Please specify client_limit <= 1000.")
context = utils.DataObject(
args=args,
backtrace=None,
client_resources=rdfvalue.ClientResources(),
create_time=rdfvalue.RDFDatetime().Now(),
creator=self.token.username,
expires=rdfvalue.RDFDatetime().Now(),
# If not None, kill-stuck-flow notification is scheduled at the given
# time.
kill_timestamp=None,
network_bytes_sent=0,
next_client_due=0,
next_outbound_id=1,
next_processed_request=1,
next_states=set(),
outstanding_requests=0,
current_state=None,
start_time=rdfvalue.RDFDatetime().Now(),
# Hunts are always in the running state.
state=rdfvalue.Flow.State.RUNNING,
usage_stats=rdfvalue.ClientResourcesStats(),
remaining_cpu_quota=args.cpu_limit,
)
# Store the context in the flow_obj for next time.
self.flow_obj.state.Register("context", context)
return context
def GetNewSessionID(self, **_):
"""Returns a random integer session ID for this flow.
All hunts are created under the aff4:/hunts namespace.
Returns:
a formatted session id string.
"""
return rdfvalue.SessionID(base="aff4:/hunts", queue=self.args.queue)
def _CreateAuditEvent(self, event_action):
try:
flow_name = self.flow_obj.args.flow_runner_args.flow_name
except AttributeError:
flow_name = ""
event = rdfvalue.AuditEvent(user=self.flow_obj.token.username,
action=event_action, urn=self.flow_obj.urn,
flow_name=flow_name,
description=self.args.description)
flow.Events.PublishEvent("Audit", event, token=self.flow_obj.token)
def Start(self, add_foreman_rules=True):
"""This uploads the rules to the foreman and, thus, starts the hunt."""
# We are already running.
if self.flow_obj.Get(self.flow_obj.Schema.STATE) == "STARTED":
return
# Check the permissions for the hunt here. Note that self.args.token is the
    # original creator's token, while the aff4 object was created with the
# caller's token. This check therefore ensures that the caller to this
# method has permissions to start the hunt (not necessarily the original
# creator of the hunt).
data_store.DB.security_manager.CheckHuntAccess(
self.flow_obj.token, self.session_id)
# Determine when this hunt will expire.
self.context.expires = self.args.expiry_time.Expiry()
# When the next client can be scheduled. Implements gradual client
# recruitment rate according to the client_rate.
self.context.next_client_due = rdfvalue.RDFDatetime().Now()
self._CreateAuditEvent("HUNT_STARTED")
# Start the hunt.
self.flow_obj.Set(self.flow_obj.Schema.STATE("STARTED"))
self.flow_obj.Flush()
if not add_foreman_rules:
return
# Add a new rule to the foreman
foreman_rule = rdfvalue.ForemanRule(
created=rdfvalue.RDFDatetime().Now(),
expires=self.context.expires,
description="Hunt %s %s" % (self.session_id,
self.args.hunt_name),
regex_rules=self.args.regex_rules,
integer_rules=self.args.integer_rules)
foreman_rule.actions.Append(hunt_id=self.session_id,
hunt_name=self.args.hunt_name,
client_limit=self.args.client_limit)
# Make sure the rule makes sense.
foreman_rule.Validate()
with aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token,
aff4_type="GRRForeman",
ignore_cache=True) as foreman:
foreman_rules = foreman.Get(foreman.Schema.RULES,
default=foreman.Schema.RULES())
foreman_rules.Append(foreman_rule)
foreman.Set(foreman_rules)
def _RemoveForemanRule(self):
with aff4.FACTORY.Open("aff4:/foreman", mode="rw", token=self.token,
ignore_cache=True) as foreman:
aff4_rules = foreman.Get(foreman.Schema.RULES)
aff4_rules = foreman.Schema.RULES(
# Remove those rules which fire off this hunt id.
[r for r in aff4_rules if r.hunt_id != self.session_id])
foreman.Set(aff4_rules)
def Pause(self):
"""Pauses the hunt (removes Foreman rules, does not touch expiry time)."""
if not self.IsHuntStarted():
return
# Make sure the user is allowed to pause this hunt.
data_store.DB.security_manager.CheckHuntAccess(
self.flow_obj.token, self.session_id)
self._RemoveForemanRule()
self.flow_obj.Set(self.flow_obj.Schema.STATE("PAUSED"))
self.flow_obj.Flush()
self._CreateAuditEvent("HUNT_PAUSED")
def Stop(self):
"""Cancels the hunt (removes Foreman rules, resets expiry time to 0)."""
# Make sure the user is allowed to stop this hunt.
data_store.DB.security_manager.CheckHuntAccess(
self.flow_obj.token, self.session_id)
# Expire the hunt so the worker can destroy it.
self.args.expires = rdfvalue.RDFDatetime().Now()
self._RemoveForemanRule()
self.flow_obj.Set(self.flow_obj.Schema.STATE("STOPPED"))
self.flow_obj.Flush()
self._CreateAuditEvent("HUNT_STOPPED")
def IsRunning(self):
"""Hunts are always considered to be running.
    Note that we consider the hunt itself to always be active, since we might
    have child flows which are still in flight at the moment the hunt is paused
    or stopped. We still want to receive responses from these flows and process
them.
Returns:
True
"""
return True
def IsHuntStarted(self):
"""Is this hunt considered started?
This method is used to check if new clients should be processed by this
    hunt. Note that child flow responses are always processed (as determined by
    IsRunning()), but new clients are only scheduled while the hunt is started.
    Returns:
      Whether a new client is allowed to be scheduled on this hunt.
"""
    # New clients are not accepted while the hunt is STOPPED or PAUSED.
state = self.flow_obj.Get(self.flow_obj.Schema.STATE)
if state in ["STOPPED", "PAUSED"]:
return False
# Hunt has expired.
if self.context.expires < rdfvalue.RDFDatetime().Now():
# Stop the hunt due to expiry.
self.Stop()
return False
return True
def OutstandingRequests(self):
# Lie about it to prevent us from being destroyed.
return 1
def CallState(self, messages=None, next_state="", client_id=None,
request_data=None, start_time=None):
"""This method is used to asynchronously schedule a new hunt state.
    The state will be invoked at a later time and receive all the messages
we send.
Args:
messages: A list of rdfvalues to send. If the last one is not a
GrrStatus, we append an OK Status.
next_state: The state in this hunt to be invoked with the responses.
client_id: ClientURN to use in scheduled requests.
request_data: Any dict provided here will be available in the
RequestState protobuf. The Responses object maintains a
reference to this protobuf for use in the execution of the
state method. (so you can access this data by
responses.request).
start_time: Schedule the state at this time. This delays notification
and messages for processing into the future.
Raises:
ValueError: on arguments error.
"""
if messages is None:
messages = []
if not next_state:
raise ValueError("next_state can't be empty.")
# Now we construct a special response which will be sent to the hunt
# flow. Randomize the request_id so we do not overwrite other messages in
# the queue.
request_state = rdfvalue.RequestState(id=utils.PRNG.GetULong(),
session_id=self.context.session_id,
client_id=client_id,
next_state=next_state)
if request_data:
request_state.data = rdfvalue.Dict().FromDict(request_data)
self.QueueRequest(request_state, timestamp=start_time)
# Add the status message if needed.
if not messages or not isinstance(messages[-1], rdfvalue.GrrStatus):
messages.append(rdfvalue.GrrStatus())
# Send all the messages
for i, payload in enumerate(messages):
if isinstance(payload, rdfvalue.RDFValue):
msg = rdfvalue.GrrMessage(
session_id=self.session_id, request_id=request_state.id,
response_id=1 + i,
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED,
payload=payload,
type=rdfvalue.GrrMessage.Type.MESSAGE)
if isinstance(payload, rdfvalue.GrrStatus):
msg.type = rdfvalue.GrrMessage.Type.STATUS
else:
raise flow_runner.FlowRunnerError("Bad message %s of type %s." %
(payload, type(payload)))
self.QueueResponse(msg, timestamp=start_time)
# Notify the worker about it.
self.QueueNotification(session_id=self.session_id, timestamp=start_time)
class GRRHunt(flow.GRRFlow):
"""The GRR Hunt class."""
# Some common rules.
MATCH_WINDOWS = rdfvalue.ForemanAttributeRegex(attribute_name="System",
attribute_regex="Windows")
MATCH_LINUX = rdfvalue.ForemanAttributeRegex(attribute_name="System",
attribute_regex="Linux")
MATCH_DARWIN = rdfvalue.ForemanAttributeRegex(attribute_name="System",
attribute_regex="Darwin")
# TODO(user): this is deprecated (see ResultsOutputCollection, which
# is now used to work with hunt results).
DEPRECATED_RESULTS_QUEUE = rdfvalue.RDFURN("HR")
class SchemaCls(flow.GRRFlow.SchemaCls):
"""The schema for hunts.
This object stores the persistent information for the hunt.
"""
# TODO(user): remove as soon as there are no more active hunts
# storing client ids and errors in versioned attributes.
DEPRECATED_CLIENTS = aff4.Attribute("aff4:clients", rdfvalue.RDFURN,
"The list of clients this hunt was "
"run against.",
creates_new_object_version=False)
CLIENT_COUNT = aff4.Attribute("aff4:client_count", rdfvalue.RDFInteger,
"The total number of clients scheduled.",
versioned=False,
creates_new_object_version=False)
# TODO(user): remove as soon as there are no more active hunts
# storing client ids and errors in versioned attributes.
DEPRECATED_FINISHED = aff4.Attribute(
"aff4:finished", rdfvalue.RDFURN,
"The list of clients the hunt has completed on.",
creates_new_object_version=False)
# TODO(user): remove as soon as there are no more active hunts
# storing client ids and errors in versioned attributes.
DEPRECATED_ERRORS = aff4.Attribute(
"aff4:errors", rdfvalue.HuntError,
"The list of clients that returned an error.",
creates_new_object_version=False)
# TODO(user): remove as soon as there's no more potential need to
# migrate old logs
DEPRECATED_LOG = aff4.Attribute("aff4:result_log", rdfvalue.FlowLog,
"The log entries.",
creates_new_object_version=False)
    # This needs to be kept out of the args semantic value since it must be
    # updated without taking a lock on the hunt object.
STATE = aff4.Attribute(
"aff4:hunt_state", rdfvalue.RDFString,
"The state of this hunt Can be 'STOPPED', 'STARTED' or 'PAUSED'.",
versioned=False, lock_protected=False, default="PAUSED")
args_type = None
runner_cls = HuntRunner
def Initialize(self):
super(GRRHunt, self).Initialize()
# Hunts run in multiple threads so we need to protect access.
self.lock = threading.RLock()
self.processed_responses = False
if "r" in self.mode:
self.client_count = self.Get(self.Schema.CLIENT_COUNT)
@property
def logs_collection_urn(self):
return self.urn.Add("Logs")
@property
def all_clients_collection_urn(self):
return self.urn.Add("AllClients")
@property
def completed_clients_collection_urn(self):
return self.urn.Add("CompletedClients")
@property
def clients_errors_collection_urn(self):
return self.urn.Add("ErrorClients")
@property
def output_plugins_status_collection_urn(self):
return self.urn.Add("OutputPluginsStatus")
@property
def output_plugins_errors_collection_urn(self):
return self.urn.Add("OutputPluginsErrors")
@property
def creator(self):
return self.state.context.creator
def _AddObjectToCollection(self, obj, collection_urn):
aff4.PackedVersionedCollection.AddToCollection(
collection_urn, [obj], sync=False, token=self.token)
def _GetCollectionItems(self, collection_urn):
collection = aff4.FACTORY.Create(collection_urn,
"PackedVersionedCollection",
mode="r", token=self.token)
return collection.GenerateItems()
def RegisterClient(self, client_urn):
self._AddObjectToCollection(client_urn, self.all_clients_collection_urn)
def RegisterCompletedClient(self, client_urn):
self._AddObjectToCollection(client_urn,
self.completed_clients_collection_urn)
def RegisterClientError(self, client_id, log_message=None, backtrace=None):
error = rdfvalue.HuntError(client_id=client_id,
backtrace=backtrace)
if log_message:
error.log_message = utils.SmartUnicode(log_message)
self._AddObjectToCollection(error, self.clients_errors_collection_urn)
@flow.StateHandler()
def RunClient(self, client_id):
"""This method runs the hunt on a specific client.
Note that this method holds a lock on the hunt object and runs in the main
thread. It is safe to access any hunt parameters from here.
Args:
client_id: The new client assigned to this hunt.
"""
@classmethod
def StartHunt(cls, args=None, runner_args=None, **kwargs):
"""This class method creates new hunts."""
# Build the runner args from the keywords.
if runner_args is None:
runner_args = HuntRunnerArgs()
cls.FilterArgsFromSemanticProtobuf(runner_args, kwargs)
    # Is the requested hunt a known hunt class?
    if (runner_args.hunt_name not in cls.classes or
        not aff4.issubclass(cls.classes[runner_args.hunt_name], GRRHunt)):
raise RuntimeError("Unable to locate hunt %s" % runner_args.hunt_name)
# Make a new hunt object and initialize its runner.
hunt_obj = aff4.FACTORY.Create(None, runner_args.hunt_name,
mode="w", token=runner_args.token)
# Hunt is called using keyword args. We construct an args proto from the
    # kwargs.
if hunt_obj.args_type and args is None:
args = hunt_obj.args_type()
cls.FilterArgsFromSemanticProtobuf(args, kwargs)
if hunt_obj.args_type and not isinstance(args, hunt_obj.args_type):
raise RuntimeError("Hunt args must be instance of %s" %
hunt_obj.args_type)
if kwargs:
raise type_info.UnknownArg("Unknown parameters to StartHunt: %s" % kwargs)
# Store the hunt args in the state.
hunt_obj.state.Register("args", args)
# Hunts are always created in the paused state. The runner method Start
# should be called to start them.
hunt_obj.Set(hunt_obj.Schema.STATE("PAUSED"))
runner = hunt_obj.CreateRunner(runner_args=runner_args)
# Allow the hunt to do its own initialization.
runner.RunStateMethod("Start")
hunt_obj.Flush()
try:
flow_name = args.flow_runner_args.flow_name
except AttributeError:
flow_name = ""
event = rdfvalue.AuditEvent(user=runner_args.token.username,
action="HUNT_CREATED", urn=hunt_obj.urn,
flow_name=flow_name,
description=runner_args.description)
flow.Events.PublishEvent("Audit", event, token=runner_args.token)
return hunt_obj
@classmethod
def StartClients(cls, hunt_id, client_ids, token=None):
"""This method is called by the foreman for each client it discovers.
Note that this function is performance sensitive since it is called by the
foreman for every client which needs to be scheduled.
Args:
hunt_id: The hunt to schedule.
client_ids: List of clients that should be added to the hunt.
token: An optional access token to use.
"""
token = token or access_control.ACLToken(username="Hunt", reason="hunting")
with queue_manager.QueueManager(token=token) as flow_manager:
for client_id in client_ids:
# Now we construct a special response which will be sent to the hunt
# flow. Randomize the request_id so we do not overwrite other messages
# in the queue.
state = rdfvalue.RequestState(id=utils.PRNG.GetULong(),
session_id=hunt_id,
client_id=client_id,
next_state="AddClient")
# Queue the new request.
flow_manager.QueueRequest(hunt_id, state)
# Send a response.
msg = rdfvalue.GrrMessage(
session_id=hunt_id,
request_id=state.id, response_id=1,
auth_state=rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED,
type=rdfvalue.GrrMessage.Type.STATUS,
payload=rdfvalue.GrrStatus())
flow_manager.QueueResponse(hunt_id, msg)
# And notify the worker about it.
flow_manager.QueueNotification(session_id=hunt_id)
def Run(self):
"""A shortcut method for starting the hunt."""
self.GetRunner().Start()
def Pause(self):
"""A shortcut method for pausing the hunt."""
self.GetRunner().Pause()
def Stop(self):
"""A shortcut method for stopping the hunt."""
self.GetRunner().Stop()
def AddResultsToCollection(self, responses, client_id):
if responses.success:
with self.lock:
self.processed_responses = True
msgs = [rdfvalue.GrrMessage(payload=response, source=client_id)
for response in responses]
aff4.ResultsOutputCollection.AddToCollection(
self.state.context.results_collection_urn, msgs,
sync=True, token=self.token)
# Update stats.
stats.STATS.IncrementCounter("hunt_results_added",
delta=len(msgs))
else:
self.LogClientError(client_id, log_message=utils.SmartStr(
responses.status))
def Save(self):
if self.state and self.processed_responses:
with self.lock:
# TODO(user): remove when old-style hunt results are no longer
# supported.
if (self.state.context.results_collection is not None and
not isinstance(self.state.context.results_collection,
aff4.ResultsOutputCollection)):
data_store.DB.Set(self.DEPRECATED_RESULTS_QUEUE, self.urn,
rdfvalue.RDFDatetime().Now(),
replace=True, token=self.token)
super(GRRHunt, self).Save()
def CallFlow(self, flow_name=None, next_state=None, request_data=None,
client_id=None, **kwargs):
"""Create a new child flow from a hunt."""
base_session_id = None
if client_id:
# The flow is stored in the hunt namespace,
base_session_id = self.urn.Add(client_id.Basename())
# Actually start the new flow.
# We need to pass the logs_collection_urn here rather than in __init__ to
# wait for the hunt urn to be created.
child_urn = self.runner.CallFlow(
flow_name=flow_name, next_state=next_state,
base_session_id=base_session_id, client_id=client_id,
request_data=request_data, logs_collection_urn=self.logs_collection_urn,
**kwargs)
if client_id:
# But we also create a symlink to it from the client's namespace.
hunt_link_urn = client_id.Add("flows").Add(
"%s:hunt" % (self.urn.Basename()))
hunt_link = aff4.FACTORY.Create(hunt_link_urn, "AFF4Symlink",
token=self.token)
hunt_link.Set(hunt_link.Schema.SYMLINK_TARGET(child_urn))
hunt_link.Close()
return child_urn
def Name(self):
return self.state.context.args.hunt_name
def CheckClient(self, client):
return self.CheckRulesForClient(client, self.state.context.rules)
@classmethod
def CheckRulesForClient(cls, client, rules):
for rule in rules:
if cls.CheckRule(client, rule):
return True
return False
@classmethod
def CheckRule(cls, client, rule):
try:
for r in rule.regex_rules:
if r.path != "/":
continue
attribute = aff4.Attribute.NAMES[r.attribute_name]
value = utils.SmartStr(client.Get(attribute))
if not re.search(r.attribute_regex, value):
return False
for i in rule.integer_rules:
if i.path != "/":
continue
value = int(client.Get(aff4.Attribute.NAMES[i.attribute_name]))
op = i.operator
if op == rdfvalue.ForemanAttributeInteger.Operator.LESS_THAN:
if value >= i.value:
return False
elif op == rdfvalue.ForemanAttributeInteger.Operator.GREATER_THAN:
if value <= i.value:
return False
elif op == rdfvalue.ForemanAttributeInteger.Operator.EQUAL:
if value != i.value:
return False
else:
# Unknown operator.
return False
return True
except (KeyError, ValueError):
return False
def TestRules(self):
"""This quickly verifies the ruleset.
This applies the ruleset to all clients in the db to see how many of them
would match the current rules.
"""
root = aff4.FACTORY.Open(aff4.ROOT_URN, token=self.token)
display_warning = False
for rule in self.rules:
for r in rule.regex_rules:
if r.path != "/":
display_warning = True
for r in rule.integer_rules:
if r.path != "/":
display_warning = True
if display_warning:
logging.info("One or more rules use a relative path under the client, "
"this is not supported so your count may be off.")
all_clients = 0
num_matching_clients = 0
matching_clients = []
for client in root.OpenChildren(chunk_limit=100000):
if client.Get(client.Schema.TYPE) == "VFSGRRClient":
all_clients += 1
if self.CheckClient(client):
num_matching_clients += 1
matching_clients.append(utils.SmartUnicode(client.urn))
logging.info("Out of %d checked clients, %d matched the given rule set.",
all_clients, num_matching_clients)
if matching_clients:
logging.info("Example matches: %s", str(matching_clients[:3]))
def SetDescription(self, description=None):
if description:
self.state.context.args.description = description
else:
try:
flow_name = self.state.args.flow_runner_args.flow_name
except AttributeError:
flow_name = ""
self.state.context.args.description = flow_name
@flow.StateHandler()
def Start(self):
"""Initializes this hunt from arguments."""
self.state.context.Register("results_metadata_urn",
self.urn.Add("ResultsMetadata"))
self.state.context.Register("results_collection_urn",
self.urn.Add("Results"))
self.state.context.Register("output_plugins_base_urn",
self.urn.Add("Results"))
# TODO(user): Remove as soon as old style hunts (ones that use
# RDFValueCollection) are removed.
self.state.context.Register("results_collection", None)
with aff4.FACTORY.Create(
self.state.context.results_metadata_urn, "HuntResultsMetadata",
mode="rw", token=self.token) as results_metadata:
state = rdfvalue.FlowState()
try:
plugins_descriptors = self.state.args.output_plugins
except AttributeError:
plugins_descriptors = []
for index, plugin_descriptor in enumerate(plugins_descriptors):
output_base_urn = self.state.context.output_plugins_base_urn.Add(
plugin_descriptor.plugin_name)
plugin_class = plugin_descriptor.GetPluginClass()
plugin_obj = plugin_class(self.state.context.results_collection_urn,
output_base_urn=output_base_urn,
args=plugin_descriptor.plugin_args,
token=self.token)
state.Register("%s_%d" % (plugin_descriptor.plugin_name, index),
(plugin_descriptor, plugin_obj.state))
results_metadata.Set(results_metadata.Schema.OUTPUT_PLUGINS(state))
# Create results collection.
with aff4.FACTORY.Create(
self.state.context.results_collection_urn, "ResultsOutputCollection",
mode="w", token=self.token) as results_collection:
results_collection.Set(results_collection.Schema.RESULTS_SOURCE,
self.urn)
# Create all other hunt-related collections.
for urn in [self.logs_collection_urn,
self.all_clients_collection_urn,
self.completed_clients_collection_urn,
self.clients_errors_collection_urn,
self.output_plugins_status_collection_urn,
self.output_plugins_errors_collection_urn]:
with aff4.FACTORY.Create(urn, "PackedVersionedCollection", mode="w",
token=self.token):
pass
if not self.state.context.args.description:
self.SetDescription()
@flow.StateHandler()
def End(self):
"""Final state."""
def MarkClientDone(self, client_id):
"""Adds a client_id to the list of completed tasks."""
self.RegisterCompletedClient(client_id)
if self.state.context.args.notification_event:
status = rdfvalue.HuntNotification(session_id=self.session_id,
client_id=client_id)
self.Publish(self.state.context.args.notification_event, status)
def LogClientError(self, client_id, log_message=None, backtrace=None):
"""Logs an error for a client."""
self.RegisterClientError(client_id, log_message=log_message,
backtrace=backtrace)
def ProcessClientResourcesStats(self, client_id, status):
"""Process status message from a client and update the stats.
This method may be implemented in subclasses. It's called once per hunt
state per client.
Args:
client_id: Client id.
status: Status returned from the client.
"""
def GetClientsCounts(self):
collections = aff4.FACTORY.MultiOpen(
[self.all_clients_collection_urn, self.completed_clients_collection_urn,
self.clients_errors_collection_urn],
aff4_type="PackedVersionedCollection", mode="r", token=self.token)
collections_dict = dict((coll.urn, coll) for coll in collections)
def CollectionLen(collection_urn):
if collection_urn in collections_dict:
return collections_dict[collection_urn].CalculateLength()
else:
return 0
all_clients_count = CollectionLen(self.all_clients_collection_urn)
completed_clients_count = CollectionLen(
self.completed_clients_collection_urn)
clients_errors_count = CollectionLen(self.clients_errors_collection_urn)
return all_clients_count, completed_clients_count, clients_errors_count
def GetClientsErrors(self, client_id=None):
errors = self._GetCollectionItems(self.clients_errors_collection_urn)
if not client_id:
return errors
else:
return [error for error in errors if error.client_id == client_id]
def GetClients(self):
return set(self._GetCollectionItems(self.all_clients_collection_urn))
def GetClientsByStatus(self):
"""Get all the clients in a dict of {status: [client_list]}."""
started = set(self._GetCollectionItems(self.all_clients_collection_urn))
completed = set(self._GetCollectionItems(
self.completed_clients_collection_urn))
outstanding = started - completed
return {"STARTED": sorted(started),
"COMPLETED": sorted(completed),
"OUTSTANDING": sorted(outstanding)}
def GetClientStates(self, client_list, client_chunk=50):
"""Take in a client list and return dicts with their age and hostname."""
for client_group in utils.Grouper(client_list, client_chunk):
for fd in aff4.FACTORY.MultiOpen(client_group, mode="r",
aff4_type="VFSGRRClient",
token=self.token):
result = {}
result["age"] = fd.Get(fd.Schema.PING)
result["hostname"] = fd.Get(fd.Schema.HOSTNAME)
yield (fd.urn, result)
def GetLog(self, client_id=None):
log_vals = aff4.FACTORY.Create(
self.logs_collection_urn, mode="r",
aff4_type="PackedVersionedCollection", token=self.token)
if not client_id:
return log_vals
else:
return [val for val in log_vals if val.client_id == client_id]
class HuntInitHook(registry.InitHook):
pre = ["StatsInit"]
def RunOnce(self):
"""Register standard hunt-related stats."""
stats.STATS.RegisterCounterMetric("hunt_results_added")
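# --- Illustration (not part of GRR) ------------------------------------------
# A minimal sketch of the foreman rule dispatch implemented by
# GRRHunt.CheckRule above, using plain Python data instead of rdfvalue/aff4
# objects. The names plain_check_rule, client_attrs, regex_rules and
# integer_rules are hypothetical and exist only for this example.
import re


def plain_check_rule(client_attrs, regex_rules, integer_rules):
  """Return True when every sub-rule matches the client attribute dict."""
  try:
    for attr_name, regex in regex_rules:
      if not re.search(regex, str(client_attrs[attr_name])):
        return False
    for attr_name, operator, threshold in integer_rules:
      value = int(client_attrs[attr_name])
      if operator == "LESS_THAN":
        if value >= threshold:
          return False
      elif operator == "GREATER_THAN":
        if value <= threshold:
          return False
      elif operator == "EQUAL":
        if value != threshold:
          return False
      else:
        # Unknown operator: reject, mirroring the real implementation.
        return False
    return True
  except (KeyError, ValueError):
    return False


# Example: a rule that matches Windows clients installed after a cutoff time.
assert plain_check_rule(
    {"System": "Windows", "InstallTime": 1400000000},
    regex_rules=[("System", r"Windows")],
    integer_rules=[("InstallTime", "GREATER_THAN", 1300000000)])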
|
apache-2.0
|
yakovenkodenis/rethinkdb
|
test/common/test_exceptions.py
|
37
|
1071
|
#!/usr/bin/env python
'''Collection of the shared exceptions used in testing'''
class TestingFrameworkException(Exception):
'''Generic exception for this testing framework, mostly a base class for others'''
_message = 'A generic testing framework error occurred'
detail = None
debugInfo = None
def __init__(self, detail=None, debugInfo=None):
if detail is not None:
self.detail = str(detail)
if debugInfo is not None:
if hasattr(debugInfo, 'read'):
debugInfo.seek(0)
self.debugInfo = debugInfo.read()
else:
self.debugInfo = debugInfo
def __str__(self):
if self.detail is not None:
return "%s: %s" % (self.message(), self.detail)
else:
return self.message()
def message(self):
return self._message
class NotBuiltException(TestingFrameworkException):
'''Exception to raise when an item that was expected to be built was not'''
_message = 'An item was not built'
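# --- Usage illustration (not part of the test framework) ---------------------
# A minimal sketch of how these exceptions compose their string form: detail
# is appended to the class-level message, and a file-like debugInfo is read
# eagerly so it can still be inspected after the original stream is gone.
if __name__ == '__main__':
    import io
    try:
        raise NotBuiltException(detail='server binary missing',
                                debugInfo=io.StringIO(u'make: *** no rule'))
    except TestingFrameworkException as e:
        print(str(e))       # -> An item was not built: server binary missing
        print(e.debugInfo)  # -> make: *** no rule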
|
agpl-3.0
|
darith27/wagtail
|
wagtail/tests/testapp/migrations/0001_initial.py
|
22
|
18850
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
import modelcluster.contrib.taggit
import wagtail.wagtailimages.models
import wagtail.wagtailadmin.taggable
import modelcluster.fields
import wagtail.wagtailcore.fields
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0013_update_golive_expire_help_text'),
('wagtaildocs', '0002_initial_data'),
('taggit', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wagtailimages', '0005_make_filter_spec_unique'),
]
operations = [
migrations.CreateModel(
name='Advert',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('url', models.URLField(blank=True, null=True)),
('text', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AdvertPlacement',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('colour', models.CharField(max_length=255)),
('advert', models.ForeignKey(to='tests.Advert', related_name='+')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BusinessChild',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessIndex',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessSubIndex',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='CustomImageWithAdminFormFields',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('title', models.CharField(max_length=255, verbose_name='Title')),
('file', models.ImageField(width_field='width', height_field='height', upload_to=wagtail.wagtailimages.models.get_upload_to, verbose_name='File')),
('width', models.IntegerField(editable=False)),
('height', models.IntegerField(editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('caption', models.CharField(max_length=255)),
('not_editable_field', models.CharField(max_length=255)),
('tags', taggit.managers.TaggableManager(verbose_name='Tags', to='taggit.Tag', blank=True, through='taggit.TaggedItem', help_text=None)),
('uploaded_by_user', models.ForeignKey(null=True, blank=True, to=settings.AUTH_USER_MODEL, editable=False)),
],
options={
'abstract': False,
},
bases=(models.Model, wagtail.wagtailadmin.taggable.TagSearchable),
),
migrations.CreateModel(
name='CustomImageWithoutAdminFormFields',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('title', models.CharField(max_length=255, verbose_name='Title')),
('file', models.ImageField(width_field='width', height_field='height', upload_to=wagtail.wagtailimages.models.get_upload_to, verbose_name='File')),
('width', models.IntegerField(editable=False)),
('height', models.IntegerField(editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('caption', models.CharField(max_length=255)),
('not_editable_field', models.CharField(max_length=255)),
('tags', taggit.managers.TaggableManager(verbose_name='Tags', to='taggit.Tag', blank=True, through='taggit.TaggedItem', help_text=None)),
('uploaded_by_user', models.ForeignKey(null=True, blank=True, to=settings.AUTH_USER_MODEL, editable=False)),
],
options={
'abstract': False,
},
bases=(models.Model, wagtail.wagtailadmin.taggable.TagSearchable),
),
migrations.CreateModel(
name='EventIndex',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('date_from', models.DateField(verbose_name='Start date', null=True)),
('date_to', models.DateField(blank=True, help_text='Not required if event is on a single day', verbose_name='End date', null=True)),
('time_from', models.TimeField(blank=True, verbose_name='Start time', null=True)),
('time_to', models.TimeField(blank=True, verbose_name='End time', null=True)),
('audience', models.CharField(choices=[('public', 'Public'), ('private', 'Private')], max_length=255)),
('location', models.CharField(max_length=255)),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('cost', models.CharField(max_length=255)),
('signup_link', models.URLField(blank=True)),
('feed_image', models.ForeignKey(to='wagtailimages.Image', null=True, related_name='+', blank=True, on_delete=django.db.models.deletion.SET_NULL)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPageCarouselItem',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
('caption', models.CharField(blank=True, max_length=255)),
('image', models.ForeignKey(to='wagtailimages.Image', null=True, related_name='+', blank=True, on_delete=django.db.models.deletion.SET_NULL)),
('link_document', models.ForeignKey(to='wagtaildocs.Document', null=True, related_name='+', blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageChooserModel',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('page', models.ForeignKey(to='tests.EventPage', help_text='more help text')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageRelatedLink',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('title', models.CharField(help_text='Link title', max_length=255)),
('link_document', models.ForeignKey(to='wagtaildocs.Document', null=True, related_name='+', blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='EventPageSpeaker',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('first_name', models.CharField(blank=True, max_length=255, verbose_name='Name')),
('last_name', models.CharField(blank=True, max_length=255, verbose_name='Surname')),
('image', models.ForeignKey(to='wagtailimages.Image', null=True, related_name='+', blank=True, on_delete=django.db.models.deletion.SET_NULL)),
('link_document', models.ForeignKey(to='wagtaildocs.Document', null=True, related_name='+', blank=True)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormField',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255)),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16)),
('required', models.BooleanField(default=True)),
('choices', models.CharField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', max_length=512)),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255)),
('help_text', models.CharField(blank=True, max_length=255)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='FormPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('to_address', models.CharField(blank=True, help_text='Optional - form submissions will be emailed to this address', max_length=255)),
('from_address', models.CharField(blank=True, max_length=255)),
('subject', models.CharField(blank=True, max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='PageChooserModel',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PageWithOldStyleRouteMethod',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('content', models.TextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SimplePage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
('content', models.TextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SnippetChooserModel',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('advert', models.ForeignKey(to='tests.Advert', help_text='help text')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StandardChild',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardIndex',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='TaggedPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, primary_key=True, to='wagtailcore.Page', auto_created=True, serialize=False)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='TaggedPageTag',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('content_object', modelcluster.fields.ParentalKey(to='tests.TaggedPage', related_name='tagged_items')),
('tag', models.ForeignKey(to='taggit.Tag', related_name='tests_taggedpagetag_items')),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.AddField(
model_name='taggedpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(verbose_name='Tags', to='taggit.Tag', blank=True, through='tests.TaggedPageTag', help_text='A comma-separated list of tags.'),
preserve_default=True,
),
migrations.AddField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(to='wagtailcore.Page', help_text='help text'),
preserve_default=True,
),
migrations.AddField(
model_name='formfield',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.FormPage', related_name='form_fields'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagespeaker',
name='link_page',
field=models.ForeignKey(to='wagtailcore.Page', null=True, related_name='+', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagespeaker',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.EventPage', related_name='speakers'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='link_page',
field=models.ForeignKey(to='wagtailcore.Page', null=True, related_name='+', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.EventPage', related_name='related_links'),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='link_page',
field=models.ForeignKey(to='wagtailcore.Page', null=True, related_name='+', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(to='tests.EventPage', related_name='carousel_items'),
preserve_default=True,
),
migrations.AddField(
model_name='advertplacement',
name='page',
field=modelcluster.fields.ParentalKey(to='wagtailcore.Page', related_name='advert_placements'),
preserve_default=True,
),
]
|
bsd-3-clause
|
daniponi/django
|
tests/servers/tests.py
|
33
|
6903
|
# -*- encoding: utf-8 -*-
"""
Tests for django.core.servers.
"""
from __future__ import unicode_literals
import contextlib
import errno
import os
import socket
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase, override_settings
from django.utils._os import upath
from django.utils.http import urlencode
from django.utils.six import text_type
from django.utils.six.moves.urllib.error import HTTPError
from django.utils.six.moves.urllib.request import urlopen
from .models import Person
TEST_ROOT = os.path.dirname(upath(__file__))
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
'STATIC_URL': '/static/',
'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
@override_settings(ROOT_URLCONF='servers.urls', **TEST_SETTINGS)
class LiveServerBase(LiveServerTestCase):
available_apps = [
'servers',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
]
fixtures = ['testdata.json']
def urlopen(self, url):
return urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
"""
Ensure that the address set in the environment variable is valid.
Refs #2879.
"""
@classmethod
def setUpClass(cls):
# Backup original environment variable
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
# Just the host is not accepted
cls.raises_exception('localhost', ImproperlyConfigured)
# The host must be valid
cls.raises_exception('blahblahblah:8081', socket.error)
# The list of ports must be in a valid format
cls.raises_exception('localhost:8081,', ImproperlyConfigured)
cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-', ImproperlyConfigured)
cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
# Restore original environment variable
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
# put it in a list to prevent descriptor lookups in test
cls.live_server_url_test = [cls.live_server_url]
@classmethod
def tearDownClass(cls):
# skip it, as setUpClass doesn't call its parent either
pass
@classmethod
def raises_exception(cls, address, exception):
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
try:
super(LiveServerAddress, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except exception:
pass
finally:
super(LiveServerAddress, cls).tearDownClass()
def test_live_server_url_is_class_property(self):
self.assertIsInstance(self.live_server_url_test[0], text_type)
self.assertEqual(self.live_server_url_test[0], self.live_server_url)
class LiveServerViews(LiveServerBase):
def test_404(self):
"""
Ensure that the LiveServerTestCase serves 404s.
Refs #2879.
"""
try:
self.urlopen('/')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
self.fail('Expected 404 response')
def test_view(self):
"""
Ensure that the LiveServerTestCase serves views.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/example_view/')) as f:
self.assertEqual(f.read(), b'example view')
def test_static_files(self):
"""
Ensure that the LiveServerTestCase serves static files.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/static/example_static_file.txt')) as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')
def test_no_collectstatic_emulation(self):
"""
Test that LiveServerTestCase reports a 404 status code when HTTP client
tries to access a static file that isn't explicitly put under
STATIC_ROOT.
"""
try:
self.urlopen('/static/another_app/another_app_static_file.txt')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
self.fail('Expected 404 response')
def test_media_files(self):
"""
Ensure that the LiveServerTestCase serves media files.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/media/example_media_file.txt')) as f:
self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')
def test_environ(self):
with contextlib.closing(self.urlopen('/environ_view/?%s' % urlencode({'q': 'тест'}))) as f:
self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Ensure that fixtures are properly loaded and visible to the
live server thread.
Refs #2879.
"""
with contextlib.closing(self.urlopen('/model_view/')) as f:
self.assertEqual(f.read().splitlines(), [b'jane', b'robert'])
def test_database_writes(self):
"""
Ensure that data written to the database by a view can be read.
Refs #2879.
"""
self.urlopen('/create_model_instance/')
self.assertQuerysetEqual(
Person.objects.all().order_by('pk'),
['jane', 'robert', 'emily'],
lambda b: b.name
)
class LiveServerPort(LiveServerBase):
def test_port_bind(self):
"""
Each LiveServerTestCase binds to a unique port or fails to start a
server thread when run concurrently (#26011).
"""
TestCase = type(str("TestCase"), (LiveServerBase,), {})
try:
TestCase.setUpClass()
except socket.error as e:
if e.errno == errno.EADDRINUSE:
# We're out of ports, LiveServerTestCase correctly fails with
# a socket error.
return
# Unexpected error.
raise
try:
# We've acquired a port, ensure our server threads acquired
# different addresses.
self.assertNotEqual(
self.live_server_url, TestCase.live_server_url,
"Acquired duplicate server addresses for server threads: %s" % self.live_server_url
)
finally:
TestCase.tearDownClass()
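# --- Illustration (not part of the test suite) --------------------------------
# The checks in LiveServerAddress.setUpClass imply the accepted format for
# DJANGO_LIVE_TEST_SERVER_ADDRESS: a host followed by a comma-separated list of
# ports and port ranges. A hypothetical valid value would be set before the
# tests run, e.g.:
#
#   os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = 'localhost:8081-8090,9000'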
|
bsd-3-clause
|
gpodder/mygpo
|
mygpo/api/simple.py
|
1
|
11069
|
import json
import string
from functools import wraps
from django.shortcuts import render
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.cache import cache_page
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import never_cache
from django.contrib.sites.requests import RequestSite
from django.utils.translation import gettext as _
from mygpo.api.basic_auth import require_valid_user, check_username
from mygpo.api.backend import get_device
from mygpo.podcasts.models import Podcast
from mygpo.api.opml import Exporter, Importer
from mygpo.api.httpresponse import JsonResponse
from mygpo.directory.models import ExamplePodcast
from mygpo.api.advanced.directory import podcast_data
from mygpo.subscriptions import get_subscribed_podcasts
from mygpo.subscriptions.tasks import subscribe, unsubscribe
from mygpo.directory.search import search_podcasts
from mygpo.decorators import allowed_methods, cors_origin
from mygpo.utils import parse_range, normalize_feed_url
import logging
logger = logging.getLogger(__name__)
ALLOWED_FORMATS = ("txt", "opml", "json", "jsonp", "xml")
def check_format(fn):
@wraps(fn)
def tmp(request, format, *args, **kwargs):
if format not in ALLOWED_FORMATS:
return HttpResponseBadRequest("Invalid format")
return fn(request, *args, format=format, **kwargs)
return tmp
@csrf_exempt
@require_valid_user
@check_username
@check_format
@never_cache
@allowed_methods(["GET", "PUT", "POST"])
@cors_origin()
def subscriptions(request, username, device_uid, format):
user_agent = request.META.get("HTTP_USER_AGENT", "")
if request.method == "GET":
title = _("%(username)s's Subscription List") % {"username": username}
subscriptions = get_subscriptions(request.user, device_uid, user_agent)
return format_podcast_list(
subscriptions, format, title, jsonp_padding=request.GET.get("jsonp")
)
elif request.method in ("PUT", "POST"):
try:
body = request.body.decode("utf-8")
subscriptions = parse_subscription(body, format)
except ValueError as e:
return HttpResponseBadRequest("Unable to parse POST data: %s" % str(e))
return set_subscriptions(subscriptions, request.user, device_uid, user_agent)
@csrf_exempt
@require_valid_user
@check_username
@check_format
@never_cache
@allowed_methods(["GET"])
@cors_origin()
def all_subscriptions(request, username, format):
try:
scale = int(request.GET.get("scale_logo", 64))
except (TypeError, ValueError):
return HttpResponseBadRequest("scale_logo has to be a numeric value")
if scale not in range(1, 257):
return HttpResponseBadRequest("scale_logo has to be a number from 1 to 256")
subscriptions = get_subscribed_podcasts(request.user)
title = _("%(username)s's Subscription List") % {"username": username}
domain = RequestSite(request).domain
p_data = lambda p: podcast_data(p, domain, scale)
return format_podcast_list(
subscriptions,
format,
title,
json_map=p_data,
xml_template="podcasts.xml",
request=request,
)
def format_podcast_list(
obj_list,
format,
title,
get_podcast=None,
json_map=lambda x: x.url,
jsonp_padding=None,
xml_template=None,
request=None,
template_args={},
):
"""
Formats a list of podcasts for use in an API response
obj_list is a list of podcasts or objects that contain podcasts
format is one of txt, opml, json, jsonp or xml
title is a label of the list
if obj_list is a list of objects containing podcasts, get_podcast is the
function used to get the podcast out of each of these objects
json_map is a function returning the contents of an object (from obj_list)
that should be contained in the result (only used for format='json')
"""
def default_get_podcast(p):
return p
get_podcast = get_podcast or default_get_podcast
if format == "txt":
podcasts = map(get_podcast, obj_list)
s = "\n".join([p.url for p in podcasts] + [""])
return HttpResponse(s, content_type="text/plain")
elif format == "opml":
podcasts = map(get_podcast, obj_list)
exporter = Exporter(title)
opml = exporter.generate(podcasts)
return HttpResponse(opml, content_type="text/xml")
elif format == "json":
objs = list(map(json_map, obj_list))
return JsonResponse(objs)
elif format == "jsonp":
ALLOWED_FUNCNAME = string.ascii_letters + string.digits + "_"
if not jsonp_padding:
return HttpResponseBadRequest(
"For a JSONP response, specify the name of the callback function in the jsonp parameter"
)
if any(x not in ALLOWED_FUNCNAME for x in jsonp_padding):
return HttpResponseBadRequest(
"JSONP padding can only contain the characters %(char)s"
% {"char": ALLOWED_FUNCNAME}
)
objs = list(map(json_map, obj_list))
return JsonResponse(objs, jsonp_padding=jsonp_padding)
elif format == "xml":
if None in (xml_template, request):
return HttpResponseBadRequest("XML is not a valid format for this request")
podcasts = map(json_map, obj_list)
template_args.update({"podcasts": podcasts})
return render(
request, xml_template, template_args, content_type="application/xml"
)
else:
return None
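# --- Illustration (hypothetical objects, not part of the public API) ----------
# A sketch of the "txt" branch of format_podcast_list above: any object that
# exposes a `url` attribute can be rendered as a plain-text list, one URL per
# line. FakePodcast exists only for this example.
#
#   class FakePodcast(object):
#       def __init__(self, url):
#           self.url = url
#
#   response = format_podcast_list(
#       [FakePodcast("http://example.com/feed.xml")], "txt", "example list")
#   # response.content == b"http://example.com/feed.xml\n"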
def get_subscriptions(user, device_uid, user_agent=None):
device = get_device(user, device_uid, user_agent)
return device.get_subscribed_podcasts()
def parse_subscription(raw_post_data, format):
""" Parses the data according to the format """
if format == "txt":
urls = raw_post_data.split("\n")
elif format == "opml":
begin = raw_post_data.find("<?xml")
end = raw_post_data.find("</opml>") + 7
i = Importer(content=raw_post_data[begin:end])
urls = [p["url"] for p in i.items]
elif format == "json":
begin = raw_post_data.find("[")
end = raw_post_data.find("]") + 1
urls = json.loads(raw_post_data[begin:end])
if not isinstance(urls, list):
raise ValueError("A list of feed URLs was expected")
else:
return []
urls = filter(None, urls)
urls = list(map(normalize_feed_url, urls))
return urls
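# --- Illustration (hypothetical payloads, not part of the public API) ---------
# parse_subscription() receives the raw request body in the format named in the
# URL, e.g. for the "txt" and "json" formats:
#
#   parse_subscription("http://example.com/a.xml\nhttp://example.com/b.xml",
#                      "txt")
#   parse_subscription('["http://example.com/a.xml"]', "json")
#
# Both calls return a list of URLs normalized with normalize_feed_url(); an
# unknown format yields an empty list.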
def set_subscriptions(urls, user, device_uid, user_agent):
# remove empty urls
urls = list(filter(None, (u.strip() for u in urls)))
device = get_device(user, device_uid, user_agent, undelete=True)
subscriptions = dict((p.url, p) for p in device.get_subscribed_podcasts())
new = [p for p in urls if p not in subscriptions.keys()]
rem = [p for p in subscriptions.keys() if p not in urls]
remove_podcasts = Podcast.objects.filter(urls__url__in=rem)
for podcast in remove_podcasts:
unsubscribe(podcast.pk, user.pk, device.uid)
for url in new:
podcast = Podcast.objects.get_or_create_for_url(url).object
subscribe(podcast.pk, user.pk, device.uid, url)
# Only an empty response is a successful response
return HttpResponse("", content_type="text/plain")
@check_format
@allowed_methods(["GET"])
@cache_page(60 * 60)
@cors_origin()
def toplist(request, count, format):
count = parse_range(count, 1, 100, 100)
entries = Podcast.objects.all().toplist()[:count]
domain = RequestSite(request).domain
try:
scale = int(request.GET.get("scale_logo", 64))
except (TypeError, ValueError):
return HttpResponseBadRequest("scale_logo has to be a numeric value")
if scale not in range(1, 257):
return HttpResponseBadRequest("scale_logo has to be a number from 1 to 256")
def json_map(t):
podcast = t
p = podcast_data(podcast, domain, scale)
return p
title = _("gpodder.net - Top %(count)d") % {"count": len(entries)}
return format_podcast_list(
entries,
format,
title,
get_podcast=lambda t: t,
json_map=json_map,
jsonp_padding=request.GET.get("jsonp", ""),
xml_template="podcasts.xml",
request=request,
)
@check_format
@cache_page(60 * 60)
@allowed_methods(["GET"])
@cors_origin()
def search(request, format):
NUM_RESULTS = 20
query = request.GET.get("q", "")
try:
scale = int(request.GET.get("scale_logo", 64))
except (TypeError, ValueError):
return HttpResponseBadRequest("scale_logo has to be a numeric value")
if scale not in range(1, 257):
return HttpResponseBadRequest("scale_logo has to be a number from 1 to 256")
if not query:
return HttpResponseBadRequest("/search.opml|txt|json?q={query}")
results = search_podcasts(query)[:NUM_RESULTS]
title = _("gpodder.net - Search")
domain = RequestSite(request).domain
p_data = lambda p: podcast_data(p, domain, scale)
return format_podcast_list(
results,
format,
title,
json_map=p_data,
jsonp_padding=request.GET.get("jsonp", ""),
xml_template="podcasts.xml",
request=request,
)
@require_valid_user
@check_format
@never_cache
@allowed_methods(["GET"])
@cors_origin()
def suggestions(request, count, format):
count = parse_range(count, 1, 100, 100)
user = request.user
suggestions = Podcast.objects.filter(
podcastsuggestion__suggested_to=user, podcastsuggestion__deleted=False
)
title = _("gpodder.net - %(count)d Suggestions") % {"count": len(suggestions)}
domain = RequestSite(request).domain
p_data = lambda p: podcast_data(p, domain)
return format_podcast_list(
suggestions,
format,
title,
json_map=p_data,
jsonp_padding=request.GET.get("jsonp"),
)
@check_format
@allowed_methods(["GET"])
@cache_page(60 * 60)
@cors_origin()
def example_podcasts(request, format):
podcasts = cache.get("example-podcasts", None)
try:
scale = int(request.GET.get("scale_logo", 64))
except (TypeError, ValueError):
return HttpResponseBadRequest("scale_logo has to be a numeric value")
if scale not in range(1, 257):
return HttpResponseBadRequest("scale_logo has to be a number from 1 to 256")
if not podcasts:
podcasts = list(ExamplePodcast.objects.get_podcasts())
cache.set("example-podcasts", podcasts)
podcast_ad = Podcast.objects.get_advertised_podcast()
if podcast_ad:
podcasts = [podcast_ad] + podcasts
title = "gPodder Podcast Directory"
domain = RequestSite(request).domain
p_data = lambda p: podcast_data(p, domain, scale)
return format_podcast_list(
podcasts,
format,
title,
json_map=p_data,
xml_template="podcasts.xml",
request=request,
)
|
agpl-3.0
|
dkodnik/Ant
|
addons/auth_openid/controllers/main.py
|
52
|
10261
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
import getpass
import urllib
import werkzeug.urls
import werkzeug.exceptions
from openid import oidutil
from openid.store import filestore
from openid.consumer import consumer
from openid.cryptutil import randomString
from openid.extensions import ax, sreg
import openerp
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
from openerp.addons.web.controllers.main import login_and_redirect, set_cookie_and_redirect
from .. import utils
_logger = logging.getLogger(__name__)
oidutil.log = _logger.debug
def get_system_user():
"""Return system user info string, such as USERNAME-EUID"""
try:
info = getpass.getuser()
except ImportError:
if os.name == 'nt':
# when there is no 'USERNAME' in the environment, getpass.getuser()
# fails when trying to import the 'pwd' module, which is unix-only.
# In that case we have to fall back to the real win32 API.
import win32api
info = win32api.GetUserName()
else:
raise
euid = getattr(os, 'geteuid', None) # Not available on some platforms
if euid is not None:
info = '%s-%d' % (info, euid())
return info
_storedir = os.path.join(tempfile.gettempdir(),
'openerp-auth_openid-%s-store' % get_system_user())
class GoogleAppsAwareConsumer(consumer.GenericConsumer):
def complete(self, message, endpoint, return_to):
if message.getOpenIDNamespace() == consumer.OPENID2_NS:
server_url = message.getArg(consumer.OPENID2_NS, 'op_endpoint', '')
if server_url.startswith('https://www.google.com/a/'):
assoc_handle = message.getArg(consumer.OPENID_NS, 'assoc_handle')
assoc = self.store.getAssociation(server_url, assoc_handle)
if assoc:
# update fields
for attr in ['claimed_id', 'identity']:
value = message.getArg(consumer.OPENID2_NS, attr, '')
value = 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % urllib.quote_plus(value)
message.setArg(consumer.OPENID2_NS, attr, value)
# now, resign the message
message.delArg(consumer.OPENID2_NS, 'sig')
message.delArg(consumer.OPENID2_NS, 'signed')
message = assoc.signMessage(message)
return super(GoogleAppsAwareConsumer, self).complete(message, endpoint, return_to)
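# Illustration (hypothetical values, nothing here is executed): the rewrite
# above maps a per-domain Google Apps identity such as
#
#   https://example.com/openid?id=1234
#
# to the canonical Google discovery URL
#
#   https://www.google.com/accounts/o8/user-xrds?uri=https%3A%2F%2Fexample.com%2Fopenid%3Fid%3D1234
#
# i.e. 'https://www.google.com/accounts/o8/user-xrds?uri=%s' % urllib.quote_plus(value),
# before the message is re-signed with the stored association.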
class OpenIDController(openerp.addons.web.http.Controller):
_cp_path = '/auth_openid/login'
_store = filestore.FileOpenIDStore(_storedir)
_REQUIRED_ATTRIBUTES = ['email']
_OPTIONAL_ATTRIBUTES = 'nickname fullname postcode country language timezone'.split()
def _add_extensions(self, request):
"""Add extensions to the request"""
sreg_request = sreg.SRegRequest(required=self._REQUIRED_ATTRIBUTES,
optional=self._OPTIONAL_ATTRIBUTES)
request.addExtension(sreg_request)
ax_request = ax.FetchRequest()
for alias in self._REQUIRED_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=True, alias=alias))
for alias in self._OPTIONAL_ATTRIBUTES:
uri = utils.SREG2AX[alias]
ax_request.add(ax.AttrInfo(uri, required=False, alias=alias))
request.addExtension(ax_request)
def _get_attributes_from_success_response(self, success_response):
attrs = {}
all_attrs = self._REQUIRED_ATTRIBUTES + self._OPTIONAL_ATTRIBUTES
sreg_resp = sreg.SRegResponse.fromSuccessResponse(success_response)
if sreg_resp:
for attr in all_attrs:
value = sreg_resp.get(attr)
if value is not None:
attrs[attr] = value
ax_resp = ax.FetchResponse.fromSuccessResponse(success_response)
if ax_resp:
for attr in all_attrs:
value = ax_resp.getSingle(utils.SREG2AX[attr])
if value is not None:
attrs[attr] = value
return attrs
def _get_realm(self, req):
return req.httprequest.host_url
@openerp.addons.web.http.httprequest
def verify_direct(self, req, db, url):
result = self._verify(req, db, url)
if 'error' in result:
return werkzeug.exceptions.BadRequest(result['error'])
if result['action'] == 'redirect':
return werkzeug.utils.redirect(result['value'])
return result['value']
@openerp.addons.web.http.jsonrequest
def verify(self, req, db, url):
return self._verify(req, db, url)
def _verify(self, req, db, url):
redirect_to = werkzeug.urls.Href(req.httprequest.host_url + 'auth_openid/login/process')(session_id=req.session_id)
realm = self._get_realm(req)
session = dict(dbname=db, openid_url=url) # TODO add origin page ?
oidconsumer = consumer.Consumer(session, self._store)
try:
request = oidconsumer.begin(url)
except consumer.DiscoveryFailure, exc:
fetch_error_string = 'Error in discovery: %s' % (str(exc[0]),)
return {'error': fetch_error_string, 'title': 'OpenID Error'}
if request is None:
return {'error': 'No OpenID services found', 'title': 'OpenID Error'}
req.session.openid_session = session
self._add_extensions(request)
if request.shouldSendRedirect():
redirect_url = request.redirectURL(realm, redirect_to)
return {'action': 'redirect', 'value': redirect_url, 'session_id': req.session_id}
else:
form_html = request.htmlMarkup(realm, redirect_to)
return {'action': 'post', 'value': form_html, 'session_id': req.session_id}
@openerp.addons.web.http.httprequest
def process(self, req, **kw):
session = getattr(req.session, 'openid_session', None)
if not session:
return set_cookie_and_redirect(req, '/')
oidconsumer = consumer.Consumer(session, self._store, consumer_class=GoogleAppsAwareConsumer)
query = req.httprequest.args
info = oidconsumer.complete(query, req.httprequest.base_url)
display_identifier = info.getDisplayIdentifier()
session['status'] = info.status
if info.status == consumer.SUCCESS:
dbname = session['dbname']
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
Modules = registry.get('ir.module.module')
installed = Modules.search_count(cr, SUPERUSER_ID, ['&', ('name', '=', 'auth_openid'), ('state', '=', 'installed')]) == 1
if installed:
Users = registry.get('res.users')
#openid_url = info.endpoint.canonicalID or display_identifier
openid_url = session['openid_url']
attrs = self._get_attributes_from_success_response(info)
attrs['openid_url'] = openid_url
session['attributes'] = attrs
openid_email = attrs.get('email', False)
domain = []
if openid_email:
domain += ['|', ('openid_email', '=', False)]
domain += [('openid_email', '=', openid_email)]
domain += [('openid_url', '=', openid_url), ('active', '=', True)]
ids = Users.search(cr, SUPERUSER_ID, domain)
assert len(ids) < 2
if ids:
user_id = ids[0]
login = Users.browse(cr, SUPERUSER_ID, user_id).login
key = randomString(utils.KEY_LENGTH, '0123456789abcdef')
Users.write(cr, SUPERUSER_ID, [user_id], {'openid_key': key})
# TODO fill empty fields with the ones from sreg/ax
cr.commit()
return login_and_redirect(req, dbname, login, key)
session['message'] = 'This OpenID identifier is not associated to any active users'
elif info.status == consumer.SETUP_NEEDED:
session['message'] = info.setup_url
elif info.status == consumer.FAILURE and display_identifier:
fmt = "Verification of %s failed: %s"
session['message'] = fmt % (display_identifier, info.message)
else: # FAILURE
# Either we don't understand the code or there is no
# openid_url included with the error. Give a generic
# failure message. The library should supply debug
# information in a log.
session['message'] = 'Verification failed.'
return set_cookie_and_redirect(req, '/#action=login&loginerror=1')
@openerp.addons.web.http.jsonrequest
def status(self, req):
session = getattr(req.session, 'openid_session', {})
return {'status': session.get('status'), 'message': session.get('message')}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mitocw/edx-platform
|
openedx/core/lib/api/permissions.py
|
4
|
5499
|
"""
API library for Django REST Framework permissions-oriented workflows
"""
from django.conf import settings
from django.http import Http404
from edx_django_utils.monitoring import set_custom_metric
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from rest_condition import C
from rest_framework import permissions
from edx_rest_framework_extensions.permissions import IsStaff, IsUserInUrl
from openedx.core.lib.log_utils import audit_log
from student.roles import CourseInstructorRole, CourseStaffRole
class ApiKeyHeaderPermission(permissions.BasePermission):
"""
Django REST Framework permissions class used to manage API Key integrations
Deprecated
"""
def has_permission(self, request, view):
"""
Check for permissions by matching the configured API key and header
Allow the request if and only if settings.EDX_API_KEY is set and
the X-Edx-Api-Key HTTP header is present in the request and
matches the setting.
"""
api_key = getattr(settings, "EDX_API_KEY", None)
if api_key is not None and request.META.get("HTTP_X_EDX_API_KEY") == api_key:
audit_log("ApiKeyHeaderPermission used",
path=request.path,
ip=request.META.get("REMOTE_ADDR"))
set_custom_metric('deprecated_api_key_header', True)
return True
return False
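# Illustration (hypothetical values, not part of this module): a caller relying
# on ApiKeyHeaderPermission sends the shared key in the X-Edx-Api-Key header so
# that it matches settings.EDX_API_KEY, e.g.
#
#   curl -H "X-Edx-Api-Key: $EDX_API_KEY" https://lms.example.com/api/some_endpoint/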
class ApiKeyHeaderPermissionIsAuthenticated(ApiKeyHeaderPermission, permissions.IsAuthenticated):
"""
Allow someone to access the view if they have the API key OR they are authenticated.
See ApiKeyHeaderPermission for more information on how the API key portion is implemented.
"""
def has_permission(self, request, view):
# TODO We can optimize this later on when we know which of these methods is used more often.
api_permissions = ApiKeyHeaderPermission.has_permission(self, request, view)
is_authenticated_permissions = permissions.IsAuthenticated.has_permission(self, request, view)
return api_permissions or is_authenticated_permissions
class IsCourseStaffInstructor(permissions.BasePermission):
"""
Permission to check that user is a course instructor or staff of
a master course given a course object or the user is a coach of
the course itself.
"""
def has_object_permission(self, request, view, obj):
return (hasattr(request, 'user') and
# either the user is a staff or instructor of the master course
(hasattr(obj, 'course_id') and
(CourseInstructorRole(obj.course_id).has_user(request.user) or
CourseStaffRole(obj.course_id).has_user(request.user))) or
# or it is a safe method and the user is a coach on the course object
(request.method in permissions.SAFE_METHODS
and hasattr(obj, 'coach') and obj.coach == request.user))
class IsMasterCourseStaffInstructor(permissions.BasePermission):
"""
Permission to check that user is instructor or staff of the master course.
"""
def has_permission(self, request, view):
"""
This method assumes that a `master_course_id` parameter is available in
the request as a GET parameter, a POST parameter, or in the JSON payload
of the request. This is because this permission class checks whether the
user making the request is an instructor for the specified course.
"""
master_course_id = (request.GET.get('master_course_id')
or request.POST.get('master_course_id')
or request.data.get('master_course_id'))
if master_course_id is not None:
try:
course_key = CourseKey.from_string(master_course_id)
except InvalidKeyError:
raise Http404()
return (hasattr(request, 'user') and
(CourseInstructorRole(course_key).has_user(request.user) or
CourseStaffRole(course_key).has_user(request.user)))
return False
class IsUserInUrlOrStaff(permissions.BasePermission):
def has_permission(self, request, view):
return C(IsStaff) | IsUserInUrl
class IsStaffOrReadOnly(permissions.BasePermission):
"""Permission that checks to see if the user is global or course
staff, permitting only read-only access if they are not.
"""
def has_object_permission(self, request, view, obj):
return (request.user.is_staff or
CourseStaffRole(obj.course_id).has_user(request.user) or
request.method in permissions.SAFE_METHODS)
class IsStaffOrOwner(permissions.BasePermission):
"""
Permission that allows access to admin users or the owner of an object.
The owner is considered the User object represented by obj.user.
"""
def has_object_permission(self, request, view, obj):
return request.user.is_staff or obj.user == request.user
def has_permission(self, request, view):
user = request.user
return user.is_staff \
or (user.username == request.GET.get('username')) \
or (user.username == getattr(request, 'data', {}).get('username')) \
or (user.username == getattr(request, 'data', {}).get('user')) \
or (user.username == getattr(view, 'kwargs', {}).get('username'))
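# --- Usage illustration (hypothetical view, not part of this module) ----------
# These classes plug into Django REST Framework's permission_classes hook, e.g.
# a view that accepts either the shared API key or an authenticated user:
#
#   from rest_framework.views import APIView
#
#   class ExampleView(APIView):
#       permission_classes = (ApiKeyHeaderPermissionIsAuthenticated,)
#
#       def get(self, request):
#           ...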
|
agpl-3.0
|
duramato/SickRage
|
lib/guessit/language.py
|
25
|
11576
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import logging
from guessit import u
from guessit.textutils import find_words
from babelfish import Language, Country
import babelfish
from guessit.guess import Guess
__all__ = ['Language', 'UNDETERMINED',
'search_language', 'guess_language']
log = logging.getLogger(__name__)
UNDETERMINED = babelfish.Language('und')
SYN = {('und', None): ['unknown', 'inconnu', 'unk', 'un'],
('ell', None): ['gr', 'greek'],
('spa', None): ['esp', 'español'],
('fra', None): ['français', 'vf', 'vff', 'vfi'],
('swe', None): ['se'],
('por', 'BR'): ['po', 'pb', 'pob', 'br', 'brazilian'],
('cat', None): ['català'],
('ces', None): ['cz'],
('ukr', None): ['ua'],
('zho', None): ['cn'],
('jpn', None): ['jp'],
('hrv', None): ['scr'],
('mul', None): ['multi', 'dl'], # http://scenelingo.wordpress.com/2009/03/24/what-does-dl-mean/
}
class GuessitConverter(babelfish.LanguageReverseConverter):
    _with_country_regexp = re.compile(r'(.*)\((.*)\)')
    _with_country_regexp2 = re.compile(r'(.*)-(.*)')
def __init__(self):
self.guessit_exceptions = {}
for (alpha3, country), synlist in SYN.items():
for syn in synlist:
self.guessit_exceptions[syn.lower()] = (alpha3, country, None)
@property
def codes(self):
return (babelfish.language_converters['alpha3b'].codes |
babelfish.language_converters['alpha2'].codes |
babelfish.language_converters['name'].codes |
babelfish.language_converters['opensubtitles'].codes |
babelfish.country_converters['name'].codes |
frozenset(self.guessit_exceptions.keys()))
@staticmethod
def convert(alpha3, country=None, script=None):
return str(babelfish.Language(alpha3, country, script))
def reverse(self, name):
with_country = (GuessitConverter._with_country_regexp.match(name) or
GuessitConverter._with_country_regexp2.match(name))
name = u(name.lower())
if with_country:
lang = Language.fromguessit(with_country.group(1).strip())
lang.country = babelfish.Country.fromguessit(with_country.group(2).strip())
return lang.alpha3, lang.country.alpha2 if lang.country else None, lang.script or None
# exceptions come first, as they need to override a potential match
# with any of the other guessers
try:
return self.guessit_exceptions[name]
except KeyError:
pass
for conv in [babelfish.Language,
babelfish.Language.fromalpha3b,
babelfish.Language.fromalpha2,
babelfish.Language.fromname,
babelfish.Language.fromopensubtitles]:
try:
c = conv(name)
return c.alpha3, c.country, c.script
except (ValueError, babelfish.LanguageReverseError):
pass
raise babelfish.LanguageReverseError(name)
babelfish.language_converters['guessit'] = GuessitConverter()
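# Illustrative usage (not part of the original module), assuming babelfish's
# standard reverse-converter API: once registered, the SYN table above lets
# scene-style shorthands resolve through Language.fromguessit, e.g.
#
#     Language.fromguessit('br')       # -> Brazilian Portuguese ('por', country BR)
#     Language.fromguessit('gr')       # -> Greek ('ell')
#     Language.fromguessit('fr-FR')    # -> French + country, via _with_country_regexp2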
COUNTRIES_SYN = {'ES': ['españa'],
'GB': ['UK'],
'BR': ['brazilian', 'bra'],
# FIXME: this one is a bit of a stretch, not sure how to do
# it properly, though...
'MX': ['Latinoamérica', 'latin america']
}
class GuessitCountryConverter(babelfish.CountryReverseConverter):
def __init__(self):
self.guessit_exceptions = {}
for alpha2, synlist in COUNTRIES_SYN.items():
for syn in synlist:
self.guessit_exceptions[syn.lower()] = alpha2
@property
def codes(self):
return (babelfish.country_converters['name'].codes |
frozenset(babelfish.COUNTRIES.values()) |
frozenset(self.guessit_exceptions.keys()))
@staticmethod
def convert(alpha2):
if alpha2 == 'GB':
return 'UK'
return str(Country(alpha2))
def reverse(self, name):
# exceptions come first, as they need to override a potential match
# with any of the other guessers
try:
return self.guessit_exceptions[name.lower()]
except KeyError:
pass
try:
return babelfish.Country(name.upper()).alpha2
except ValueError:
pass
for conv in [babelfish.Country.fromname]:
try:
return conv(name).alpha2
except babelfish.CountryReverseError:
pass
raise babelfish.CountryReverseError(name)
babelfish.country_converters['guessit'] = GuessitCountryConverter()
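# Illustrative usage (not part of the original module): the country converter
# resolves the informal names in COUNTRIES_SYN and special-cases GB/UK, e.g.
#
#     Country.fromguessit('UK')          # -> <Country [GB]>
#     Country.fromguessit('brazilian')   # -> <Country [BR]>
#     GuessitCountryConverter.convert('GB')  # -> 'UK'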
# list of common words which could be interpreted as languages, but which
# are far too common to be able to say they represent a language in the
# middle of a string (where they most likely carry their common meaning)
LNG_COMMON_WORDS = frozenset([
# english words
'is', 'it', 'am', 'mad', 'men', 'man', 'run', 'sin', 'st', 'to',
'no', 'non', 'war', 'min', 'new', 'car', 'day', 'bad', 'bat', 'fan',
'fry', 'cop', 'zen', 'gay', 'fat', 'one', 'cherokee', 'got', 'an', 'as',
'cat', 'her', 'be', 'hat', 'sun', 'may', 'my', 'mr', 'rum', 'pi', 'bb',
'bt', 'tv', 'aw', 'by', 'md', 'mp', 'cd', 'lt', 'gt', 'in', 'ad', 'ice',
'ay', 'at', 'star', 'so',
# french words
'bas', 'de', 'le', 'son', 'ne', 'ca', 'ce', 'et', 'que',
'mal', 'est', 'vol', 'or', 'mon', 'se', 'je', 'tu', 'me',
'ne', 'ma', 'va', 'au',
# japanese words,
'wa', 'ga', 'ao',
# spanish words
'la', 'el', 'del', 'por', 'mar', 'al',
# other
'ind', 'arw', 'ts', 'ii', 'bin', 'chan', 'ss', 'san', 'oss', 'iii',
'vi', 'ben', 'da', 'lt', 'ch', 'sr', 'ps', 'cx',
# new from babelfish
'mkv', 'avi', 'dmd', 'the', 'dis', 'cut', 'stv', 'des', 'dia', 'and',
'cab', 'sub', 'mia', 'rim', 'las', 'une', 'par', 'srt', 'ano', 'toy',
'job', 'gag', 'reel', 'www', 'for', 'ayu', 'csi', 'ren', 'moi', 'sur',
'fer', 'fun', 'two', 'big', 'psy', 'air',
# movie title
'brazil',
# release groups
'bs', # Bosnian
'kz',
# countries
'gt', 'lt', 'im',
# part/pt
'pt'
])
LNG_COMMON_WORDS_STRICT = frozenset(['brazil'])
subtitle_prefixes = ['sub', 'subs', 'st', 'vost', 'subforced', 'fansub', 'hardsub']
subtitle_suffixes = ['subforced', 'fansub', 'hardsub', 'sub', 'subs']
lang_prefixes = ['true']
all_lang_prefixes_suffixes = subtitle_prefixes + subtitle_suffixes + lang_prefixes
def find_possible_languages(string, allowed_languages=None):
"""Find possible languages in the string
:return: list of tuple (property, Language, lang_word, word)
"""
common_words = None
if allowed_languages:
common_words = LNG_COMMON_WORDS_STRICT
else:
common_words = LNG_COMMON_WORDS
words = find_words(string)
valid_words = []
for word in words:
lang_word = word.lower()
key = 'language'
for prefix in subtitle_prefixes:
if lang_word.startswith(prefix):
lang_word = lang_word[len(prefix):]
key = 'subtitleLanguage'
for suffix in subtitle_suffixes:
if lang_word.endswith(suffix):
                lang_word = lang_word[:-len(suffix)]  # strip the suffix (mirrors the prefix handling above)
key = 'subtitleLanguage'
for prefix in lang_prefixes:
if lang_word.startswith(prefix):
lang_word = lang_word[len(prefix):]
if lang_word not in common_words and word.lower() not in common_words:
try:
lang = Language.fromguessit(lang_word)
if allowed_languages:
if lang.name.lower() in allowed_languages or lang.alpha2.lower() in allowed_languages or lang.alpha3.lower() in allowed_languages:
valid_words.append((key, lang, lang_word, word))
# Keep language with alpha2 equivalent. Others are probably
# uncommon languages.
elif lang == 'mul' or hasattr(lang, 'alpha2'):
valid_words.append((key, lang, lang_word, word))
except babelfish.Error:
pass
return valid_words
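# Worked example (illustrative, not part of the original module): for a token
# such as 'vostfr', the 'vost' prefix is stripped, leaving 'fr' and switching
# the key to 'subtitleLanguage', so the function would yield roughly
# ('subtitleLanguage', <Language [fr]>, 'fr', 'vostfr'); bare tokens such as
# 'it' or 'st' are dropped because they appear in LNG_COMMON_WORDS.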
def search_language(string, allowed_languages=None):
"""Looks for language patterns, and if found return the language object,
its group span and an associated confidence.
    You can specify a list of allowed languages using the allowed_languages
    argument, as in allowed_languages = ['fr', 'eng', 'spanish'].
>>> search_language('movie [en].avi')['language']
<Language [en]>
>>> search_language('the zen fat cat and the gay mad men got a new fan', allowed_languages = ['en', 'fr', 'es'])
"""
if allowed_languages:
allowed_languages = set(Language.fromguessit(lang) for lang in allowed_languages)
confidence = 1.0 # for all of them
for prop, language, lang, word in find_possible_languages(string, allowed_languages):
pos = string.find(word)
end = pos + len(word)
# only allow those languages that have a 2-letter code, those that
# don't are too esoteric and probably false matches
# if language.lang not in lng3_to_lng2:
# continue
# confidence depends on alpha2, alpha3, english name, ...
if len(lang) == 2:
confidence = 0.8
elif len(lang) == 3:
confidence = 0.9
elif prop == 'subtitleLanguage':
confidence = 0.6 # Subtitle prefix found with language
else:
# Note: we could either be really confident that we found a
# language or assume that full language names are too
# common words and lower their confidence accordingly
confidence = 0.3 # going with the low-confidence route here
return Guess({prop: language}, confidence=confidence, input=string, span=(pos, end))
return None
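# Note on the confidence heuristic above (illustrative, not part of the
# original module): a 2-letter code such as 'en' scores 0.8, a 3-letter code
# such as 'eng' scores 0.9, longer matches reached through a subtitle
# prefix/suffix score 0.6, and any other full-name match falls back to 0.3.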
def guess_language(text): # pragma: no cover
"""Guess the language in which a body of text is written.
This uses the external guess-language python module, and will fail and return
Language(Undetermined) if it is not installed.
"""
try:
from guess_language import guessLanguage
return Language.fromguessit(guessLanguage(text))
except ImportError:
log.error('Cannot detect the language of the given text body, missing dependency: guess-language')
log.error('Please install it from PyPI, by doing eg: pip install guess-language')
return UNDETERMINED
|
gpl-3.0
|
awemulya/fieldsight-kobocat
|
onadata/apps/logger/south_migrations/0039_auto__del_field_xform_surveys_with_geopoints__add_field_xform_instance.py
|
13
|
12668
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'XForm.surveys_with_geopoints'
db.delete_column(u'odk_logger_xform', 'surveys_with_geopoints')
# Adding field 'XForm.instances_with_geopoints'
db.add_column(u'odk_logger_xform', 'instances_with_geopoints',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Adding field 'XForm.surveys_with_geopoints'
db.add_column(u'odk_logger_xform', 'surveys_with_geopoints',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Deleting field 'XForm.instances_with_geopoints'
db.delete_column(u'odk_logger_xform', 'instances_with_geopoints')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'mimetype': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.instancehistory': {
'Meta': {'object_name': 'InstanceHistory'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '249'}),
'xform_instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submission_history'", 'to': "orm['odk_logger.Instance']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.note': {
'Meta': {'object_name': 'Note'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['odk_logger.Instance']"}),
'note': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'), ('user', 'sms_id_string'))", 'object_name': 'XForm'},
'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'instances_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_crowd_form': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'last_submission_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.ziggyinstance': {
'Meta': {'object_name': 'ZiggyInstance'},
'client_version': ('django.db.models.fields.BigIntegerField', [], {'default': 'None', 'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.CharField', [], {'max_length': '249'}),
'form_instance': ('django.db.models.fields.TextField', [], {}),
'form_version': ('django.db.models.fields.CharField', [], {'default': "u'1.0'", 'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '249'}),
'reporter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggys'", 'to': u"orm['auth.User']"}),
'server_version': ('django.db.models.fields.BigIntegerField', [], {}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ziggy_submissions'", 'null': 'True', 'to': "orm['odk_logger.XForm']"})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['logger']
|
bsd-2-clause
|
dturner-tw/pants
|
contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen.py
|
5
|
4867
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from textwrap import dedent
from mock import MagicMock
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.context import Context
from pants.util.dirutil import safe_rmtree
from pants_test.tasks.task_test_base import TaskTestBase
from twitter.common.collections import OrderedSet
from pants.contrib.scrooge.tasks.scrooge_gen import ScroogeGen
# TODO (tdesai) Issue-240: Use JvmToolTaskTestBase for ScroogeGenTest
class ScroogeGenTest(TaskTestBase):
@classmethod
def task_type(cls):
return ScroogeGen
@property
def alias_groups(self):
return BuildFileAliases(targets={'java_thrift_library': JavaThriftLibrary})
def setUp(self):
super(ScroogeGenTest, self).setUp()
self.task_outdir = os.path.join(self.build_root, 'scrooge', 'gen-java')
def tearDown(self):
super(ScroogeGenTest, self).tearDown()
safe_rmtree(self.task_outdir)
def test_validate_compiler_configs(self):
# Set synthetic defaults for the global scope.
self.set_options_for_scope('thrift-defaults',
compiler='unchecked',
language='uniform',
rpc_style='async')
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='one',
sources=[],
dependencies=[],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='two',
sources=[],
dependencies=[':one'],
)
'''))
self.add_to_build_file('test_validate', dedent('''
java_thrift_library(name='three',
sources=[],
dependencies=[':one'],
rpc_style='finagle',
)
'''))
target = self.target('test_validate:one')
context = self.context(target_roots=[target])
task = self.create_task(context)
task._validate_compiler_configs([self.target('test_validate:one')])
task._validate_compiler_configs([self.target('test_validate:two')])
with self.assertRaises(TaskError):
task._validate_compiler_configs([self.target('test_validate:three')])
def test_scala(self):
build_string = '''
java_thrift_library(name='a',
sources=['a.thrift'],
dependencies=[],
compiler='scrooge',
language='scala',
rpc_style='finagle'
)
'''
sources = [os.path.join(self.task_outdir, 'org/pantsbuild/example/Example.scala')]
self._test_help(build_string, ScalaLibrary, sources)
def test_android(self):
build_string = '''
java_thrift_library(name='a',
sources=['a.thrift'],
dependencies=[],
compiler='scrooge',
language='android',
rpc_style='finagle'
)
'''
sources = [os.path.join(self.task_outdir, 'org/pantsbuild/android_example/Example.java')]
self._test_help(build_string, JavaLibrary, sources)
def _test_help(self, build_string, library_type, sources):
contents = dedent('''#@namespace android org.pantsbuild.android_example
namespace java org.pantsbuild.example
struct Example {
1: optional i64 number
}
''')
self.create_file(relpath='test_smoke/a.thrift', contents=contents)
self.add_to_build_file('test_smoke', dedent(build_string))
target = self.target('test_smoke:a')
context = self.context(target_roots=[target])
task = self.create_task(context)
task._declares_service = lambda source: False
task._outdir = MagicMock()
task._outdir.return_value = self.task_outdir
task.gen = MagicMock()
task.gen.return_value = {'test_smoke/a.thrift': sources}
saved_add_new_target = Context.add_new_target
try:
mock = MagicMock()
Context.add_new_target = mock
task.execute()
self.assertEquals(1, mock.call_count)
_, call_kwargs = mock.call_args
self.assertEquals(call_kwargs['target_type'], library_type)
self.assertEquals(call_kwargs['dependencies'], OrderedSet())
self.assertEquals(call_kwargs['provides'], None)
self.assertEquals(call_kwargs['sources'], [])
self.assertEquals(call_kwargs['derived_from'], target)
finally:
Context.add_new_target = saved_add_new_target
|
apache-2.0
|
belevtsoff/luigi
|
luigi/scheduler.py
|
1
|
40532
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import functools
import itertools
import logging
import os
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger("luigi.server")
class Scheduler(object):
"""
Abstract base class.
    Note that the methods all take string arguments, not Task objects...
    """
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
class scheduler(Config):
    # TODO(erikbern): the config_path is needed for backwards compatibility. We should drop the compatibility
# at some point (in particular this would force users to replace all dashes with underscores in the config)
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=None,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
prune_done_tasks = parameter.BoolParameter(default=False)
record_task_history = parameter.BoolParameter(default=False)
visualization_graph = parameter.Parameter(default="svg", config_path=dict(section='scheduler', name='visualization-graph'))
prune_on_get_work = parameter.BoolParameter(default=False)
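# Illustrative configuration sketch (not part of the original file), assuming
# luigi's usual mapping of this Config class to a [scheduler] ini section; the
# dashed option names are the legacy config_path spellings declared above:
#
#     [scheduler]
#     retry_delay = 1800
#     remove_delay = 600
#     disable-num-failures = 5
#     disable-persist-seconds = 86400
#     record_task_history = true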
def fix_time(x):
# Backwards compatibility for a fix in Dec 2014. Prior to the fix, pickled state might store datetime objects
# Let's remove this function soon
if isinstance(x, datetime.datetime):
return time.mktime(x.timetuple())
else:
return x
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and fix_time(self.failures[0]) < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
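# Tiny illustration of the sliding window (not part of the original file):
#
#     f = Failures(window=60)
#     f.add_failure()        # timestamped with time.time()
#     f.num_failures()       # -> 1
#
# Sixty-plus seconds later the same call returns 0, because entries older
# than `window` seconds are popped from the left of the deque.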
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None):
self.id = task_id
        self.stakeholders = set()  # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
        self.workers = set()  # worker ids that can perform task - task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.scheduler_disable_time = None
self.runnable = False
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if (self.failures.first_failure_time is not None and
self.disable_hard_timeout):
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
return True
if self.failures.num_failures() >= self.disable_failures:
return True
return False
def can_disable(self):
return (self.disable_failures is not None or
self.disable_hard_timeout is not None)
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
        Return True if this worker is not an assistant and none of its
        pending tasks require resources.
        We have to pass in the state for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
    Keep track of the current state and handle persistence.
    The point of this class is to enable other ways to keep state, e.g. by using a database.
    These will be implemented by creating an abstract base class that this and other classes
    inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def dump(self):
state = (self._tasks, self._active_workers)
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(state, fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
# prone to lead to crashes when old state is unpickled with updated code. TODO some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from clean slate.")
return
self._tasks, self._active_workers = state
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
# Convert from old format
# TODO: this is really ugly, we need something more future-proof
# Every time we add an attribute to the Worker or Task class, this
# code needs to be updated
# Compatibility since 2014-06-02
for k, v in six.iteritems(self._active_workers):
if isinstance(v, float):
self._active_workers[k] = Worker(worker_id=k, last_active=v)
# Compatibility since 2015-05-28
if any(not hasattr(w, 'tasks') for k, w in six.iteritems(self._active_workers)):
# If you load from an old format where Workers don't contain tasks.
for k, worker in six.iteritems(self._active_workers):
worker.tasks = set()
for task in six.itervalues(self._tasks):
for worker_id in task.workers:
self._active_workers[worker_id].tasks.add(task)
# Compatibility since 2015-04-28
if any(not hasattr(t, 'disable_hard_timeout') for t in six.itervalues(self._tasks)):
for t in six.itervalues(self._tasks):
t.disable_hard_timeout = None
else:
logger.info("No prior state file exists at %s. Starting with clean slate", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
# not sure why we have SUSPENDED, as it can never be set
if new_status == SUSPENDED:
new_status = PENDING
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None:
return
if new_status == FAILED and task.can_disable():
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
def fail_dead_worker_task(self, task, config, assistants):
# If a running worker disconnects, tag all its jobs as FAILED and subject it to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
def prune(self, task, config):
remove = False
# Mark tasks with no remaining active stakeholders for deletion
if not task.stakeholders:
if task.remove is None:
logger.info("Task %r has stakeholders %r but none remain connected -> will remove "
"task in %s seconds", task.id, task.stakeholders, config.remove_delay)
task.remove = time.time() + config.remove_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time:
if time.time() - fix_time(task.scheduler_disable_time) > config.disable_persist:
self.re_enable(task, config)
# Remove tasks that have no stakeholders
if task.remove and time.time() > task.remove:
logger.info("Removing task %r (no connected stakeholders)", task.id)
remove = True
# Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
return remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
# remove workers from tasks
for task in self.get_active_tasks():
task.stakeholders.difference_update(delete_workers)
task.workers.difference_update(delete_workers)
def get_necessary_tasks(self):
necessary_tasks = set()
for task in self.get_active_tasks():
if task.status not in (DONE, DISABLED) or \
getattr(task, 'scheduler_disable_time', None) is not None:
necessary_tasks.update(task.deps)
necessary_tasks.add(task.id)
return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
        :param config: an object of class "scheduler" or None (in which case the global instance will be used)
        :param resources: a dict of str->int constraints
        :param task_history_impl: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.info("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
if task.id not in necessary_tasks and self._state.prune(task, self._config):
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
logger.info("Done pruning task graph")
def update(self, worker_id, worker_reference=None):
"""
Keep track of whenever the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
worker_id = kwargs['worker']
self.update(worker_id)
task = self._state.get_task(task_id, setdefault=self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params))
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if expl is not None:
task.expl = expl
if not (task.status == RUNNING and status == PENDING):
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
                # Update the DB only if there was an actual change, to prevent noise.
                # We also check for status == PENDING b/c that's the default value
                # (so checking for status != task.status would lie)
self._update_task_history(task_id, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = self._retry_time(task, self._config)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, task):
"""
        Return the key used to rank tasks for scheduling:
        higher priority first, breaking ties by earliest submission time.
"""
return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _retry_time(self, task, config):
return time.time() + config.retry_delay
def get_work(self, host=None, assistant=False, **kwargs):
# TODO: remove any expired nodes
        # Algo: iterate over all nodes, find the highest priority node with no dependencies and available
# resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
if self._config.prune_on_get_work:
self.prune()
worker_id = kwargs['worker']
# Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host})
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in self._state.get_active_workers())
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
upstream_status = self._upstream_status(task.id, upstream_table)
in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers and upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task.id, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
if self._state.has_task(dep_id):
dep = self._state.get_task(dep_id)
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack = task_stack + [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
upstream_status = [upstream_status_table.get(a_task_id, '') for a_task_id in dep.deps]
upstream_status.append('') # to handle empty list
status = max(upstream_status, key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
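    # Worked example for the traversal above (illustrative, not part of the
    # original source): if task A is PENDING and depends on B (FAILED) and
    # C (RUNNING), B maps directly to UPSTREAM_FAILED and C to
    # UPSTREAM_RUNNING; the postorder step then assigns A the most severe of
    # its children's statuses per UPSTREAM_SEVERITY_ORDER, i.e. UPSTREAM_FAILED.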
def _serialize_task(self, task_id, include_deps=True):
task = self._state.get_task(task_id)
ret = {
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
for task in self._state.get_active_tasks():
serialized[task.id] = self._serialize_task(task.id)
return serialized
def _recurse_deps(self, task_id, serialized):
if task_id not in serialized:
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.warn('Missing task for id [%s]', task_id)
# try to infer family and params from task_id
try:
family, _, param_str = task_id.rstrip(')').partition('(')
params = dict(param.split('=') for param in param_str.split(', '))
except BaseException:
family, params = '', {}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'priority': 0,
}
else:
serialized[task_id] = self._serialize_task(task_id)
for dep in task.deps:
self._recurse_deps(dep, serialized)
def dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._recurse_deps(task_id, serialized)
return serialized
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
filter_func = lambda _: True
else:
terms = search.split()
filter_func = lambda t: all(term in t.id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def inverse_dep_graph(self, task_id, **kwargs):
self.prune()
serialized = {}
if self._state.has_task(task_id):
self._traverse_inverse_deps(task_id, serialized)
return serialized
def _traverse_inverse_deps(self, task_id, serialized):
stack = [task_id]
serialized[task_id] = self._serialize_task(task_id)
while len(stack) > 0:
curr_id = stack.pop()
for task in self._state.get_active_tasks():
if curr_id in task.deps:
serialized[curr_id]["deps"].append(task.id)
if task.id not in serialized:
serialized[task.id] = self._serialize_task(task.id)
serialized[task.id]["deps"] = []
stack.append(task.id)
def task_search(self, task_str, **kwargs):
"""
Query for a subset of tasks by task_id.
:param task_str:
:return:
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
return {"taskId": task_id, "error": self._state.get_task(task_id).expl}
else:
return {"taskId": task_id, "error": ""}
def _update_task_history(self, task_id, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task_id, successful)
elif status == PENDING:
self._task_history.task_scheduled(task_id)
elif status == RUNNING:
self._task_history.task_started(task_id, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
|
apache-2.0
|
tmpgit/intellij-community
|
python/lib/Lib/site-packages/django/utils/simplejson/encoder.py
|
430
|
15620
|
"""Implementation of JSONEncoder
"""
import re
c_encode_basestring_ascii = None
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method that returns a serializable object for ``o`` if
possible; otherwise it should call the superclass implementation
(to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif _skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
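# Usage sketch for the encoder defined above (illustrative; output assumes the default
# ', '/': ' separators unless overridden):
#
#     >>> JSONEncoder(sort_keys=True).encode({'b': 1, 'a': [None, True]})
#     '{"a": [null, true], "b": 1}'
#     >>> JSONEncoder(separators=(',', ':'), sort_keys=True).encode({'b': 1, 'a': [None, True]})
#     '{"a":[null,true],"b":1}'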
|
apache-2.0
|
zenodo/zenodo
|
zenodo/modules/github/__init__.py
|
8
|
1057
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo GitHub integration."""
from __future__ import absolute_import, print_function
|
gpl-2.0
|
tensorflow/agents
|
tf_agents/policies/async_policy_saver.py
|
1
|
5880
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Async helper for the policy saver."""
import threading
from typing import Text
from absl import logging
from tf_agents.policies import policy_saver as policy_saver_module
class AsyncPolicySaver(object):
"""Triggers `policy_saver` save calls in a separate thread asynchronously."""
def __init__(self, policy_saver: policy_saver_module.PolicySaver):
"""Initialize an AsyncPolicySaver.
Args:
policy_saver: An instance of a `policy_saver.PolicySaver`.
"""
self._policy_saver = policy_saver
self._save_condition_variable = threading.Condition()
# These vars should only be accessed if the lock in save_condition is held.
# export_dir is set to None whenever there is no pending save. Otherwise it
# is used to communicate across threads.
self._export_dir = None
self._saving_checkpoint = False
self._join_save_thread = False
self._save_thread = threading.Thread(target=self._save_loop)
self._save_thread.start()
def _save_loop(self):
"""Helper method for the saving thread to wait and execute save requests."""
while True:
with self._save_condition_variable:
while not self._export_dir:
self._save_condition_variable.wait()
if self._join_save_thread:
return
if self._saving_checkpoint:
logging.info("Saving checkpoint to %s", self._export_dir)
self._policy_saver.save_checkpoint(self._export_dir)
else:
logging.info("Saving policy to %s", self._export_dir)
self._policy_saver.save(self._export_dir)
self._export_dir = None
self._save_condition_variable.notify()
def _assert_save_thread_is_alive(self):
if self._join_save_thread or not self._save_thread.is_alive():
raise ValueError("Saving thread in AsyncPolicySaver is not alive. Either "
"an exception has occured while saving, or the saver "
"was closed.")
def save(self, export_dir: Text, blocking: bool = False):
"""Triggers an async save of the policy to the given `export_dir`.
Only one save can be triggered at a time. If `save` or `save_checkpoint`
are called while another save of either kind is still ongoing, the save is
skipped.
If blocking is set, then the call will block until any ongoing saves finish,
and then a new save will be made before returning.
Args:
export_dir: Directory path for the `saved_model` of the policy.
blocking: If True the call to save will block until a save can be
performed and finished. If a save was ongoing it will wait for that to
finish, and then do a blocking save before returning.
"""
self._save(export_dir, saving_checkpoint=False, blocking=blocking)
def save_checkpoint(self, export_dir: Text, blocking: bool = False):
"""Triggers an async save of the policy checkpoint.
Only one save can be triggered at a time. If `save` or `save_checkpoint`
are called while another save of either kind is still ongoing, the save is
skipped.
If blocking is set, then the call will block until any ongoing saves finish,
and then a new save will be made before returning.
Args:
export_dir: Directory path for the checkpoint of the policy.
blocking: If True the call to save will block until a save can be
performed and finished. If a save was ongoing it will wait for that to
finish, and then do a blocking save before returning.
"""
self._save(export_dir, saving_checkpoint=True, blocking=blocking)
def _save(self, export_dir, saving_checkpoint, blocking):
"""Helper save method, generalizes over save and save_checkpoint."""
self._assert_save_thread_is_alive()
if blocking:
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
if saving_checkpoint:
self._policy_saver.save_checkpoint(export_dir)
else:
self._policy_saver.save(export_dir)
return
if not self._save_condition_variable.acquire(blocking=False):
logging.info("AsyncPolicySaver save is still in progress skipping save.")
return
try:
self._saving_checkpoint = saving_checkpoint
self._export_dir = export_dir
self._save_condition_variable.notify()
finally:
self._save_condition_variable.release()
def flush(self):
"""Blocks until there is no saving happening."""
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
def close(self):
"""Blocks until there is no saving happening and kills the save_thread."""
with self._save_condition_variable:
while self._export_dir:
logging.info("Waiting for AsyncPolicySaver to finish.")
self._save_condition_variable.wait()
self._join_save_thread = True
self._save_condition_variable.notify()
self._save_thread.join()
def __getattr__(self, name: Text):
"""Forward all other calls to the base saver."""
return getattr(self._policy_saver, name)
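# Usage sketch (illustrative; assumes `saver` is an already constructed
# policy_saver_module.PolicySaver for some policy):
#
#     async_saver = AsyncPolicySaver(saver)
#     async_saver.save('/tmp/policy_export')  # returns immediately; work happens on the save thread
#     async_saver.flush()                     # block until the pending save has completed
#     async_saver.close()                     # drain outstanding work and join the save thread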
|
apache-2.0
|
miquelmartos/geeksphone-kernel-zero-3.0
|
tools/perf/scripts/python/netdev-times.py
|
11271
|
15048
|
# Display how packets are processed and how long each step takes.
# It helps us investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
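# Example workflow (assuming the stock netdev-times record/report wrappers that ship
# with perf):
#   perf script record netdev-times
#   perf script report netdev-times tx rx dev=eth0 debug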
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc, dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include a NET_RX softirq, drop it.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
|
gpl-2.0
|
anntzer/scikit-learn
|
sklearn/utils/multiclass.py
|
11
|
16256
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from collections.abc import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from .validation import check_array, _assert_all_finite
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(
check_array(y, accept_sparse=['csr', 'csc', 'coo']).shape[1]
)
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels.
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes
Returns
-------
out : ndarray of shape (n_unique_labels,)
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == {"binary", "multiclass"}:
ys_types = {"multiclass"}
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y,
accept_sparse=['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, str) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : ndarray of shape (n_samples,)
Target values.
Returns
-------
out : bool
Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__') or isinstance(y, Sequence):
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
with warnings.catch_warnings():
warnings.simplefilter('error', np.VisibleDeprecationWarning)
try:
y = np.asarray(y)
except np.VisibleDeprecationWarning:
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = np.array(y, dtype=object)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
Returns
-------
target_type : str
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, str))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return 'multilabel-indicator'
# DeprecationWarning will be replaced by ValueError, see NEP 34
# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html
with warnings.catch_warnings():
warnings.simplefilter('error', np.VisibleDeprecationWarning)
try:
y = np.asarray(y)
except np.VisibleDeprecationWarning:
# dtype=object should be provided explicitly for ragged arrays,
# see NEP 34
y = np.asarray(y, dtype=object)
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], str)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead - the MultiLabelBinarizer'
' transformer can convert to this format.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], str)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_assert_all_finite(y)
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic.
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
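# Sketch of the intended call pattern (illustrative; `clf` is any estimator that
# implements partial_fit and stores a classes_ attribute):
#
#     _check_partial_fit_first_call(clf, classes=[0, 1, 2])  # first call: sets
#                                                            # clf.classes_ to array([0, 1, 2]) and returns True
#     _check_partial_fit_first_call(clf)                     # later calls: returns False
#     _check_partial_fit_first_call(clf, classes=[0, 1])     # mismatch: raises ValueError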
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data.
Parameters
----------
y : {array-like, sparse matrix} of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
classes : list of size n_outputs of ndarray of size (n_classes,)
List of classes for each column.
n_classes : list of int of size n_outputs
Number of classes in each column.
class_prior : list of size n_outputs of ndarray of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = sample_weight[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = np.bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = np.bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
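# Worked example for the dense branch above (illustrative):
#
#     >>> y = np.array([[1, 0], [2, 0], [1, 3]])
#     >>> classes, n_classes, priors = class_distribution(y)
#     # classes   -> [array([1, 2]), array([0, 3])]
#     # n_classes -> [2, 2]
#     # priors    -> [array([0.667, 0.333]), array([0.667, 0.333])] (approximately)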
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking OvR decision function from OvO.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like of shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like of shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1 ) / 2``.
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
# Monotonically transform the sum_of_confidences to (-1/3, 1/3)
# and add it with votes. The monotonic transformation is
# f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2
# to ensure that we won't reach the limits and change vote order.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
transformed_confidences = (sum_of_confidences /
(3 * (np.abs(sum_of_confidences) + 1)))
return votes + transformed_confidences
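# Numeric sanity check for the transformation above (illustrative): the mapping
# x -> x / (3 * (|x| + 1)) keeps every adjustment strictly inside (-1/3, 1/3), so the
# confidence term can only reorder classes whose vote counts are tied; e.g. a summed
# confidence of 10.0 contributes 10 / 33, roughly 0.303, still less than one vote.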
|
bsd-3-clause
|
jindongh/boto
|
boto/cloudsearch/__init__.py
|
145
|
1731
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the Amazon CloudSearch service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
import boto.cloudsearch.layer1
return get_regions(
'cloudsearch',
connection_cls=boto.cloudsearch.layer1.Layer1
)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
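# Usage sketch (illustrative; credentials may equally come from the usual boto
# config file or environment variables):
#
#     import boto.cloudsearch
#     conn = boto.cloudsearch.connect_to_region(
#         'us-east-1',
#         aws_access_key_id='<key id>',
#         aws_secret_access_key='<secret>')
#     # conn is a Layer1 connection, or None if the region name is unknown.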
|
mit
|
Eksmo/calibre
|
src/calibre/gui2/wizard/device_ui.py
|
1
|
2709
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/wizard/device.ui'
#
# Created: Thu Jul 19 23:32:29 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_WizardPage(object):
def setupUi(self, WizardPage):
WizardPage.setObjectName(_fromUtf8("WizardPage"))
WizardPage.resize(400, 300)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("wizard.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
WizardPage.setWindowIcon(icon)
self.gridLayout = QtGui.QGridLayout(WizardPage)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(WizardPage)
self.label.setText(_fromUtf8(""))
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 2)
self.groupBox = QtGui.QGroupBox(WizardPage)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.manufacturer_view = QtGui.QListView(self.groupBox)
self.manufacturer_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.manufacturer_view.setObjectName(_fromUtf8("manufacturer_view"))
self.verticalLayout.addWidget(self.manufacturer_view)
self.gridLayout.addWidget(self.groupBox, 1, 0, 1, 1)
self.groupBox_2 = QtGui.QGroupBox(WizardPage)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.device_view = QtGui.QListView(self.groupBox_2)
self.device_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.device_view.setObjectName(_fromUtf8("device_view"))
self.verticalLayout_2.addWidget(self.device_view)
self.gridLayout.addWidget(self.groupBox_2, 1, 1, 1, 1)
self.retranslateUi(WizardPage)
QtCore.QMetaObject.connectSlotsByName(WizardPage)
def retranslateUi(self, WizardPage):
WizardPage.setWindowTitle(_("Welcome to calibre"))
WizardPage.setTitle(_("Welcome to calibre"))
WizardPage.setSubTitle(_("The one stop solution to all your e-book needs."))
self.groupBox.setTitle(_("&Manufacturers"))
self.groupBox_2.setTitle(_("&Devices"))
|
gpl-3.0
|
jnewland/home-assistant
|
tests/helpers/test_entity_component.py
|
4
|
16190
|
"""The tests for the Entity component helper."""
# pylint: disable=protected-access
from collections import OrderedDict
import logging
from unittest.mock import patch, Mock
from datetime import timedelta
import asynctest
import pytest
import homeassistant.core as ha
from homeassistant.exceptions import PlatformNotReady
from homeassistant.components import group
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_setup_component
from homeassistant.helpers import discovery
import homeassistant.util.dt as dt_util
from tests.common import (
MockPlatform, MockModule, mock_coro,
async_fire_time_changed, MockEntity, MockConfigEntry,
mock_entity_platform, mock_integration)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
async def test_setting_up_group(hass):
"""Set up the setting of a group."""
assert await async_setup_component(hass, 'group', {'group': {}})
component = EntityComponent(_LOGGER, DOMAIN, hass,
group_name='everyone')
# No group after setup
assert len(hass.states.async_entity_ids()) == 0
await component.async_add_entities([MockEntity()])
await hass.async_block_till_done()
# group exists
assert len(hass.states.async_entity_ids()) == 2
assert hass.states.async_entity_ids('group') == ['group.everyone']
grp = hass.states.get('group.everyone')
assert grp.attributes.get('entity_id') == \
('test_domain.unnamed_device',)
# group extended
await component.async_add_entities([MockEntity(name='goodbye')])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 3
grp = hass.states.get('group.everyone')
# Ordered in order of added to the group
assert grp.attributes.get('entity_id') == \
('test_domain.goodbye', 'test_domain.unnamed_device')
async def test_setup_loads_platforms(hass):
"""Test the loading of the platforms."""
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(hass, MockModule('test_component', setup=component_setup))
# mock the dependencies
mock_integration(hass, MockModule('mod2', dependencies=['test_component']))
mock_entity_platform(hass, 'test_domain.mod2',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
assert not component_setup.called
assert not platform_setup.called
component.setup({
DOMAIN: {
'platform': 'mod2',
}
})
await hass.async_block_till_done()
assert component_setup.called
assert platform_setup.called
async def test_setup_recovers_when_setup_raises(hass):
"""Test the setup if exceptions are happening."""
platform1_setup = Mock(side_effect=Exception('Broken'))
platform2_setup = Mock(return_value=None)
mock_entity_platform(hass, 'test_domain.mod1',
MockPlatform(platform1_setup))
mock_entity_platform(hass, 'test_domain.mod2',
MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
assert not platform1_setup.called
assert not platform2_setup.called
component.setup(OrderedDict([
(DOMAIN, {'platform': 'mod1'}),
("{} 2".format(DOMAIN), {'platform': 'non_exist'}),
("{} 3".format(DOMAIN), {'platform': 'mod2'}),
]))
await hass.async_block_till_done()
assert platform1_setup.called
assert platform2_setup.called
@asynctest.patch('homeassistant.helpers.entity_component.EntityComponent'
'._async_setup_platform', return_value=mock_coro())
@asynctest.patch('homeassistant.setup.async_setup_component',
return_value=mock_coro(True))
async def test_setup_does_discovery(mock_setup_component, mock_setup, hass):
"""Test setup for discovery."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({})
discovery.load_platform(hass, DOMAIN, 'platform_test',
{'msg': 'discovery_info'}, {DOMAIN: {}})
await hass.async_block_till_done()
assert mock_setup.called
assert ('platform_test', {}, {'msg': 'discovery_info'}) == \
mock_setup.call_args[0]
@asynctest.patch('homeassistant.helpers.entity_platform.'
'async_track_time_interval')
async def test_set_scan_interval_via_config(mock_track, hass):
"""Test the setting of the scan interval via configuration."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(hass, 'test_domain.platform',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'scan_interval': timedelta(seconds=30),
}
})
await hass.async_block_till_done()
assert mock_track.called
assert timedelta(seconds=30) == mock_track.call_args[0][2]
async def test_set_entity_namespace_via_config(hass):
"""Test setting an entity namespace."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([
MockEntity(name='beer'),
MockEntity(name=None),
])
platform = MockPlatform(platform_setup)
mock_entity_platform(hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'entity_namespace': 'yummy'
}
})
await hass.async_block_till_done()
assert sorted(hass.states.async_entity_ids()) == \
['test_domain.yummy_beer', 'test_domain.yummy_unnamed_device']
async def test_extract_from_service_available_device(hass):
"""Test the extraction of entity from service and device is available."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2', available=False),
MockEntity(name='test_3'),
MockEntity(name='test_4', available=False),
])
call_1 = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(await component.async_extract_from_service(call_1)))
call_2 = ha.ServiceCall('test', 'service', data={
'entity_id': ['test_domain.test_3', 'test_domain.test_4'],
})
assert ['test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(await component.async_extract_from_service(call_2)))
async def test_platform_not_ready(hass):
"""Test that we retry when platform not ready."""
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady,
None])
mock_integration(hass, MockModule('mod1'))
mock_entity_platform(hass, 'test_domain.mod1',
MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({
DOMAIN: {
'platform': 'mod1'
}
})
assert len(platform1_setup.mock_calls) == 1
assert 'test_domain.mod1' not in hass.config.components
utcnow = dt_util.utcnow()
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
# Should not trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 1
# Should trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
assert 'test_domain.mod1' not in hass.config.components
# This should not trigger attempt 3
async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
# Trigger attempt 3, which succeeds
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 3
assert 'test_domain.mod1' in hass.config.components
async def test_extract_from_service_returns_all_if_no_entity_id(hass):
"""Test the extraction of everything from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
(await component.async_extract_from_service(call)))
async def test_extract_from_service_filter_out_non_existing_entities(hass):
"""Test the extraction of non existing entities from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['test_domain.test_2', 'test_domain.non_exist']
})
assert ['test_domain.test_2'] == \
[ent.entity_id for ent
in await component.async_extract_from_service(call)]
async def test_extract_from_service_no_group_expand(hass):
"""Test not expanding a group."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
test_group = await group.Group.async_create_group(
hass, 'test_group', ['light.Ceiling', 'light.Kitchen'])
await component.async_add_entities([test_group])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['group.test_group']
})
extracted = await component.async_extract_from_service(
call, expand_group=False)
assert extracted == [test_group]
async def test_setup_dependencies_platform(hass):
"""Test we setup the dependencies of a platform.
    We're explicitly testing that we process dependencies even if a component
with the same name has already been loaded.
"""
mock_integration(hass, MockModule('test_component',
dependencies=['test_component2']))
mock_integration(hass, MockModule('test_component2'))
mock_entity_platform(hass, 'test_domain.test_component', MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({
DOMAIN: {
'platform': 'test_component',
}
})
assert 'test_component' in hass.config.components
assert 'test_component2' in hass.config.components
assert 'test_domain.test_component' in hass.config.components
async def test_setup_entry(hass):
"""Test setup entry calls async_setup_entry on platform."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry,
scan_interval=timedelta(seconds=5)))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
p_hass, p_entry, _ = mock_setup_entry.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert component._platforms[entry.entry_id].scan_interval == \
timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
"""Test setup entry fails if platform doesnt exist."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='non_existing')
assert (await component.async_setup_entry(entry)) is False
async def test_setup_entry_fails_duplicate(hass):
"""Test we don't allow setting up a config entry twice."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
with pytest.raises(ValueError):
await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
"""Test unloading an entry removes all entities."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
assert await component.async_unload_entry(entry)
assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
"""."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
with pytest.raises(ValueError):
await component.async_unload_entry(entry)
async def test_update_entity(hass):
"""Test that we can update an entity with the helper."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_update_ha_state = Mock(return_value=mock_coro())
await component.async_add_entities([entity])
# Called as part of async_add_entities
assert len(entity.async_update_ha_state.mock_calls) == 1
await hass.helpers.entity_component.async_update_entity(entity.entity_id)
assert len(entity.async_update_ha_state.mock_calls) == 2
assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
"""Test race condition on setting service."""
exception = False
def async_loop_exception_handler(_, _2) -> None:
"""Handle all exception inside the core loop."""
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
await async_setup_component(hass, 'group', {})
component = EntityComponent(_LOGGER, DOMAIN, hass, group_name='yo')
for _ in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
await hass.async_block_till_done()
assert not exception
async def test_extract_all_omit_entity_id(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') in caplog.text
async def test_extract_all_use_match_all(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {'entity_id': 'all'})
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') not in caplog.text
|
apache-2.0
|
ahamilton55/ansible
|
lib/ansible/modules/network/iosxr/iosxr_command.py
|
42
|
7299
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: iosxr_command
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Run commands on remote devices running Cisco IOS XR
description:
- Sends arbitrary commands to an IOS XR node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(iosxr_config) to configure iosxr devices.
extends_documentation_fragment: iosxr
options:
commands:
description:
- List of commands to send to the remote iosxr device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
        module does not return until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
required: false
default: null
aliases: ['waitfor']
version_added: "2.2"
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
required: false
default: all
choices: ['any', 'all']
version_added: "2.2"
retries:
description:
      - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
required: false
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
required: false
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
iosxr_command:
commands: show version
- name: run show version and check to see if output contains iosxr
iosxr_command:
commands: show version
wait_for: result[0] contains IOS-XR
- name: run multiple commands on remote nodes
iosxr_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
iosxr_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains IOS-XR
- result[1] contains Loopback0
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.iosxr import run_commands
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.netcli import Conditional
from ansible.module_utils.six import string_types
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='iosxr_command does not support running config mode '
'commands. Please use iosxr_config instead'
)
return commands
def main():
spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
spec.update(iosxr_argument_spec)
module = AnsibleModule(argument_spec=spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result = {
'changed': False,
'stdout': responses,
'warnings': warnings,
'stdout_lines': list(to_lines(responses))
}
module.exit_json(**result)
if __name__ == '__main__':
main()
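# --- Illustrative sketch (not part of the original module) -----------------
# The retries/interval/match semantics documented above mirror the loop in
# main().  The standalone function below restates that control flow with
# plain callables standing in for Conditional objects and run_commands();
# every name here is hypothetical and exists only for this illustration.
def _sketch_wait_for(conditionals, run_commands, retries=10, interval=1,
                     match='all'):
    """Return the conditionals that never became true within `retries`."""
    while retries > 0:
        responses = run_commands()
        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    return []              # one satisfied condition is enough
                conditionals.remove(item)  # drop the satisfied condition
        if not conditionals:
            return []                      # every condition satisfied
        time.sleep(interval)
        retries -= 1
    return conditionals                    # these are the failed conditions
# Example: _sketch_wait_for([lambda out: 'IOS-XR' in out[0]],
#                           lambda: ['Cisco IOS-XR Software'], retries=1)
# returns [], because the single condition passes on the first try.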
|
gpl-3.0
|
saurabh6790/medsyn-app1
|
hr/doctype/leave_allocation/leave_allocation.py
|
30
|
5292
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, flt
from webnotes import msgprint
class DocType:
def __init__(self, doc, doclist):
self.doc, self.doclist = doc, doclist
def validate(self):
self.validate_new_leaves_allocated_value()
self.check_existing_leave_allocation()
self.validate_new_leaves_allocated()
def on_update_after_submit(self):
self.validate_new_leaves_allocated_value()
self.validate_new_leaves_allocated()
def on_update(self):
self.get_total_allocated_leaves()
def on_cancel(self):
self.check_for_leave_application()
def validate_new_leaves_allocated_value(self):
"""validate that leave allocation is in multiples of 0.5"""
if flt(self.doc.new_leaves_allocated) % 0.5:
guess = round(flt(self.doc.new_leaves_allocated) * 2.0) / 2.0
msgprint("""New Leaves Allocated should be a multiple of 0.5.
Perhaps you should enter %s or %s""" % (guess, guess + 0.5),
raise_exception=1)
def check_existing_leave_allocation(self):
"""check whether leave for same type is already allocated or not"""
leave_allocation = webnotes.conn.sql("""select name from `tabLeave Allocation`
where employee=%s and leave_type=%s and fiscal_year=%s and docstatus=1""",
(self.doc.employee, self.doc.leave_type, self.doc.fiscal_year))
if leave_allocation:
msgprint("""%s is already allocated to Employee: %s for Fiscal Year: %s.
				Please refer to Leave Allocation: \
<a href="#Form/Leave Allocation/%s">%s</a>""" % \
(self.doc.leave_type, self.doc.employee, self.doc.fiscal_year,
leave_allocation[0][0], leave_allocation[0][0]), raise_exception=1)
def validate_new_leaves_allocated(self):
"""check if Total Leaves Allocated >= Leave Applications"""
self.doc.total_leaves_allocated = flt(self.doc.carry_forwarded_leaves) + \
flt(self.doc.new_leaves_allocated)
leaves_applied = self.get_leaves_applied(self.doc.fiscal_year)
if leaves_applied > self.doc.total_leaves_allocated:
expected_new_leaves = flt(self.doc.new_leaves_allocated) + \
(leaves_applied - self.doc.total_leaves_allocated)
msgprint("""Employee: %s has already applied for %s leaves.
				Hence, New Leaves Allocated should be at least %s""" % \
(self.doc.employee, leaves_applied, expected_new_leaves),
raise_exception=1)
def get_leave_bal(self, prev_fyear):
return self.get_leaves_allocated(prev_fyear) - self.get_leaves_applied(prev_fyear)
def get_leaves_applied(self, fiscal_year):
leaves_applied = webnotes.conn.sql("""select SUM(ifnull(total_leave_days, 0))
from `tabLeave Application` where employee=%s and leave_type=%s
and fiscal_year=%s and docstatus=1""",
(self.doc.employee, self.doc.leave_type, fiscal_year))
return leaves_applied and flt(leaves_applied[0][0]) or 0
def get_leaves_allocated(self, fiscal_year):
leaves_allocated = webnotes.conn.sql("""select SUM(ifnull(total_leaves_allocated, 0))
from `tabLeave Allocation` where employee=%s and leave_type=%s
and fiscal_year=%s and docstatus=1 and name!=%s""",
(self.doc.employee, self.doc.leave_type, fiscal_year, self.doc.name))
return leaves_allocated and flt(leaves_allocated[0][0]) or 0
def allow_carry_forward(self):
"""check whether carry forward is allowed or not for this leave type"""
cf = webnotes.conn.sql("""select is_carry_forward from `tabLeave Type` where name = %s""",
self.doc.leave_type)
cf = cf and cint(cf[0][0]) or 0
if not cf:
webnotes.conn.set(self.doc,'carry_forward',0)
msgprint("Sorry! You cannot carry forward %s" % (self.doc.leave_type),
raise_exception=1)
def get_carry_forwarded_leaves(self):
if self.doc.carry_forward:
self.allow_carry_forward()
prev_fiscal_year = webnotes.conn.sql("""select name from `tabFiscal Year`
where year_start_date = (select date_add(year_start_date, interval -1 year)
from `tabFiscal Year` where name=%s)
order by name desc limit 1""", self.doc.fiscal_year)
prev_fiscal_year = prev_fiscal_year and prev_fiscal_year[0][0] or ''
prev_bal = 0
if prev_fiscal_year and cint(self.doc.carry_forward) == 1:
prev_bal = self.get_leave_bal(prev_fiscal_year)
ret = {
'carry_forwarded_leaves': prev_bal,
'total_leaves_allocated': flt(prev_bal) + flt(self.doc.new_leaves_allocated)
}
return ret
def get_total_allocated_leaves(self):
leave_det = self.get_carry_forwarded_leaves()
webnotes.conn.set(self.doc,'carry_forwarded_leaves',flt(leave_det['carry_forwarded_leaves']))
webnotes.conn.set(self.doc,'total_leaves_allocated',flt(leave_det['total_leaves_allocated']))
def check_for_leave_application(self):
exists = webnotes.conn.sql("""select name from `tabLeave Application`
where employee=%s and leave_type=%s and fiscal_year=%s and docstatus=1""",
(self.doc.employee, self.doc.leave_type, self.doc.fiscal_year))
if exists:
msgprint("""Cannot cancel this Leave Allocation as \
				Employee: %s has already applied for %s.
Please check Leave Application: \
<a href="#Form/Leave Application/%s">%s</a>""" % \
(self.doc.employee, self.doc.leave_type, exists[0][0], exists[0][0]))
raise Exception
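# --- Illustrative sketch (not part of the original DocType) ----------------
# validate_new_leaves_allocated_value() above accepts only multiples of 0.5
# and suggests the two nearest valid values otherwise.  The helper below
# restates that arithmetic on its own, without webnotes; the function name is
# hypothetical and exists only for this illustration.
def _sketch_nearest_half_day_values(new_leaves_allocated):
	"""Return (is_valid, lower_suggestion, upper_suggestion) for a leave count."""
	value = flt(new_leaves_allocated)
	if not value % 0.5:
		return True, value, value
	guess = round(value * 2.0) / 2.0  # nearest multiple of 0.5
	return False, guess, guess + 0.5
# Example: _sketch_nearest_half_day_values(2.3) returns (False, 2.5, 3.0),
# matching the "Perhaps you should enter %s or %s" suggestion above.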
|
agpl-3.0
|
ajenkins-cargometrics/pyflow
|
pyflow/workflow_state.py
|
1
|
3093
|
import attr
@attr.s
class InvocationState(object):
"""
Encapsulates the state of an invocation, such as a lambda invocation, activity, timer, etc.
"""
NOT_STARTED = 0
HANDLED = 1
STARTED = 2
FAILED = 3
TIMED_OUT = 4
CANCELED = 5
SUCCEEDED = 6
DONE_STATES = (FAILED, TIMED_OUT, CANCELED, SUCCEEDED)
invocation_id = attr.ib()
invocation_args = attr.ib(default=attr.Factory(dict))
state = attr.ib(default=NOT_STARTED)
retries_left = attr.ib(default=0)
result = attr.ib(default=None)
failure_reason = attr.ib(default=None)
failure_details = attr.ib(default=None)
@property
def done(self):
return self.state in self.DONE_STATES
def update_state(self, state=None, result=None, failure_reason=None, failure_details=None):
if state is not None:
self.state = state
if result is not None:
self.result = result
if failure_reason is not None:
self.failure_reason = failure_reason
if failure_details is not None:
self.failure_details = failure_details
@attr.s
class WorkflowState(object):
"""Encapsulates the state of a workflow instance"""
# Identifies the workflow instance
workflow_id = attr.ib()
run_id = attr.ib()
# The ARN of the lambda role specified when starting this workflow
lambda_role = attr.ib(default=None)
# A datetime object identifying when this workflow instance started
workflow_start_time = attr.ib(default=None)
# Input to the workflow when it was started
input = attr.ib(default=None)
# True if this workflow has completed, whether successfully or not.
completed = attr.ib(default=False)
# A dictionary of invocation states. Keys are invocation ids, and values are InvocationState objects.
invocation_states = attr.ib(default=attr.Factory(dict)) # type: dict[str, InvocationState]
# The id of the last event added to the state
last_seen_event_id = attr.ib(default=None)
def get_invocation_state(self, invocation_id, initial_state=InvocationState.NOT_STARTED, num_retries=0,
invocation_args=None):
"""
Gets the invocation state for an invocation_id, creating a new state if none exists
:param invocation_id: The invocation id of the state to fetch
:param initial_state: The initial value to set the state property to if a new InvocationState is created
:param num_retries: Number of retries this invocation should be created with
:param invocation_args: Arguments used to initiate this invocation
:return: The InvocationState object for invocation_id
"""
invocation_state = self.invocation_states.get(invocation_id)
if invocation_state is None:
invocation_state = self.invocation_states[invocation_id] = InvocationState(
invocation_id=invocation_id, state=initial_state, retries_left=num_retries,
invocation_args=invocation_args)
return invocation_state
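# --- Illustrative sketch (not part of the original module) -----------------
# get_invocation_state() lazily creates per-invocation records and
# update_state() only overwrites the fields that are passed in.  The
# walk-through below shows that behaviour; the ids, arguments and function
# name are hypothetical.
def _sketch_workflow_state_usage():
    state = WorkflowState(workflow_id='wf-1', run_id='run-1')
    # First access creates the record in NOT_STARTED with the given retries.
    inv = state.get_invocation_state('lambda:step-1', num_retries=2)
    assert inv.state == InvocationState.NOT_STARTED and not inv.done
    # A later event marks it succeeded with a result; other fields are kept.
    inv.update_state(state=InvocationState.SUCCEEDED, result={'answer': 42})
    assert inv.done and inv.result == {'answer': 42}
    # A second lookup with the same id returns the same object, not a new one.
    assert state.get_invocation_state('lambda:step-1') is inv
    return state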
|
mit
|
sunlianqiang/kbengine
|
kbe/src/lib/python/Lib/asyncio/unix_events.py
|
61
|
31408
|
"""Selector event loop for Unix with signal handling."""
import errno
import fcntl
import os
import signal
import socket
import stat
import subprocess
import sys
import threading
from . import base_events
from . import base_subprocess
from . import constants
from . import events
from . import selector_events
from . import selectors
from . import transports
from .coroutines import coroutine
from .log import logger
__all__ = ['SelectorEventLoop',
'AbstractChildWatcher', 'SafeChildWatcher',
'FastChildWatcher', 'DefaultEventLoopPolicy',
]
if sys.platform == 'win32': # pragma: no cover
raise ImportError('Signals are not really supported on Windows')
def _sighandler_noop(signum, frame):
"""Dummy signal handler."""
pass
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""Unix event loop.
Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
"""
def __init__(self, selector=None):
super().__init__(selector)
self._signal_handlers = {}
def _socketpair(self):
return socket.socketpair()
def close(self):
super().close()
for sig in list(self._signal_handlers):
self.remove_signal_handler(sig)
def _process_self_data(self, data):
for signum in data:
if not signum:
# ignore null bytes written by _write_to_self()
continue
self._handle_signal(signum)
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
self._check_signal(sig)
try:
# set_wakeup_fd() raises ValueError if this is not the
# main thread. By calling it early we ensure that an
# event loop running in another thread cannot add a signal
# handler.
signal.set_wakeup_fd(self._csock.fileno())
except (ValueError, OSError) as exc:
raise RuntimeError(str(exc))
handle = events.Handle(callback, args, self)
self._signal_handlers[sig] = handle
try:
# Register a dummy signal handler to ask Python to write the signal
            # number in the wakeup file descriptor. _process_self_data() will
# read signal numbers from this file descriptor to handle signals.
signal.signal(sig, _sighandler_noop)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(sig, False)
except OSError as exc:
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as nexc:
logger.info('set_wakeup_fd(-1) failed: %s', nexc)
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
def _handle_signal(self, sig):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
return # Assume it's some race condition.
if handle._cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
self._add_callback_signalsafe(handle)
def remove_signal_handler(self, sig):
"""Remove a handler for a signal. UNIX only.
Return True if a signal handler was removed, False if not.
"""
self._check_signal(sig)
try:
del self._signal_handlers[sig]
except KeyError:
return False
if sig == signal.SIGINT:
handler = signal.default_int_handler
else:
handler = signal.SIG_DFL
try:
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
raise RuntimeError('sig {} cannot be caught'.format(sig))
else:
raise
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as exc:
logger.info('set_wakeup_fd(-1) failed: %s', exc)
return True
def _check_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError('sig must be an int, not {!r}'.format(sig))
if not (1 <= sig < signal.NSIG):
raise ValueError(
'sig {} out of range(1, {})'.format(sig, signal.NSIG))
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
@coroutine
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
with events.get_child_watcher() as watcher:
transp = _UnixSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=extra, **kwargs)
yield from transp._post_init()
watcher.add_child_handler(transp.get_pid(),
self._child_watcher_callback, transp)
return transp
def _child_watcher_callback(self, pid, returncode, transp):
self.call_soon_threadsafe(transp._process_exited, returncode)
@coroutine
def create_unix_connection(self, protocol_factory, path, *,
ssl=None, sock=None,
server_hostname=None):
assert server_hostname is None or isinstance(server_hostname, str)
if ssl:
if server_hostname is None:
raise ValueError(
'you have to pass server_hostname when using ssl')
else:
if server_hostname is not None:
raise ValueError('server_hostname is only meaningful with ssl')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
try:
sock.setblocking(False)
yield from self.sock_connect(sock, path)
except:
sock.close()
raise
else:
if sock is None:
raise ValueError('no path and sock were specified')
sock.setblocking(False)
transport, protocol = yield from self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname)
return transport, protocol
@coroutine
def create_unix_server(self, protocol_factory, path=None, *,
sock=None, backlog=100, ssl=None):
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
sock.bind(path)
except OSError as exc:
sock.close()
if exc.errno == errno.EADDRINUSE:
# Let's improve the error message by adding
# with what exact address it occurs.
msg = 'Address {!r} is already in use'.format(path)
raise OSError(errno.EADDRINUSE, msg) from None
else:
raise
except:
sock.close()
raise
else:
if sock is None:
raise ValueError(
'path was not specified, and no sock specified')
if sock.family != socket.AF_UNIX:
raise ValueError(
'A UNIX Domain Socket was expected, got {!r}'.format(sock))
server = base_events.Server(self, [sock])
sock.listen(backlog)
sock.setblocking(False)
self._start_serving(protocol_factory, sock, ssl, server)
return server
if hasattr(os, 'set_blocking'):
def _set_nonblocking(fd):
os.set_blocking(fd, False)
else:
def _set_nonblocking(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
class _UnixReadPipeTransport(transports.ReadTransport):
max_size = 256 * 1024 # max bytes we read in one event loop iteration
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
super().__init__(extra)
self._extra['pipe'] = pipe
self._loop = loop
self._pipe = pipe
self._fileno = pipe.fileno()
mode = os.fstat(self._fileno).st_mode
if not (stat.S_ISFIFO(mode) or
stat.S_ISSOCK(mode) or
stat.S_ISCHR(mode)):
raise ValueError("Pipe transport is for pipes/sockets only.")
_set_nonblocking(self._fileno)
self._protocol = protocol
self._closing = False
self._loop.add_reader(self._fileno, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
def __repr__(self):
info = [self.__class__.__name__, 'fd=%s' % self._fileno]
if self._pipe is not None:
polling = selector_events._test_selector_event(
self._loop._selector,
self._fileno, selectors.EVENT_READ)
if polling:
info.append('polling')
else:
info.append('idle')
else:
info.append('closed')
return '<%s>' % ' '.join(info)
def _read_ready(self):
try:
data = os.read(self._fileno, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._fatal_error(exc, 'Fatal read error on pipe transport')
else:
if data:
self._protocol.data_received(data)
else:
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
self._closing = True
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._protocol.eof_received)
self._loop.call_soon(self._call_connection_lost, None)
def pause_reading(self):
self._loop.remove_reader(self._fileno)
def resume_reading(self):
self._loop.add_reader(self._fileno, self._read_ready)
def close(self):
if not self._closing:
self._close(None)
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
# should be called by exception handler only
if (isinstance(exc, OSError) and exc.errno == errno.EIO):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._close(exc)
def _close(self, exc):
self._closing = True
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._pipe.close()
self._pipe = None
self._protocol = None
self._loop = None
class _UnixWritePipeTransport(transports._FlowControlMixin,
transports.WriteTransport):
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
super().__init__(extra)
self._extra['pipe'] = pipe
self._loop = loop
self._pipe = pipe
self._fileno = pipe.fileno()
mode = os.fstat(self._fileno).st_mode
is_socket = stat.S_ISSOCK(mode)
if not (is_socket or
stat.S_ISFIFO(mode) or
stat.S_ISCHR(mode)):
raise ValueError("Pipe transport is only for "
"pipes, sockets and character devices")
_set_nonblocking(self._fileno)
self._protocol = protocol
self._buffer = []
self._conn_lost = 0
self._closing = False # Set when close() or write_eof() called.
# On AIX, the reader trick only works for sockets.
# On other platforms it works for pipes and sockets.
# (Exception: OS X 10.4? Issue #19294.)
if is_socket or not sys.platform.startswith("aix"):
self._loop.add_reader(self._fileno, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# wait until protocol.connection_made() has been called
self._loop.call_soon(waiter._set_result_unless_cancelled, None)
def __repr__(self):
info = [self.__class__.__name__, 'fd=%s' % self._fileno]
if self._pipe is not None:
polling = selector_events._test_selector_event(
self._loop._selector,
self._fileno, selectors.EVENT_WRITE)
if polling:
info.append('polling')
else:
info.append('idle')
bufsize = self.get_write_buffer_size()
info.append('bufsize=%s' % bufsize)
else:
info.append('closed')
return '<%s>' % ' '.join(info)
def get_write_buffer_size(self):
return sum(len(data) for data in self._buffer)
def _read_ready(self):
# Pipe was closed by peer.
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
if self._buffer:
self._close(BrokenPipeError())
else:
self._close()
def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
if isinstance(data, bytearray):
data = memoryview(data)
if not data:
return
if self._conn_lost or self._closing:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('pipe closed by peer or '
'os.write(pipe, data) raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Attempt to send it right away first.
try:
n = os.write(self._fileno, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
self._conn_lost += 1
self._fatal_error(exc, 'Fatal write error on pipe transport')
return
if n == len(data):
return
elif n > 0:
data = data[n:]
self._loop.add_writer(self._fileno, self._write_ready)
self._buffer.append(data)
self._maybe_pause_protocol()
def _write_ready(self):
data = b''.join(self._buffer)
assert data, 'Data should not be empty'
self._buffer.clear()
try:
n = os.write(self._fileno, data)
except (BlockingIOError, InterruptedError):
self._buffer.append(data)
except Exception as exc:
self._conn_lost += 1
            # Remove the writer here, _fatal_error() doesn't do it
# because _buffer is empty.
self._loop.remove_writer(self._fileno)
self._fatal_error(exc, 'Fatal write error on pipe transport')
else:
if n == len(data):
self._loop.remove_writer(self._fileno)
self._maybe_resume_protocol() # May append to buffer.
if not self._buffer and self._closing:
self._loop.remove_reader(self._fileno)
self._call_connection_lost(None)
return
elif n > 0:
data = data[n:]
self._buffer.append(data) # Try again later.
def can_write_eof(self):
return True
# TODO: Make the relationships between write_eof(), close(),
# abort(), _fatal_error() and _close() more straightforward.
def write_eof(self):
if self._closing:
return
assert self._pipe
self._closing = True
if not self._buffer:
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, None)
def close(self):
if not self._closing:
            # write_eof() is all we need to close the write pipe
self.write_eof()
def abort(self):
self._close(None)
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
# should be called by exception handler only
if isinstance(exc, (BrokenPipeError, ConnectionResetError)):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._close(exc)
def _close(self, exc=None):
self._closing = True
if self._buffer:
self._loop.remove_writer(self._fileno)
self._buffer.clear()
self._loop.remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._pipe.close()
self._pipe = None
self._protocol = None
self._loop = None
class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
stdin_w = None
if stdin == subprocess.PIPE:
# Use a socket pair for stdin, since not all platforms
# support selecting read events on the write end of a
# socket (which we use in order to detect closing of the
# other end). Notably this is needed on AIX, and works
# just fine on other platforms.
stdin, stdin_w = self._loop._socketpair()
self._proc = subprocess.Popen(
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
universal_newlines=False, bufsize=bufsize, **kwargs)
if stdin_w is not None:
stdin.close()
self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
class AbstractChildWatcher:
"""Abstract base class for monitoring child processes.
Objects derived from this class monitor a collection of subprocesses and
report their termination or interruption by a signal.
New callbacks are registered with .add_child_handler(). Starting a new
process must be done within a 'with' block to allow the watcher to suspend
    its activity until the new process is fully registered (this is needed to
prevent a race condition in some implementations).
Example:
with watcher:
proc = subprocess.Popen("sleep 1")
watcher.add_child_handler(proc.pid, callback)
Notes:
Implementations of this class must be thread-safe.
Since child watcher objects may catch the SIGCHLD signal and call
waitpid(-1), there should be only one active object per process.
"""
def add_child_handler(self, pid, callback, *args):
"""Register a new child handler.
Arrange for callback(pid, returncode, *args) to be called when
process 'pid' terminates. Specifying another callback for the same
process replaces the previous handler.
Note: callback() must be thread-safe.
"""
raise NotImplementedError()
def remove_child_handler(self, pid):
"""Removes the handler for process 'pid'.
The function returns True if the handler was successfully removed,
False if there was nothing to remove."""
raise NotImplementedError()
def attach_loop(self, loop):
"""Attach the watcher to an event loop.
If the watcher was previously attached to an event loop, then it is
first detached before attaching to the new loop.
Note: loop may be None.
"""
raise NotImplementedError()
def close(self):
"""Close the watcher.
This must be called to make sure that any underlying resource is freed.
"""
raise NotImplementedError()
def __enter__(self):
"""Enter the watcher's context and allow starting new processes
This function must return self"""
raise NotImplementedError()
def __exit__(self, a, b, c):
"""Exit the watcher's context"""
raise NotImplementedError()
class BaseChildWatcher(AbstractChildWatcher):
def __init__(self):
self._loop = None
def close(self):
self.attach_loop(None)
def _do_waitpid(self, expected_pid):
raise NotImplementedError()
def _do_waitpid_all(self):
raise NotImplementedError()
def attach_loop(self, loop):
assert loop is None or isinstance(loop, events.AbstractEventLoop)
if self._loop is not None:
self._loop.remove_signal_handler(signal.SIGCHLD)
self._loop = loop
if loop is not None:
loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)
# Prevent a race condition in case a child terminated
# during the switch.
self._do_waitpid_all()
def _sig_chld(self):
try:
self._do_waitpid_all()
except Exception as exc:
# self._loop should always be available here
# as '_sig_chld' is added as a signal handler
# in 'attach_loop'
self._loop.call_exception_handler({
'message': 'Unknown exception in SIGCHLD handler',
'exception': exc,
})
def _compute_returncode(self, status):
if os.WIFSIGNALED(status):
# The child process died because of a signal.
return -os.WTERMSIG(status)
elif os.WIFEXITED(status):
            # The child process exited (e.g. sys.exit()).
return os.WEXITSTATUS(status)
else:
# The child exited, but we don't understand its status.
# This shouldn't happen, but if it does, let's just
# return that status; perhaps that helps debug it.
return status
class SafeChildWatcher(BaseChildWatcher):
"""'Safe' child watcher implementation.
This implementation avoids disrupting other code spawning processes by
polling explicitly each process in the SIGCHLD handler instead of calling
os.waitpid(-1).
This is a safe solution but it has a significant overhead when handling a
big number of children (O(n) each time SIGCHLD is raised)
"""
def __init__(self):
super().__init__()
self._callbacks = {}
def close(self):
self._callbacks.clear()
super().close()
def __enter__(self):
return self
def __exit__(self, a, b, c):
pass
def add_child_handler(self, pid, callback, *args):
self._callbacks[pid] = callback, args
# Prevent a race condition in case the child is already terminated.
self._do_waitpid(pid)
def remove_child_handler(self, pid):
try:
del self._callbacks[pid]
return True
except KeyError:
return False
def _do_waitpid_all(self):
for pid in list(self._callbacks):
self._do_waitpid(pid)
def _do_waitpid(self, expected_pid):
assert expected_pid > 0
try:
pid, status = os.waitpid(expected_pid, os.WNOHANG)
except ChildProcessError:
# The child process is already reaped
# (may happen if waitpid() is called elsewhere).
pid = expected_pid
returncode = 255
logger.warning(
"Unknown child process pid %d, will report returncode 255",
pid)
else:
if pid == 0:
# The child process is still alive.
return
returncode = self._compute_returncode(status)
if self._loop.get_debug():
logger.debug('process %s exited with returncode %s',
expected_pid, returncode)
try:
callback, args = self._callbacks.pop(pid)
except KeyError: # pragma: no cover
# May happen if .remove_child_handler() is called
# after os.waitpid() returns.
if self._loop.get_debug():
logger.warning("Child watcher got an unexpected pid: %r",
pid, exc_info=True)
else:
callback(pid, returncode, *args)
class FastChildWatcher(BaseChildWatcher):
"""'Fast' child watcher implementation.
    This implementation reaps every terminated process by calling
os.waitpid(-1) directly, possibly breaking other code spawning processes
and waiting for their termination.
There is no noticeable overhead when handling a big number of children
(O(1) each time a child terminates).
"""
def __init__(self):
super().__init__()
self._callbacks = {}
self._lock = threading.Lock()
self._zombies = {}
self._forks = 0
def close(self):
self._callbacks.clear()
self._zombies.clear()
super().close()
def __enter__(self):
with self._lock:
self._forks += 1
return self
def __exit__(self, a, b, c):
with self._lock:
self._forks -= 1
if self._forks or not self._zombies:
return
collateral_victims = str(self._zombies)
self._zombies.clear()
logger.warning(
"Caught subprocesses termination from unknown pids: %s",
collateral_victims)
def add_child_handler(self, pid, callback, *args):
assert self._forks, "Must use the context manager"
with self._lock:
try:
returncode = self._zombies.pop(pid)
except KeyError:
# The child is running.
self._callbacks[pid] = callback, args
return
# The child is dead already. We can fire the callback.
callback(pid, returncode, *args)
def remove_child_handler(self, pid):
try:
del self._callbacks[pid]
return True
except KeyError:
return False
def _do_waitpid_all(self):
# Because of signal coalescing, we must keep calling waitpid() as
# long as we're able to reap a child.
while True:
try:
pid, status = os.waitpid(-1, os.WNOHANG)
except ChildProcessError:
# No more child processes exist.
return
else:
if pid == 0:
# A child process is still alive.
return
returncode = self._compute_returncode(status)
with self._lock:
try:
callback, args = self._callbacks.pop(pid)
except KeyError:
# unknown child
if self._forks:
# It may not be registered yet.
self._zombies[pid] = returncode
if self._loop.get_debug():
logger.debug('unknown process %s exited '
'with returncode %s',
pid, returncode)
continue
callback = None
else:
if self._loop.get_debug():
logger.debug('process %s exited with returncode %s',
pid, returncode)
if callback is None:
logger.warning(
"Caught subprocess termination from unknown pid: "
"%d -> %d", pid, returncode)
else:
callback(pid, returncode, *args)
class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
"""XXX"""
_loop_factory = _UnixSelectorEventLoop
def __init__(self):
super().__init__()
self._watcher = None
def _init_watcher(self):
with events._lock:
if self._watcher is None: # pragma: no branch
self._watcher = SafeChildWatcher()
if isinstance(threading.current_thread(),
threading._MainThread):
self._watcher.attach_loop(self._local._loop)
def set_event_loop(self, loop):
"""Set the event loop.
As a side effect, if a child watcher was set before, then calling
.set_event_loop() from the main thread will call .attach_loop(loop) on
the child watcher.
"""
super().set_event_loop(loop)
if self._watcher is not None and \
isinstance(threading.current_thread(), threading._MainThread):
self._watcher.attach_loop(loop)
def get_child_watcher(self):
"""Get the watcher for child processes.
If not yet set, a SafeChildWatcher object is automatically created.
"""
if self._watcher is None:
self._init_watcher()
return self._watcher
def set_child_watcher(self, watcher):
"""Set the watcher for child processes."""
assert watcher is None or isinstance(watcher, AbstractChildWatcher)
if self._watcher is not None:
self._watcher.close()
self._watcher = watcher
SelectorEventLoop = _UnixSelectorEventLoop
DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
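# --- Illustrative sketch (not part of the original module) -----------------
# add_signal_handler()/remove_signal_handler() above are UNIX-only and must
# run on the main thread.  The self-contained walk-through below wires
# SIGUSR1 to a callback, delivers the signal, and detaches the handler; the
# function name and the 0.1s grace period are choices made for this sketch.
def _sketch_signal_handler_usage():
    loop = SelectorEventLoop()
    hits = []
    try:
        loop.add_signal_handler(signal.SIGUSR1, hits.append, 'seen SIGUSR1')
        os.kill(os.getpid(), signal.SIGUSR1)   # queue the signal for the loop
        loop.call_later(0.1, loop.stop)        # give the loop time to see it
        loop.run_forever()
        assert loop.remove_signal_handler(signal.SIGUSR1) is True
    finally:
        loop.close()
    return hits                                # expected: ['seen SIGUSR1']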
|
lgpl-3.0
|
moijes12/oh-mainline
|
vendor/packages/beautifulsoup4/scripts/demonstrate_parser_differences.py
|
73
|
2976
|
"""Demonstrate how different parsers parse the same markup.
Beautiful Soup can use any of a number of different parsers. Every
parser should behave more or less the same on valid markup, and
Beautiful Soup's unit tests make sure this is the case. But every
parser handles invalid markup differently. Even different versions of
the same parser handle invalid markup differently. So instead of unit
tests I've created this educational demonstration script.
The file demonstration_markup.txt contains many lines of HTML. This
script tests each line of markup against every parser you have
installed, and prints out how each parser sees that markup. This may
help you choose a parser, or understand why Beautiful Soup presents
your document the way it does.
"""
import os
import sys
from bs4 import BeautifulSoup
parsers = ['html.parser']
try:
from bs4.builder import _lxml
parsers.append('lxml')
except ImportError, e:
pass
try:
from bs4.builder import _html5lib
parsers.append('html5lib')
except ImportError, e:
pass
class Demonstration(object):
def __init__(self, markup):
self.results = {}
self.markup = markup
def run_against(self, *parser_names):
uniform_results = True
previous_output = None
for parser in parser_names:
try:
soup = BeautifulSoup(self.markup, parser)
                if self.markup.startswith("<div>"):
# Extract the interesting part
output = soup.div
else:
output = soup
except Exception, e:
output = "[EXCEPTION] %s" % str(e)
self.results[parser] = output
if previous_output is None:
previous_output = output
elif previous_output != output:
uniform_results = False
return uniform_results
def dump(self):
print "%s: %s" % ("Markup".rjust(13), self.markup.encode("utf8"))
for parser, output in self.results.items():
print "%s: %s" % (parser.rjust(13), output.encode("utf8"))
different_results = []
uniform_results = []
print "= Testing the following parsers: %s =" % ", ".join(parsers)
print
input_file = sys.stdin
if sys.stdin.isatty():
for filename in [
"demonstration_markup.txt",
os.path.join("scripts", "demonstration_markup.txt")]:
if os.path.exists(filename):
input_file = open(filename)
for markup in input_file:
demo = Demonstration(markup.decode("utf8").strip().replace("\\n", "\n"))
is_uniform = demo.run_against(*parsers)
if is_uniform:
uniform_results.append(demo)
else:
different_results.append(demo)
print "== Markup that's handled the same in every parser =="
print
for demo in uniform_results:
demo.dump()
print
print "== Markup that's not handled the same in every parser =="
print
for demo in different_results:
demo.dump()
print
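# --- Illustrative sketch (not part of the original script) -----------------
# Demonstration can also be used one-off, outside the file-driven loop above;
# the markup and function name below are hypothetical.
def _sketch_single_demo():
    demo = Demonstration(u"<p>an <b>unclosed tag")
    if demo.run_against(*parsers):
        print "all installed parsers agree on this markup"
    else:
        demo.dump()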
|
agpl-3.0
|
cloudbau/cinder
|
cinder/backup/driver.py
|
3
|
1107
|
# Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
from cinder.db import base
class BackupDriver(base.Base):
def backup(self, backup, volume_file):
"""Starts a backup of a specified volume"""
raise NotImplementedError()
def restore(self, backup, volume_id, volume_file):
"""Restores a saved backup"""
raise NotImplementedError()
def delete(self, backup):
"""Deletes a saved backup"""
raise NotImplementedError()
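# --- Illustrative sketch (not part of the original module) -----------------
# A concrete driver only needs to implement the three hooks above.  The class
# below is a hypothetical, deliberately minimal example that streams the
# volume file to and from a local path; it assumes the backup record supports
# item access with an 'id' key, and real drivers (swift, ceph, ...) do far
# more (chunking, metadata, compression).
class _SketchLocalFileBackupDriver(BackupDriver):
    """Toy driver that copies volume data to one local file per backup."""
    def _path(self, backup):
        return '/tmp/cinder-backup-%s' % backup['id']
    def backup(self, backup, volume_file):
        """Starts a backup of a specified volume"""
        with open(self._path(backup), 'wb') as target:
            for chunk in iter(lambda: volume_file.read(64 * 1024), b''):
                target.write(chunk)
    def restore(self, backup, volume_id, volume_file):
        """Restores a saved backup"""
        with open(self._path(backup), 'rb') as source:
            for chunk in iter(lambda: source.read(64 * 1024), b''):
                volume_file.write(chunk)
    def delete(self, backup):
        """Deletes a saved backup"""
        import os
        path = self._path(backup)
        if os.path.exists(path):
            os.remove(path)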
|
apache-2.0
|
ganeshnalawade/ansible-modules-core
|
cloud/openstack/os_server_actions.py
|
14
|
7960
|
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2015, Jesse Keating <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_actions
short_description: Perform actions on Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Jesse Keating (@j2sol)"
description:
- Perform server actions on an existing compute instance from OpenStack.
This module does not return any data other than changed true/false.
    When I(action) is 'rebuild', the I(image) parameter is required.
options:
server:
description:
- Name or ID of the instance
required: true
wait:
description:
- If the module should wait for the instance action to be performed.
required: false
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the instance to perform
the requested action.
required: false
default: 180
action:
description:
- Perform the given action. The lock and unlock actions always return
changed as the servers API does not provide lock status.
choices: [stop, start, pause, unpause, lock, unlock, suspend, resume,
rebuild]
    required: true
image:
description:
- Image the server should be rebuilt with
default: null
version_added: "2.3"
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Pauses a compute instance
- os_server_actions:
action: pause
auth:
auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0
username: admin
password: admin
project_name: admin
server: vm1
timeout: 200
'''
_action_map = {'stop': 'SHUTOFF',
'start': 'ACTIVE',
'pause': 'PAUSED',
'unpause': 'ACTIVE',
'lock': 'ACTIVE', # API doesn't show lock/unlock status
'unlock': 'ACTIVE',
'suspend': 'SUSPENDED',
'resume': 'ACTIVE',
'rebuild': 'ACTIVE'}
_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']
def _wait(timeout, cloud, server, action, module):
"""Wait for the server to reach the desired state for the given action."""
for count in shade._utils._iterate_timeout(
timeout,
"Timeout waiting for server to complete %s" % action):
try:
server = cloud.get_server(server.id)
except Exception:
continue
if server.status == _action_map[action]:
return
if server.status == 'ERROR':
module.fail_json(msg="Server reached ERROR state while attempting to %s" % action)
def _system_state_change(action, status):
"""Check if system state would change."""
if status == _action_map[action]:
return False
return True
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause',
'lock', 'unlock', 'suspend', 'resume',
'rebuild']),
image=dict(required=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, supports_check_mode=True,
required_if=[('action', 'rebuild', ['image'])],
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
action = module.params['action']
wait = module.params['wait']
timeout = module.params['timeout']
image = module.params['image']
try:
if action in _admin_actions:
cloud = shade.operator_cloud(**module.params)
else:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
if not server:
module.fail_json(msg='Could not find server %s' % server)
status = server.status
if module.check_mode:
module.exit_json(changed=_system_state_change(action, status))
if action == 'stop':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.stop(server=server.id)
if wait:
                _wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
if action == 'start':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.start(server=server.id)
if wait:
                _wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
if action == 'pause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.pause(server=server.id)
if wait:
                _wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'unpause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.unpause(server=server.id)
if wait:
                _wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'lock':
# lock doesn't set a state, just do it
cloud.nova_client.servers.lock(server=server.id)
module.exit_json(changed=True)
elif action == 'unlock':
# unlock doesn't set a state, just do it
cloud.nova_client.servers.unlock(server=server.id)
module.exit_json(changed=True)
elif action == 'suspend':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.suspend(server=server.id)
if wait:
                _wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'resume':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.resume(server=server.id)
if wait:
                _wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'rebuild':
image = cloud.get_image(image)
if image is None:
module.fail_json(msg="Image does not exist")
# rebuild doesn't set a state, just do it
cloud.nova_client.servers.rebuild(server=server.id, image=image.id)
if wait:
                _wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
franosincic/edx-platform
|
cms/djangoapps/contentstore/management/commands/export_all_courses.py
|
70
|
2311
|
"""
Script for exporting all courseware from Mongo to a directory and listing the courses which failed to export
"""
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
class Command(BaseCommand):
"""
Export all courses from mongo to the specified data directory and list the courses which failed to export
"""
help = 'Export all courses from mongo to the specified data directory and list the courses which failed to export'
def handle(self, *args, **options):
"""
Execute the command
"""
if len(args) != 1:
raise CommandError("export requires one argument: <output path>")
output_path = args[0]
courses, failed_export_courses = export_courses_to_output_path(output_path)
print "=" * 80
print u"=" * 30 + u"> Export summary"
print u"Total number of courses to export: {0}".format(len(courses))
print u"Total number of courses which failed to export: {0}".format(len(failed_export_courses))
print u"List of export failed courses ids:"
print u"\n".join(failed_export_courses)
print "=" * 80
def export_courses_to_output_path(output_path):
"""
Export all courses to target directory and return the list of courses which failed to export
"""
content_store = contentstore()
module_store = modulestore()
root_dir = output_path
courses = module_store.get_courses()
course_ids = [x.id for x in courses]
failed_export_courses = []
for course_id in course_ids:
print u"-" * 80
print u"Exporting course id = {0} to {1}".format(course_id, output_path)
try:
course_dir = course_id.to_deprecated_string().replace('/', '...')
export_course_to_xml(module_store, content_store, course_id, root_dir, course_dir)
except Exception as err: # pylint: disable=broad-except
failed_export_courses.append(unicode(course_id))
print u"=" * 30 + u"> Oops, failed to export {0}".format(course_id)
print u"Error:"
print err
return courses, failed_export_courses
|
agpl-3.0
|
Panos512/invenio
|
modules/bibcatalog/lib/bibcatalog_dblayer.py
|
18
|
1848
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibCatalog db layer."""
from invenio.dbquery import run_sql
def get_all_new_records(since, last_id):
"""
Get all the newly inserted records since last run.
"""
# Fetch all records inserted since last run
sql = "SELECT `id`, `creation_date` FROM `bibrec` " \
"WHERE `creation_date` >= %s " \
"AND `id` > %s " \
"ORDER BY `creation_date`"
return run_sql(sql, (since.isoformat(), last_id))
def get_all_modified_records(since, last_id):
"""
Get all the newly modified records since last run.
"""
sql = "SELECT `id`, `modification_date` FROM `bibrec` " \
"WHERE `modification_date` >= %s " \
"AND `id` > %s " \
"ORDER BY `modification_date`"
return run_sql(sql, (since.isoformat(), last_id))
def can_launch_bibupload(taskid):
"""
Checks if task can be launched.
"""
if taskid == 0:
return True
sql = 'SELECT status FROM schTASK WHERE id = %s'
if run_sql(sql, [str(taskid)])[0][0] != 'DONE':
return False
return True
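# Minimal usage sketch (not part of the original module; the date, the ids and
# the availability of a configured Invenio database are assumptions):
#
#   from datetime import datetime
#   new_recs = get_all_new_records(since=datetime(2012, 1, 1), last_id=0)
#   changed_recs = get_all_modified_records(since=datetime(2012, 1, 1), last_id=0)
#   if can_launch_bibupload(taskid=0):
#       pass  # the previous bibupload task, if any, has finished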
|
gpl-2.0
|
davenovak/mtasa-blue
|
vendor/google-breakpad/src/tools/gyp/test/rules-rebuild/gyptest-default.py
|
345
|
2242
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a rule that generates multiple outputs rebuilds
correctly when the inputs change.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default')
test.run_gyp('same_target.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in!
Hello from prog2.in!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.sleep()
contents = test.read(['relocate', 'src', 'prog1.in'])
contents = contents.replace('!', ' AGAIN!')
test.write(['relocate', 'src', 'prog1.in'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in AGAIN!
Hello from prog2.in!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.sleep()
contents = test.read(['relocate', 'src', 'prog2.in'])
contents = contents.replace('!', ' AGAIN!')
test.write(['relocate', 'src', 'prog2.in'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from prog1.in AGAIN!
Hello from prog2.in AGAIN!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
# Test that modifying a rule's inputs (specifically, make-sources.py) causes
# the targets to be built.
test.sleep()
contents = test.read(['relocate', 'src', 'make-sources.py'])
contents = contents.replace('%s', 'the amazing %s')
test.write(['relocate', 'src', 'make-sources.py'], contents)
test.build('same_target.gyp', chdir='relocate/src')
expect = """\
Hello from main.c
Hello from the amazing prog1.in AGAIN!
Hello from the amazing prog2.in AGAIN!
"""
test.run_built_executable('program', chdir='relocate/src', stdout=expect)
test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
test.pass_test()
|
gpl-3.0
|
ThinkOpen-Solutions/odoo
|
openerp/addons/base/tests/test_base.py
|
127
|
33971
|
import unittest2
import openerp.tests.common as common
from openerp.osv.orm import except_orm
class test_base(common.TransactionCase):
def setUp(self):
super(test_base,self).setUp()
self.res_partner = self.registry('res.partner')
self.res_users = self.registry('res.users')
self.res_partner_title = self.registry('res.partner.title')
# samples use effective TLDs from the Mozilla public suffix
# list at http://publicsuffix.org
self.samples = [
('"Raoul Grosbedon" <[email protected]> ', 'Raoul Grosbedon', '[email protected]'),
('[email protected]', '', '[email protected]'),
('Raoul chirurgiens-dentistes.fr', 'Raoul chirurgiens-dentistes.fr', ''),
(" Raoul O'hara <[email protected]>", "Raoul O'hara", '[email protected]')
]
def test_00_res_partner_name_create(self):
cr, uid = self.cr, self.uid
parse = self.res_partner._parse_partner_name
for text, name, mail in self.samples:
self.assertEqual((name,mail), parse(text), 'Partner name parsing failed')
partner_id, dummy = self.res_partner.name_create(cr, uid, text)
partner = self.res_partner.browse(cr, uid, partner_id)
self.assertEqual(name or mail, partner.name, 'Partner name incorrect')
self.assertEqual(mail or False, partner.email, 'Partner email incorrect')
def test_10_res_partner_find_or_create(self):
cr,uid = self.cr, self.uid
email = self.samples[0][0]
partner_id, dummy = self.res_partner.name_create(cr, uid, email)
found_id = self.res_partner.find_or_create(cr, uid, email)
self.assertEqual(partner_id, found_id, 'find_or_create failed')
new_id = self.res_partner.find_or_create(cr, uid, self.samples[1][0])
self.assertTrue(new_id > partner_id, 'find_or_create failed - should have created new one')
new_id2 = self.res_partner.find_or_create(cr, uid, self.samples[2][0])
self.assertTrue(new_id2 > new_id, 'find_or_create failed - should have created new one again')
def test_15_res_partner_name_search(self):
cr,uid = self.cr, self.uid
for name, active in [
('"A Raoul Grosbedon" <[email protected]>', False),
('B Raoul chirurgiens-dentistes.fr', True),
("C Raoul O'hara <[email protected]>", True),
('[email protected]', True),
]:
partner_id, dummy = self.res_partner.name_create(cr, uid, name, context={'default_active': active})
partners = self.res_partner.name_search(cr, uid, 'Raoul')
self.assertEqual(len(partners), 2, 'Incorrect search number result for name_search')
partners = self.res_partner.name_search(cr, uid, 'Raoul', limit=1)
self.assertEqual(len(partners), 1, 'Incorrect search number result for name_search with a limit')
self.assertEqual(partners[0][1], 'B Raoul chirurgiens-dentistes.fr', 'Incorrect partner returned, should be the first active')
def test_20_res_partner_address_sync(self):
cr, uid = self.cr, self.uid
ghoststep = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'GhostStep',
'is_company': True,
'street': 'Main Street, 10',
'phone': '123456789',
'email': '[email protected]',
'vat': 'BE0477472701',
'type': 'default'}))
p1 = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'Denis Bladesmith <[email protected]>')[0])
self.assertEqual(p1.type, 'contact', 'Default type must be "contact"')
p1phone = '123456789#34'
p1.write({'phone': p1phone,
'parent_id': ghoststep.id,
'use_parent_address': True})
p1.refresh()
self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced')
self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
self.assertEqual(p1.email, '[email protected]', 'Email should be preserved after sync')
# turn off sync
p1street = 'Different street, 42'
p1.write({'street': p1street,
'use_parent_address': False})
p1.refresh(), ghoststep.refresh()
self.assertEqual(p1.street, p1street, 'Address fields must not be synced after turning sync off')
self.assertNotEqual(ghoststep.street, p1street, 'Parent address must never be touched')
# turn on sync again
p1.write({'use_parent_address': True})
p1.refresh()
self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced again')
self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
self.assertEqual(p1.email, '[email protected]', 'Email should be preserved after sync')
# Modify parent, sync to children
ghoststreet = 'South Street, 25'
ghoststep.write({'street': ghoststreet})
p1.refresh()
self.assertEqual(p1.street, ghoststreet, 'Address fields must be synced automatically')
self.assertEqual(p1.phone, p1phone, 'Phone should not be synced')
self.assertEqual(p1.email, '[email protected]', 'Email should be preserved after sync')
p1street = 'My Street, 11'
p1.write({'street': p1street})
ghoststep.refresh()
self.assertEqual(ghoststep.street, ghoststreet, 'Touching contact should never alter parent')
def test_30_res_partner_first_contact_sync(self):
""" Test initial creation of company/contact pair where contact address gets copied to
company """
cr, uid = self.cr, self.uid
ironshield = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'IronShield')[0])
self.assertFalse(ironshield.is_company, 'Partners are not companies by default')
self.assertFalse(ironshield.use_parent_address, 'use_parent_address defaults to False')
self.assertEqual(ironshield.type, 'contact', 'Default type must be "contact"')
ironshield.write({'type': 'default'}) # force default type to double-check sync
p1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Isen Hardearth',
'street': 'Strongarm Avenue, 12',
'parent_id': ironshield.id}))
self.assertEquals(p1.type, 'contact', 'Default type must be "contact", not the copied parent type')
ironshield.refresh()
self.assertEqual(ironshield.street, p1.street, 'Address fields should be copied to company')
self.assertTrue(ironshield.is_company, 'Company flag should be turned on after first contact creation')
def test_40_res_partner_address_getc(self):
""" Test address_get address resolution mechanism: it should first go down through descendants,
            stopping when encountering another is_company entity, then go up, stopping again at the first
is_company entity or the root ancestor and if nothing matches, it should use the provided partner
itself """
cr, uid = self.cr, self.uid
elmtree = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'Elmtree')[0])
branch1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 1',
'parent_id': elmtree.id,
'is_company': True}))
leaf10 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 10',
'parent_id': branch1.id,
'type': 'invoice'}))
branch11 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 11',
'parent_id': branch1.id,
'type': 'other'}))
leaf111 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 111',
'parent_id': branch11.id,
'type': 'delivery'}))
        branch11.write({'is_company': False}) # force is_company to False after creating its first child
branch2 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 2',
'parent_id': elmtree.id,
'is_company': True}))
leaf21 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 21',
'parent_id': branch2.id,
'type': 'delivery'}))
leaf22 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 22',
'parent_id': branch2.id}))
leaf23 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 23',
'parent_id': branch2.id,
'type': 'default'}))
# go up, stop at branch1
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': leaf111.id}, 'Invalid address resolution')
self.assertEqual(self.res_partner.address_get(cr, uid, [branch11.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': branch11.id}, 'Invalid address resolution')
        # go down, stop at all child companies
self.assertEqual(self.res_partner.address_get(cr, uid, [elmtree.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': elmtree.id,
'invoice': elmtree.id,
'contact': elmtree.id,
'other': elmtree.id,
'default': elmtree.id}, 'Invalid address resolution')
# go down through children
self.assertEqual(self.res_partner.address_get(cr, uid, [branch1.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': branch1.id}, 'Invalid address resolution')
self.assertEqual(self.res_partner.address_get(cr, uid, [branch2.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution')
# go up then down through siblings
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf21.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id
}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf22.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': leaf22.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf23.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution, `default` should only override if no partner with specific type exists')
# empty adr_pref means only 'default'
self.assertEqual(self.res_partner.address_get(cr, uid, [elmtree.id], []),
{'default': elmtree.id}, 'Invalid address resolution, no default means commercial entity ancestor')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], []),
{'default': leaf111.id}, 'Invalid address resolution, no default means contact itself')
branch11.write({'type': 'default'})
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], []),
{'default': branch11.id}, 'Invalid address resolution, branch11 should now be default')
def test_50_res_partner_commercial_sync(self):
cr, uid = self.cr, self.uid
p0 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Sigurd Sunknife',
'email': '[email protected]'}))
sunhelm = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Sunhelm',
'is_company': True,
'street': 'Rainbow Street, 13',
'phone': '1122334455',
'email': '[email protected]',
'vat': 'BE0477472701',
'child_ids': [(4, p0.id),
(0, 0, {'name': 'Alrik Greenthorn',
'email': '[email protected]'})],
}))
p1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Otto Blackwood',
'email': '[email protected]',
'parent_id': sunhelm.id}))
p11 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Gini Graywool',
'email': '[email protected]',
'parent_id': p1.id}))
p2 = self.res_partner.browse(cr, uid, self.res_partner.search(cr, uid,
[('email', '=', '[email protected]')])[0])
self.res_partner.write(cr, uid, sunhelm.id, {'child_ids': [(0, 0, {'name': 'Ulrik Greenthorn',
'email': '[email protected]'})]})
p3 = self.res_partner.browse(cr, uid, self.res_partner.search(cr, uid,
[('email', '=', '[email protected]')])[0])
for p in (p0, p1, p11, p2, p3):
p.refresh()
self.assertEquals(p.commercial_partner_id, sunhelm, 'Incorrect commercial entity resolution')
self.assertEquals(p.vat, sunhelm.vat, 'Commercial fields must be automatically synced')
sunhelmvat = 'BE0123456789'
sunhelm.write({'vat': sunhelmvat})
for p in (p0, p1, p11, p2, p3):
p.refresh()
self.assertEquals(p.vat, sunhelmvat, 'Commercial fields must be automatically and recursively synced')
p1vat = 'BE0987654321'
p1.write({'vat': p1vat})
for p in (sunhelm, p0, p11, p2, p3):
p.refresh()
self.assertEquals(p.vat, sunhelmvat, 'Sync to children should only work downstream and on commercial entities')
# promote p1 to commercial entity
vals = p1.onchange_type(is_company=True)['value']
p1.write(dict(vals, parent_id=sunhelm.id,
is_company=True,
name='Sunhelm Subsidiary'))
p1.refresh()
self.assertEquals(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEquals(p1.commercial_partner_id, p1, 'Incorrect commercial entity resolution after setting is_company')
# writing on parent should not touch child commercial entities
sunhelmvat2 = 'BE0112233445'
sunhelm.write({'vat': sunhelmvat2})
p1.refresh()
self.assertEquals(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
p0.refresh()
self.assertEquals(p0.vat, sunhelmvat2, 'Commercial fields must be automatically synced')
def test_60_read_group(self):
cr, uid = self.cr, self.uid
title_sir = self.res_partner_title.create(cr, uid, {'name': 'Sir', 'domain': 'contact'})
title_lady = self.res_partner_title.create(cr, uid, {'name': 'Lady', 'domain': 'contact'})
test_users = [
{'name': 'Alice', 'login': 'alice', 'color': 1, 'function': 'Friend', 'date': '2015-03-28', 'title': title_lady},
{'name': 'Alice', 'login': 'alice2', 'color': 0, 'function': 'Friend', 'date': '2015-01-28', 'title': title_lady},
{'name': 'Bob', 'login': 'bob', 'color': 2, 'function': 'Friend', 'date': '2015-03-02', 'title': title_sir},
{'name': 'Eve', 'login': 'eve', 'color': 3, 'function': 'Eavesdropper', 'date': '2015-03-20', 'title': title_lady},
{'name': 'Nab', 'login': 'nab', 'color': -3, 'function': '5$ Wrench', 'date': '2014-09-10', 'title': title_sir},
{'name': 'Nab', 'login': 'nab-she', 'color': 6, 'function': '5$ Wrench', 'date': '2014-01-02', 'title': title_lady},
]
ids = [self.res_users.create(cr, uid, u) for u in test_users]
domain = [('id', 'in', ids)]
# group on local char field without domain and without active_test (-> empty WHERE clause)
groups_data = self.res_users.read_group(cr, uid, [], fields=['login'], groupby=['login'], orderby='login DESC', context={'active_test': False})
self.assertGreater(len(groups_data), 6, "Incorrect number of results when grouping on a field")
# group on local char field with limit
groups_data = self.res_users.read_group(cr, uid, domain, fields=['login'], groupby=['login'], orderby='login DESC', limit=3, offset=3)
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field with limit")
self.assertEqual(['bob', 'alice2', 'alice'], [g['login'] for g in groups_data], 'Result mismatch')
# group on inherited char field, aggregate on int field (second groupby ignored on purpose)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color', 'function'], groupby=['function', 'login'])
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field")
self.assertEqual(['5$ Wrench', 'Eavesdropper', 'Friend'], [g['function'] for g in groups_data], 'incorrect read_group order')
for group_data in groups_data:
self.assertIn('color', group_data, "Aggregated data for the column 'color' is not present in read_group return values")
self.assertEqual(group_data['color'], 3, "Incorrect sum for aggregated data for the column 'color'")
# group on inherited char field, reverse order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
# group on int field, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['color'], groupby='color')
self.assertEqual([-3, 0, 1, 2, 3, 6], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# multi group, second level is int field, should still be summed in first level grouping
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby=['name', 'color'], orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 3, 2, 1], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# group on inherited char field, multiple orders with directions
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='color DESC, name')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['Eve', 'Nab', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 2, 1, 2], [g['name_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'])
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['January 2014', 'September 2014', 'January 2015', 'March 2015'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 1, 1, 3], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, custom order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'], orderby='date DESC')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['March 2015', 'January 2015', 'September 2014', 'January 2014'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 1, 1, 1], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited many2one (res_partner.title), default order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'])
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), reversed natural order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), multiple orders with m2o in second position
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="color desc, title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the result')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), ordered by other inherited field (color)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby='color')
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
class test_partner_recursion(common.TransactionCase):
def setUp(self):
super(test_partner_recursion,self).setUp()
self.res_partner = self.registry('res.partner')
cr, uid = self.cr, self.uid
self.p1 = self.res_partner.name_create(cr, uid, 'Elmtree')[0]
self.p2 = self.res_partner.create(cr, uid, {'name': 'Elmtree Child 1', 'parent_id': self.p1})
self.p3 = self.res_partner.create(cr, uid, {'name': 'Elmtree Grand-Child 1.1', 'parent_id': self.p2})
# split 101, 102, 103 tests to force SQL rollback between them
def test_101_res_partner_recursion(self):
cr, uid, p1, p3 = self.cr, self.uid, self.p1, self.p3
self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p1], {'parent_id': p3})
def test_102_res_partner_recursion(self):
cr, uid, p2, p3 = self.cr, self.uid, self.p2, self.p3
self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p2], {'parent_id': p3})
def test_103_res_partner_recursion(self):
cr, uid, p3 = self.cr, self.uid, self.p3
self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p3], {'parent_id': p3})
def test_104_res_partner_recursion_indirect_cycle(self):
""" Indirect hacky write to create cycle in children """
cr, uid, p2, p3 = self.cr, self.uid, self.p2, self.p3
p3b = self.res_partner.create(cr, uid, {'name': 'Elmtree Grand-Child 1.2', 'parent_id': self.p2})
self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p2],
{'child_ids': [(1, p3, {'parent_id': p3b}), (1, p3b, {'parent_id': p3})]})
def test_110_res_partner_recursion_multi_update(self):
""" multi-write on several partners in same hierarchy must not trigger a false cycle detection """
cr, uid, p1, p2, p3 = self.cr, self.uid, self.p1, self.p2, self.p3
self.assertTrue(self.res_partner.write(cr, uid, [p1,p2,p3], {'phone': '123456'}))
class test_translation(common.TransactionCase):
def setUp(self):
super(test_translation, self).setUp()
self.res_category = self.registry('res.partner.category')
self.ir_translation = self.registry('ir.translation')
cr, uid = self.cr, self.uid
self.registry('ir.translation').load_module_terms(cr, ['base'], ['fr_FR'])
self.cat_id = self.res_category.create(cr, uid, {'name': 'Customers'})
self.ir_translation.create(cr, uid, {'name': 'res.partner.category,name', 'module':'base',
'value': 'Clients', 'res_id': self.cat_id, 'lang':'fr_FR', 'state':'translated', 'type': 'model'})
def test_101_create_translated_record(self):
cr, uid = self.cr, self.uid
no_context_cat = self.res_category.browse(cr, uid, self.cat_id)
self.assertEqual(no_context_cat.name, 'Customers', "Error in basic name_get")
fr_context_cat = self.res_category.browse(cr, uid, self.cat_id, context={'lang':'fr_FR'})
self.assertEqual(fr_context_cat.name, 'Clients', "Translation not found")
def test_102_duplicate_record(self):
cr, uid = self.cr, self.uid
self.new_cat_id = self.res_category.copy(cr, uid, self.cat_id, context={'lang':'fr_FR'})
no_context_cat = self.res_category.browse(cr, uid, self.new_cat_id)
self.assertEqual(no_context_cat.name, 'Customers', "Duplication did not set untranslated value")
fr_context_cat = self.res_category.browse(cr, uid, self.new_cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients', "Did not find translation for initial value")
def test_103_duplicate_record_fr(self):
cr, uid = self.cr, self.uid
self.new_fr_cat_id = self.res_category.copy(cr, uid, self.cat_id, default={'name': 'Clients (copie)'}, context={'lang':'fr_FR'})
no_context_cat = self.res_category.browse(cr, uid, self.new_fr_cat_id)
self.assertEqual(no_context_cat.name, 'Customers', "Duplication erased original untranslated value")
fr_context_cat = self.res_category.browse(cr, uid, self.new_fr_cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients (copie)', "Did not use default value for translated value")
test_state = None
#: Stores state information across multiple test classes
def setUpModule():
global test_state
test_state = {}
def tearDownModule():
global test_state
test_state = None
class TestPhaseInstall00(unittest2.TestCase):
"""
WARNING: Relies on tests being run in alphabetical order
"""
@classmethod
def setUpClass(cls):
cls.state = None
def test_00_setup(self):
type(self).state = 'init'
@common.at_install(False)
def test_01_no_install(self):
type(self).state = 'error'
def test_02_check(self):
self.assertEqual(
self.state, 'init',
"Testcase state should not have been transitioned from 00")
class TestPhaseInstall01(unittest2.TestCase):
at_install = False
def test_default_norun(self):
        self.fail("An unmarked test in a non-at-install case should not run")
@common.at_install(True)
def test_set_run(self):
test_state['set_at_install'] = True
class TestPhaseInstall02(unittest2.TestCase):
"""
Can't put the check for test_set_run in the same class: if
@common.at_install does not work for test_set_run, it won't work for
    the other one either. Thus the check that test_set_run has indeed run
    correctly is moved to a separate class.
Warning: relies on *classes* being run in alphabetical order in test
modules
"""
def test_check_state(self):
self.assertTrue(
            test_state.get('set_at_install'),
            "The flag should be set when the run state is overridden locally")
if __name__ == '__main__':
unittest2.main()
|
agpl-3.0
|
0111001101111010/hyde
|
hyde/tests/ext/test_requirejs.py
|
1
|
1184
|
# -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from hyde.generator import Generator
from hyde.site import Site
from fswrap import File, Folder
RJS_SOURCE = File(__file__).parent.child_folder('requirejs')
TEST_SITE = File(__file__).parent.parent.child_folder('_test')
class TestRequireJS(object):
def setUp(self):
TEST_SITE.make()
TEST_SITE.parent.child_folder('sites/test_jinja').copy_contents_to(TEST_SITE)
RJS_SOURCE.copy_contents_to(TEST_SITE.child('content/media/js'))
File(TEST_SITE.child('content/media/js/app.js')).delete()
def tearDown(self):
TEST_SITE.delete()
def test_can_execute_rjs(self):
s = Site(TEST_SITE)
s.config.plugins = ['hyde.ext.plugins.js.RequireJSPlugin']
source = TEST_SITE.child('content/media/js/rjs.conf')
target = File(Folder(s.config.deploy_root_path).child('media/js/app.js'))
gen = Generator(s)
gen.generate_resource_at_path(source)
assert target.exists
text = target.read_all()
expected_text = File(RJS_SOURCE.child('app.js')).read_all()
assert text == expected_text
return
|
mit
|
Sentient07/svgpng
|
svgpng/demo/views.py
|
1
|
1741
|
# Create your views here.
from django import forms
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
import math
import cairocffi as cairo
import io
from cairosvg import svg2png
WIDTH, HEIGHT = 256, 256
class TestForm(forms.Form):
pass
def home(request):
title = "SVG Title"
body = svg(WIDTH, HEIGHT)
if request.method == 'POST':
form = TestForm(request.POST)
if form.is_valid():
response = HttpResponse(mimetype="image/png")
response['Content-Disposition'] = 'attachment; filename="svg.png"'
response.write(svg2png(bytestring=body, write_to=None))
return response
else:
form = TestForm()
return render_to_response('home.html',
{'form': form, 'svg': {'title': title,
'body': body}},
context_instance=RequestContext(request))
def svg(width, height):
imageData = io.BytesIO()
surface = cairo.SVGSurface(imageData, width, height)
ctx = cairo.Context(surface)
ctx.scale(width, height)
pat = cairo.LinearGradient(0.0, 0.0, 0.0, 1.0)
pat.add_color_stop_rgba(1, 0.7, 0, 0, 0.5)
pat.add_color_stop_rgba(0, 0.9, 0.7, 0.2, 1)
ctx.rectangle(0, 0, 1, 1)
ctx.set_source(pat)
ctx.fill()
ctx.translate(0.1, 0.1)
ctx.move_to(0, 0)
ctx.arc(0.2, 0.1, 0.1, -math.pi/2, 0)
ctx.line_to(0.5, 0.1)
ctx.curve_to(0.5, 0.2, 0.5, 0.4, 0.2, 0.8)
ctx.close_path()
ctx.set_source_rgb(0.3, 0.2, 0.5)
ctx.set_line_width(0.02)
ctx.stroke()
surface.finish()
return imageData.getvalue()
|
mit
|
AngelinaScheck/BachelorBioinfo
|
libsvm-master/python/svmutil.py
|
64
|
8695
|
#!/usr/bin/env python
import os
import sys
from svm import *
from svm import __all__ as svm_all
__all__ = ['evaluations', 'svm_load_model', 'svm_predict', 'svm_read_problem',
'svm_save_model', 'svm_train'] + svm_all
sys.path = [os.path.dirname(os.path.abspath(__file__))] + sys.path
def svm_read_problem(data_file_name):
"""
svm_read_problem(data_file_name) -> [y, x]
Read LIBSVM-format data from data_file_name and return labels y
and data instances x.
"""
prob_y = []
prob_x = []
for line in open(data_file_name):
line = line.split(None, 1)
# In case an instance with all zero features
if len(line) == 1: line += ['']
label, features = line
xi = {}
for e in features.split():
ind, val = e.split(":")
xi[int(ind)] = float(val)
prob_y += [float(label)]
prob_x += [xi]
return (prob_y, prob_x)
def svm_load_model(model_file_name):
"""
svm_load_model(model_file_name) -> model
Load a LIBSVM model from model_file_name and return.
"""
model = libsvm.svm_load_model(model_file_name.encode())
if not model:
print("can't open model file %s" % model_file_name)
return None
model = toPyModel(model)
return model
def svm_save_model(model_file_name, model):
"""
svm_save_model(model_file_name, model) -> None
Save a LIBSVM model to the file model_file_name.
"""
libsvm.svm_save_model(model_file_name.encode(), model)
def evaluations(ty, pv):
"""
evaluations(ty, pv) -> (ACC, MSE, SCC)
Calculate accuracy, mean squared error and squared correlation coefficient
using the true values (ty) and predicted values (pv).
"""
if len(ty) != len(pv):
raise ValueError("len(ty) must equal to len(pv)")
total_correct = total_error = 0
sumv = sumy = sumvv = sumyy = sumvy = 0
for v, y in zip(pv, ty):
if y == v:
total_correct += 1
total_error += (v-y)*(v-y)
sumv += v
sumy += y
sumvv += v*v
sumyy += y*y
sumvy += v*y
l = len(ty)
ACC = 100.0*total_correct/l
MSE = total_error/l
try:
SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy))
except:
SCC = float('nan')
return (ACC, MSE, SCC)
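# Worked example (the label lists below are illustrative, not from this file):
#   evaluations([1, 1, 0], [1, 0, 0]) gives
#   ACC = 66.67 (2 of 3 labels match), MSE = (0 + 1 + 0)/3 = 0.333..., and
#   SCC = (3*1 - 1*2)**2 / ((3*1 - 1**2) * (3*2 - 2**2)) = 1.0/4 = 0.25.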
def svm_train(arg1, arg2=None, arg3=None):
"""
svm_train(y, x [, options]) -> model | ACC | MSE
svm_train(prob [, options]) -> model | ACC | MSE
svm_train(prob, param) -> model | ACC| MSE
Train an SVM model from data (y, x) or an svm_problem prob using
'options' or an svm_parameter param.
If '-v' is specified in 'options' (i.e., cross validation)
either accuracy (ACC) or mean-squared error (MSE) is returned.
options:
-s svm_type : set type of SVM (default 0)
0 -- C-SVC (multi-class classification)
1 -- nu-SVC (multi-class classification)
2 -- one-class SVM
3 -- epsilon-SVR (regression)
4 -- nu-SVR (regression)
-t kernel_type : set type of kernel function (default 2)
0 -- linear: u'*v
1 -- polynomial: (gamma*u'*v + coef0)^degree
2 -- radial basis function: exp(-gamma*|u-v|^2)
3 -- sigmoid: tanh(gamma*u'*v + coef0)
4 -- precomputed kernel (kernel values in training_set_file)
-d degree : set degree in kernel function (default 3)
-g gamma : set gamma in kernel function (default 1/num_features)
-r coef0 : set coef0 in kernel function (default 0)
-c cost : set the parameter C of C-SVC, epsilon-SVR, and nu-SVR (default 1)
-n nu : set the parameter nu of nu-SVC, one-class SVM, and nu-SVR (default 0.5)
-p epsilon : set the epsilon in loss function of epsilon-SVR (default 0.1)
-m cachesize : set cache memory size in MB (default 100)
-e epsilon : set tolerance of termination criterion (default 0.001)
-h shrinking : whether to use the shrinking heuristics, 0 or 1 (default 1)
-b probability_estimates : whether to train a SVC or SVR model for probability estimates, 0 or 1 (default 0)
-wi weight : set the parameter C of class i to weight*C, for C-SVC (default 1)
-v n: n-fold cross validation mode
-q : quiet mode (no outputs)
"""
prob, param = None, None
if isinstance(arg1, (list, tuple)):
assert isinstance(arg2, (list, tuple))
y, x, options = arg1, arg2, arg3
param = svm_parameter(options)
prob = svm_problem(y, x, isKernel=(param.kernel_type == PRECOMPUTED))
elif isinstance(arg1, svm_problem):
prob = arg1
if isinstance(arg2, svm_parameter):
param = arg2
else:
param = svm_parameter(arg2)
if prob == None or param == None:
raise TypeError("Wrong types for the arguments")
if param.kernel_type == PRECOMPUTED:
for xi in prob.x_space:
idx, val = xi[0].index, xi[0].value
if xi[0].index != 0:
raise ValueError('Wrong input format: first column must be 0:sample_serial_number')
if val <= 0 or val > prob.n:
raise ValueError('Wrong input format: sample_serial_number out of range')
if param.gamma == 0 and prob.n > 0:
param.gamma = 1.0 / prob.n
libsvm.svm_set_print_string_function(param.print_func)
err_msg = libsvm.svm_check_parameter(prob, param)
if err_msg:
raise ValueError('Error: %s' % err_msg)
if param.cross_validation:
l, nr_fold = prob.l, param.nr_fold
target = (c_double * l)()
libsvm.svm_cross_validation(prob, param, nr_fold, target)
ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
if param.svm_type in [EPSILON_SVR, NU_SVR]:
print("Cross Validation Mean squared error = %g" % MSE)
print("Cross Validation Squared correlation coefficient = %g" % SCC)
return MSE
else:
print("Cross Validation Accuracy = %g%%" % ACC)
return ACC
else:
m = libsvm.svm_train(prob, param)
m = toPyModel(m)
# If prob is destroyed, data including SVs pointed by m can remain.
m.x_space = prob.x_space
return m
def svm_predict(y, x, m, options=""):
"""
svm_predict(y, x, m [, options]) -> (p_labels, p_acc, p_vals)
Predict data (y, x) with the SVM model m.
options:
-b probability_estimates: whether to predict probability estimates,
0 or 1 (default 0); for one-class SVM only 0 is supported.
-q : quiet mode (no outputs).
The return tuple contains
p_labels: a list of predicted labels
p_acc: a tuple including accuracy (for classification), mean-squared
error, and squared correlation coefficient (for regression).
p_vals: a list of decision values or probability estimates (if '-b 1'
is specified). If k is the number of classes, for decision values,
each element includes results of predicting k(k-1)/2 binary-class
SVMs. For probabilities, each element contains k values indicating
the probability that the testing instance is in each class.
Note that the order of classes here is the same as 'model.label'
field in the model structure.
"""
def info(s):
print(s)
predict_probability = 0
argv = options.split()
i = 0
while i < len(argv):
if argv[i] == '-b':
i += 1
predict_probability = int(argv[i])
elif argv[i] == '-q':
info = print_null
else:
raise ValueError("Wrong options")
i+=1
svm_type = m.get_svm_type()
is_prob_model = m.is_probability_model()
nr_class = m.get_nr_class()
pred_labels = []
pred_values = []
if predict_probability:
if not is_prob_model:
			raise ValueError("Model does not support probability estimates")
if svm_type in [NU_SVR, EPSILON_SVR]:
info("Prob. model for test data: target value = predicted value + z,\n"
"z: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g" % m.get_svr_probability());
nr_class = 0
prob_estimates = (c_double * nr_class)()
for xi in x:
xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
label = libsvm.svm_predict_probability(m, xi, prob_estimates)
values = prob_estimates[:nr_class]
pred_labels += [label]
pred_values += [values]
else:
if is_prob_model:
			info("Model supports probability estimates, but disabled in prediction.")
if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVC):
nr_classifier = 1
else:
nr_classifier = nr_class*(nr_class-1)//2
dec_values = (c_double * nr_classifier)()
for xi in x:
xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
label = libsvm.svm_predict_values(m, xi, dec_values)
if(nr_class == 1):
values = [1]
else:
values = dec_values[:nr_classifier]
pred_labels += [label]
pred_values += [values]
ACC, MSE, SCC = evaluations(y, pred_labels)
l = len(y)
if svm_type in [EPSILON_SVR, NU_SVR]:
info("Mean squared error = %g (regression)" % MSE)
info("Squared correlation coefficient = %g (regression)" % SCC)
else:
info("Accuracy = %g%% (%d/%d) (classification)" % (ACC, int(l*ACC/100), l))
return pred_labels, (ACC, MSE, SCC), pred_values
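# Minimal end-to-end usage sketch (not part of the original module). The file
# name 'heart_scale' refers to the sample dataset shipped with LIBSVM and is
# used only as an illustration; any file in LIBSVM's sparse format would work.
if __name__ == '__main__':
	y, x = svm_read_problem('heart_scale')
	# train on the first 200 instances with C=4, then predict the rest
	m = svm_train(y[:200], x[:200], '-c 4')
	p_labels, p_acc, p_vals = svm_predict(y[200:], x[200:], m)
	svm_save_model('heart_scale.model', m)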
|
bsd-3-clause
|
prasannav7/ggrc-core
|
src/ggrc/converters/query_helper.py
|
3
|
10819
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
import datetime
import collections
from sqlalchemy import and_
from sqlalchemy import not_
from sqlalchemy import or_
from ggrc.models.custom_attribute_value import CustomAttributeValue
from ggrc.models.reflection import AttributeInfo
from ggrc.models.relationship_helper import RelationshipHelper
from ggrc.converters import get_exportables
class BadQueryException(Exception):
pass
class QueryHelper(object):
""" Helper class for handling request queries
Primary use for this class is to get list of object ids for each object
defined in the query. All object ids must pass the query filters if they
are defined.
query object = [
{
object_name: search class name,
filters: {
relevant_filters:
these filters will return all ids of the "search class name" object
          that are mapped to objects defined in the dictionary inside the list.
[ list of filters joined by OR expression
[ list of filters joined by AND expression
{
"object_name": class of relevant object,
"slugs": list of relevant object slugs,
optional and if exists will be converted into ids
"ids": list of relevant object ids
}
]
],
object_filters: {
TODO: allow filtering by title, description and other object fields
}
}
}
]
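
  An illustrative query (the model names and ids here are hypothetical and not
  part of the original description):

    query = [{
        "object_name": "Program",
        "filters": {
            "expression": {
                "op": {"name": "relevant"},
                "object_name": "Audit",
                "ids": [1, 2],
            },
        },
    }]

  Passing such a list to QueryHelper(query).get_ids() fills each entry's
  "ids" key with the ids of the matching objects.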
"""
def __init__(self, query):
importable = get_exportables()
self.object_map = {o.__name__: o for o in importable.values()}
self.query = self.clean_query(query)
self.set_attr_name_map()
  def set_attr_name_map(self):
    """ build a map of attribute names and display names
Dict containing all display_name to attr_name mappings
for all objects used in the current query
Example:
{ Program: {"Program URL": "url", "Code": "slug", ...} ...}
"""
self.attr_name_map = {}
for object_query in self.query:
object_name = object_query["object_name"]
object_class = self.object_map[object_name]
aliases = AttributeInfo.gather_aliases(object_class)
self.attr_name_map[object_class] = {}
for key, value in aliases.items():
filter_by = None
if type(value) is dict:
filter_name = value.get("filter_by", None)
if filter_name is not None:
filter_by = getattr(object_class, filter_name, None)
value = value["display_name"]
if value:
self.attr_name_map[object_class][value.lower()] = (key.lower(),
filter_by)
custom_attrs = AttributeInfo.get_custom_attr_definitions(object_class)
for key, definition in custom_attrs.items():
if not key.startswith("__custom__:") or \
"display_name" not in definition:
continue
try:
attr_id = int(key[11:])
except Exception:
continue
filter_by = CustomAttributeValue.mk_filter_by_custom(object_class,
attr_id)
name = definition["display_name"].lower()
self.attr_name_map[object_class][name] = (name, filter_by)
def clean_query(self, query):
""" sanitize the query object """
for object_query in query:
filters = object_query.get("filters", {}).get("expression")
self.clean_filters(filters)
self.macro_expand_object_query(object_query)
return query
  def clean_filters(self, expression):
    """ prepare the filter expression for building the query """
if not expression or type(expression) != dict:
return
slugs = expression.get("slugs")
if slugs:
ids = expression.get("ids", [])
ids.extend(self.slugs_to_ids(expression["object_name"], slugs))
expression["ids"] = ids
try:
expression["ids"] = map(int, expression.get("ids", []))
except ValueError as e:
# catch missing relevant filter (undefined id)
if expression.get("op", {}).get("name", "") == "relevant":
raise BadQueryException("Invalid relevant filter for {}".format(
expression.get("object_name", "")))
raise e
self.clean_filters(expression.get("left"))
self.clean_filters(expression.get("right"))
def expression_keys(self, exp):
op = exp.get("op", {}).get("name", None)
if op in ["AND", "OR"]:
return self.expression_keys(exp["left"]).union(
self.expression_keys(exp["right"]))
left = exp.get("left", None)
if left is not None and isinstance(left, collections.Hashable):
return set([left])
else:
return set()
def macro_expand_object_query(self, object_query):
def expand_task_dates(exp):
if type(exp) is not dict or "op" not in exp:
return
op = exp["op"]["name"]
if op in ["AND", "OR"]:
expand_task_dates(exp["left"])
expand_task_dates(exp["right"])
elif type(exp["left"]) in [str, unicode]:
key = exp["left"]
if key in ["start", "end"]:
parts = exp["right"].split("/")
if len(parts) == 3:
try:
month, day, year = map(int, parts)
except Exception:
raise BadQueryException("Date must consist of numbers")
exp["left"] = key + "_date"
exp["right"] = datetime.date(year, month, day)
elif len(parts) == 2:
month, day = parts
exp["op"] = {"name": u"AND"}
exp["left"] = {
"op": {"name": op},
"left": "relative_" + key + "_month",
"right": month,
}
exp["right"] = {
"op": {"name": op},
"left": "relative_" + key + "_day",
"right": day,
}
elif len(parts) == 1:
exp["left"] = "relative_" + key + "_day"
else:
raise BadQueryException("Field {} should be a date of one of the"
" following forms: DD, MM/DD, MM/DD/YYYY"
.format(key))
if object_query["object_name"] == "TaskGroupTask":
filters = object_query.get("filters")
if filters is not None:
exp = filters["expression"]
keys = self.expression_keys(exp)
if "start" in keys or "end" in keys:
expand_task_dates(exp)
def get_ids(self):
""" get list of objects and their ids according to the query
Returns:
list of dicts: same query as the input with all ids that match the filter
"""
for object_query in self.query:
object_query["ids"] = self.get_object_ids(object_query)
return self.query
def get_object_ids(self, object_query):
""" get a set of object ids described in the filters """
object_name = object_query["object_name"]
expression = object_query.get("filters", {}).get("expression")
if expression is None:
return set()
object_class = self.object_map[object_name]
def autocast(o_key, value):
if type(o_key) not in [str, unicode]:
return value
key, _ = self.attr_name_map[object_class].get(o_key, (o_key, None))
# handle dates
if ("date" in key and "relative" not in key) or \
key in ["due_on", "requested_on"]:
if isinstance(value, datetime.date):
return value
try:
month, day, year = map(int, value.split("/"))
return datetime.date(year, month, day)
except Exception:
raise BadQueryException("Field \"{}\" expects a MM/DD/YYYY date"
.format(o_key))
# fallback
return value
def build_expression(exp):
if "op" not in exp:
return None
def relevant():
query = (self.query[exp["ids"][0]]
if exp["object_name"] == "__previous__" else exp)
return object_class.id.in_(
RelationshipHelper.get_ids_related_to(
object_name,
query["object_name"],
query["ids"],
)
)
def unknown():
raise BadQueryException("Unknown operator \"{}\""
.format(exp["op"]["name"]))
def with_key(key, p):
key = key.lower()
key, filter_by = self.attr_name_map[object_class].get(key, (key, None))
if hasattr(filter_by, "__call__"):
return filter_by(p)
else:
attr = getattr(object_class, key, None)
if attr is None:
raise BadQueryException("Bad query: object '{}' does "
"not have attribute '{}'."
.format(object_class.__name__, key))
return p(attr)
with_left = lambda p: with_key(exp["left"], p)
lift_bin = lambda f: f(build_expression(exp["left"]),
build_expression(exp["right"]))
def text_search():
existing_fields = self.attr_name_map[object_class]
text = "%{}%".format(exp["text"])
p = lambda f: f.ilike(text)
return or_(*(
with_key(field, p)
for field in object_query.get("fields", [])
if field in existing_fields
))
rhs = lambda: autocast(exp["left"], exp["right"])
ops = {
"AND": lambda: lift_bin(and_),
"OR": lambda: lift_bin(or_),
"=": lambda: with_left(lambda l: l == rhs()),
"!=": lambda: not_(with_left(
lambda l: l == rhs())),
"~": lambda: with_left(lambda l:
l.ilike("%{}%".format(rhs()))),
"!~": lambda: not_(with_left(
lambda l: l.ilike("%{}%".format(rhs())))),
"<": lambda: with_left(lambda l: l < rhs()),
">": lambda: with_left(lambda l: l > rhs()),
"relevant": relevant,
"text_search": text_search
}
return ops.get(exp["op"]["name"], unknown)()
query = object_class.query
filter_expression = build_expression(expression)
if filter_expression is not None:
query = query.filter(filter_expression)
object_ids = [o.id for o in query.all()]
return object_ids
def slugs_to_ids(self, object_name, slugs):
object_class = self.object_map.get(object_name)
if not object_class:
return []
ids = [c.id for c in object_class.query.filter(
object_class.slug.in_(slugs)).all()]
return ids
|
apache-2.0
|
danirus/django-comments-xtd
|
django_comments_xtd/tests/test_models.py
|
1
|
31353
|
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from datetime import datetime
from django.db.models.signals import pre_save
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import TestCase as DjangoTestCase
from django_comments_xtd import get_model
from django_comments_xtd.models import (XtdComment,
MaxThreadLevelExceededException,
publish_or_unpublish_on_pre_save)
from django_comments_xtd.tests.models import Article, Diary, MyComment
class ArticleBaseTestCase(DjangoTestCase):
def setUp(self):
self.article_1 = Article.objects.create(
title="September", slug="september", body="During September...")
self.article_2 = Article.objects.create(
title="October", slug="october", body="What I did on October...")
class XtdCommentManagerTestCase(ArticleBaseTestCase):
def setUp(self):
super(XtdCommentManagerTestCase, self).setUp()
self.article_ct = ContentType.objects.get(app_label="tests",
model="article")
self.site1 = Site.objects.get(pk=1)
self.site2 = Site.objects.create(domain='site2.com', name='site2.com')
def post_comment_1(self):
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article_1.id,
content_object=self.article_1,
site=self.site1,
comment="just a testing comment",
submit_date=datetime.now())
def post_comment_2(self):
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article_2.id,
content_object=self.article_2,
site=self.site1,
comment="yet another comment",
submit_date=datetime.now())
def post_comment_3(self):
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article_2.id,
content_object=self.article_2,
site=self.site1,
comment="and another one",
submit_date=datetime.now())
def post_comment_4(self):
XtdComment.objects.create(content_type=self.article_ct,
object_pk=self.article_1.id,
content_object=self.article_1,
site=self.site2,
comment="just a testing comment in site2",
submit_date=datetime.now())
def test_for_app_models(self):
# there is no comment posted yet to article_1 nor article_2
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 0)
self.post_comment_1()
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 1)
self.post_comment_2()
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 2)
self.post_comment_3()
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 3)
self.post_comment_4()
count = XtdComment.objects.for_app_models("tests.article").count()
self.assertEqual(count, 4)
def test_multi_site_for_app_models(self):
self.post_comment_1() # To site1.
self.post_comment_4() # To site2.
count_site1 = XtdComment.objects.for_app_models("tests.article",
site=self.site1).count()
self.assertEqual(count_site1, 1)
count_site2 = XtdComment.objects.for_app_models("tests.article",
site=self.site2).count()
self.assertEqual(count_site2, 1)
# In order to test 'save' and '_calculate_thread_data' methods, simulate the
# following threads, in order of arrival:
#
# testcase cmt.id parent level-0 level-1 level-2 level-3
# step1 1 - c1 <- c1
# step1 2 - c2 <- c2
# step2 3 1 -- c3 <- c3.c1
# step2 4 1 -- c4 <- c4.c1
# step3 5 2 -- c5 <- c5.c2
# step4 6 5 -- -- c6 <- c6.c5.c2
# step4 7 4 -- -- c7 <- c7.c4.c1
# step5 8 3 -- -- c8 <- c8.c3.c1
# step5 9 - c9 <- c9
# step6 10 7 c10 <- c10.c7.c4.c1
    # step6    11       8                                    c11 <- c11.c8.c3.c1
def thread_test_step_1(article, model=get_model(), **kwargs):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 1 with parent_id 0
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c1",
submit_date=datetime.now(),
**kwargs)
# post Comment 2 with parent_id 0
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c2",
submit_date=datetime.now(),
**kwargs)
def thread_test_step_2(article, model=get_model(), **kwargs):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 3 to parent_id 1
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c3.c1",
submit_date=datetime.now(),
parent_id=1,
**kwargs)
# post Comment 4 to parent_id 1
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c4.c1",
submit_date=datetime.now(),
parent_id=1,
**kwargs)
def thread_test_step_3(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 5 to parent_id 2
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c5.c2",
submit_date=datetime.now(),
parent_id=2)
def thread_test_step_4(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 6 to parent_id 5
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c6.c5.c2",
submit_date=datetime.now(),
parent_id=5)
# post Comment 7 to parent_id 4
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c7.c4.c1",
submit_date=datetime.now(),
parent_id=4)
def thread_test_step_5(article):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
# post Comment 8 to parent_id 3
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c8.c3.c1",
submit_date=datetime.now(),
parent_id=3)
# post Comment 9 with parent_id 0
XtdComment.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
site=site,
comment="c3",
submit_date=datetime.now())
def thread_test_step_6(article, model=get_model(), **kwargs):
article_ct = ContentType.objects.get(app_label="tests", model="article")
if not "site" in kwargs:
kwargs["site"] = Site.objects.get(pk=1)
# post Comment 10 to parent_id 7
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
comment="c10.c7.c4.c1",
submit_date=datetime.now(),
parent_id=7,
**kwargs)
# post Comment 11 to parent_id 8
model.objects.create(content_type=article_ct,
object_pk=article.id,
content_object=article,
comment="c11.c8.c3.c1",
submit_date=datetime.now(),
parent_id=8,
**kwargs)
class BaseThreadStep1TestCase(ArticleBaseTestCase):
def setUp(self):
super(BaseThreadStep1TestCase, self).setUp()
thread_test_step_1(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 0
self.c2 # -> 2 2 2 0 1 0
) = XtdComment.objects.all()
def test_threaded_comments_step_1_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 0)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 0)
class ThreadStep2TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep2TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 2
self.c3, # -> 3 1 1 1 2 0
self.c4, # -> 4 1 1 1 3 0
self.c2 # -> 2 2 2 0 1 0
) = XtdComment.objects.all()
def test_threaded_comments_step_2_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 2)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 0)
def test_threaded_comments_step_2_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 0)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 3)
self.assertEqual(self.c4.nested_count, 0)
class ThreadStep3TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep3TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
( # -> content: cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 2
self.c3, # -> 3 1 1 1 2 0
self.c4, # -> 4 1 1 1 3 0
self.c2, # -> 2 2 2 0 1 1
self.c5 # -> 5 2 2 1 2 0
) = XtdComment.objects.all()
def test_threaded_comments_step_3_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 2)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 1)
def test_threaded_comments_step_3_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 0)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 3)
self.assertEqual(self.c4.nested_count, 0)
# comment 5
self.assertTrue(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assertTrue(self.c5.level == 1 and self.c5.order == 2)
self.assertEqual(self.c5.nested_count, 0)
class ThreadStep4TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep4TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 3
self.c3, # -> 3 1 1 1 2 0
self.c4, # -> 4 1 1 1 3 1
self.c7, # -> 7 1 4 2 4 0
self.c2, # -> 2 2 2 0 1 2
self.c5, # -> 5 2 2 1 2 1
self.c6 # -> 6 2 5 2 3 0
) = XtdComment.objects.all()
def test_threaded_comments_step_4_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 3)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 2)
def test_threaded_comments_step_4_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 0)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 3)
self.assertEqual(self.c4.nested_count, 1)
# comment 5
self.assertTrue(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assertTrue(self.c5.level == 1 and self.c5.order == 2)
self.assertEqual(self.c5.nested_count, 1)
def test_threaded_comments_step_4_level_2(self):
# comment 6
self.assertTrue(self.c6.parent_id == 5 and self.c6.thread_id == 2)
self.assertTrue(self.c6.level == 2 and self.c6.order == 3)
self.assertEqual(self.c6.nested_count, 0)
# comment 7
self.assertTrue(self.c7.parent_id == 4 and self.c7.thread_id == 1)
self.assertTrue(self.c7.level == 2 and self.c7.order == 4)
self.assertEqual(self.c7.nested_count, 0)
class ThreadStep5TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep5TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
thread_test_step_5(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 4
self.c3, # -> 3 1 1 1 2 1
self.c8, # -> 8 1 3 2 3 0
self.c4, # -> 4 1 1 1 4 1
self.c7, # -> 7 1 4 2 5 0
self.c2, # -> 2 2 2 0 1 2
self.c5, # -> 5 2 2 1 2 1
self.c6, # -> 6 2 5 2 3 0
self.c9 # -> 9 9 9 0 1 0
) = XtdComment.objects.all()
def test_threaded_comments_step_5_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 4)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 2)
# comment 9
self.assertTrue(self.c9.parent_id == 9 and self.c9.thread_id == 9)
self.assertTrue(self.c9.level == 0 and self.c9.order == 1)
self.assertEqual(self.c9.nested_count, 0)
def test_threaded_comments_step_5_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 1)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 4) # changed
self.assertEqual(self.c4.nested_count, 1)
# comment 5
self.assertTrue(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assertTrue(self.c5.level == 1 and self.c5.order == 2)
self.assertEqual(self.c5.nested_count, 1)
def test_threaded_comments_step_5_level_2(self):
# comment 6
self.assertTrue(self.c6.parent_id == 5 and self.c6.thread_id == 2)
self.assertTrue(self.c6.level == 2 and self.c6.order == 3)
self.assertEqual(self.c6.nested_count, 0)
# comment 7
self.assertTrue(self.c7.parent_id == 4 and self.c7.thread_id == 1)
self.assertTrue(self.c7.level == 2 and self.c7.order == 5) # changed
self.assertEqual(self.c7.nested_count, 0)
# comment 8
self.assertTrue(self.c8.parent_id == 3 and self.c8.thread_id == 1)
self.assertTrue(self.c8.level == 2 and self.c8.order == 3)
self.assertEqual(self.c8.nested_count, 0)
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_MAX_THREAD_LEVEL=2)
def test_exceed_max_thread_level_raises_exception(self):
article_ct = ContentType.objects.get(app_label="tests", model="article")
site = Site.objects.get(pk=1)
with self.assertRaises(MaxThreadLevelExceededException):
XtdComment.objects.create(content_type=article_ct,
object_pk=self.article_1.id,
content_object=self.article_1,
site=site,
comment="cmt 1 to cmt 2 to cmt 1",
submit_date=datetime.now(),
parent_id=8) # already max thread level
def test_removing_c4_withdraws_c7_and_updates_nested_count(self):
cm4 = XtdComment.objects.get(pk=4)
self.assertEqual(cm4.nested_count, 1)
cm1 = XtdComment.objects.get(pk=1)
self.assertEqual(cm1.nested_count, 4)
# Remove comment 4, save, and check again.
cm4.is_removed = True
cm4.save()
cm4 = XtdComment.objects.get(pk=4)
self.assertEqual(cm4.nested_count, 1)
cm1 = XtdComment.objects.get(pk=1)
self.assertEqual(cm1.nested_count, 3)
class ThreadStep6TestCase(ArticleBaseTestCase):
def setUp(self):
super(ThreadStep6TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
thread_test_step_3(self.article_1)
thread_test_step_4(self.article_1)
thread_test_step_5(self.article_1)
thread_test_step_6(self.article_1)
( # content -> cmt.id thread_id parent_id level order nested
self.c1, # -> 1 1 1 0 1 6
self.c3, # -> 3 1 1 1 2 2
self.c8, # -> 8 1 3 2 3 1
self.c11, # -> 11 1 8 3 4 0
self.c4, # -> 4 1 1 1 5 2
self.c7, # -> 7 1 4 2 6 1
self.c10, # -> 10 1 7 3 7 0
self.c2, # -> 2 2 2 0 1 2
self.c5, # -> 5 2 2 1 2 1
self.c6, # -> 6 2 5 2 3 0
self.c9 # -> 9 9 9 0 1 0
) = XtdComment.objects.all()
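        # Editorial summary (derived from the table above, not part of the
        # original test module): after the six steps the comment forest is
        #
        #   thread 1: c1 -> [c3 -> [c8 -> [c11]], c4 -> [c7 -> [c10]]]
        #   thread 2: c2 -> [c5 -> [c6]]
        #   thread 9: c9 (no replies)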
def test_threaded_comments_step_6_level_0(self):
# comment 1
self.assertTrue(self.c1.parent_id == 1 and self.c1.thread_id == 1)
self.assertTrue(self.c1.level == 0 and self.c1.order == 1)
self.assertEqual(self.c1.nested_count, 6)
# comment 2
self.assertTrue(self.c2.parent_id == 2 and self.c2.thread_id == 2)
self.assertTrue(self.c2.level == 0 and self.c2.order == 1)
self.assertEqual(self.c2.nested_count, 2)
# comment 9
self.assertTrue(self.c9.parent_id == 9 and self.c9.thread_id == 9)
self.assertTrue(self.c9.level == 0 and self.c9.order == 1)
self.assertEqual(self.c9.nested_count, 0)
def test_threaded_comments_step_6_level_1(self):
# comment 3
self.assertTrue(self.c3.parent_id == 1 and self.c3.thread_id == 1)
self.assertTrue(self.c3.level == 1 and self.c3.order == 2)
self.assertEqual(self.c3.nested_count, 2)
# comment 4
self.assertTrue(self.c4.parent_id == 1 and self.c4.thread_id == 1)
self.assertTrue(self.c4.level == 1 and self.c4.order == 5)
self.assertEqual(self.c4.nested_count, 2)
# comment 5
self.assertTrue(self.c5.parent_id == 2 and self.c5.thread_id == 2)
self.assertTrue(self.c5.level == 1 and self.c5.order == 2)
self.assertEqual(self.c5.nested_count, 1)
def test_threaded_comments_step_6_level_2(self):
# comment 8
self.assertTrue(self.c8.parent_id == 3 and self.c8.thread_id == 1)
self.assertTrue(self.c8.level == 2 and self.c8.order == 3)
self.assertEqual(self.c8.nested_count, 1)
# comment 7
self.assertTrue(self.c7.parent_id == 4 and self.c7.thread_id == 1)
self.assertTrue(self.c7.level == 2 and self.c7.order == 6)
self.assertEqual(self.c7.nested_count, 1)
# comment 6
self.assertTrue(self.c6.parent_id == 5 and self.c6.thread_id == 2)
self.assertTrue(self.c6.level == 2 and self.c6.order == 3)
self.assertEqual(self.c6.nested_count, 0)
def test_threaded_comments_step_6_level_3(self):
# comment 10
self.assertTrue(self.c10.parent_id == 7 and self.c10.thread_id == 1)
self.assertTrue(self.c10.level == 3 and self.c10.order == 7)
self.assertEqual(self.c10.nested_count, 0)
# comment 11
self.assertTrue(self.c11.parent_id == 8 and self.c11.thread_id == 1)
self.assertTrue(self.c11.level == 3 and self.c11.order == 4)
self.assertEqual(self.c11.nested_count, 0)
def add_comment_to_diary_entry(diary_entry):
diary_ct = ContentType.objects.get(app_label="tests", model="diary")
site = Site.objects.get(pk=1)
XtdComment.objects.create(content_type=diary_ct,
object_pk=diary_entry.id,
content_object=diary_entry,
site=site,
comment="cmt to day in diary",
submit_date=datetime.now())
class DiaryBaseTestCase(DjangoTestCase):
def setUp(self):
self.day_in_diary = Diary.objects.create(body="About Today...")
add_comment_to_diary_entry(self.day_in_diary)
def test_max_thread_level_by_app_model(self):
diary_ct = ContentType.objects.get(app_label="tests", model="diary")
site = Site.objects.get(pk=1)
with self.assertRaises(MaxThreadLevelExceededException):
XtdComment.objects.create(content_type=diary_ct,
object_pk=self.day_in_diary.id,
content_object=self.day_in_diary,
site=site,
comment="cmt to cmt to day in diary",
submit_date=datetime.now(),
parent_id=1) # already max thread level
class PublishOrUnpublishNestedComments_1_TestCase(ArticleBaseTestCase):
    # Add a threaded comment structure (c1, c2, c3, c4) and verify that
    # removing c1 unpublishes its children c3 and c4.
def setUp(self):
super(PublishOrUnpublishNestedComments_1_TestCase, self).setUp()
thread_test_step_1(self.article_1)
thread_test_step_2(self.article_1)
#
# These two lines create the following comments:
#
# ( # content -> cmt.id thread_id parent_id level order nested
# cm1, # -> 1 1 1 0 1 2
# cm3, # -> 3 1 1 1 2 0
# cm4, # -> 4 1 1 1 3 0
# cm2, # -> 2 2 2 0 1 0
# ) = XtdComment.objects.all()
def test_all_comments_are_public_and_have_not_been_removed(self):
for cm in XtdComment.objects.all():
self.assertTrue(cm.is_public)
self.assertFalse(cm.is_removed)
def test_removing_c1_unpublishes_c3_and_c4(self):
cm1 = XtdComment.objects.get(pk=1)
self.assertEqual(cm1.nested_count, 2) # nested_count should be 2.
cm1.is_removed = True
cm1.save()
cm1 = XtdComment.objects.get(pk=1)
self.assertTrue(cm1.is_public)
self.assertTrue(cm1.is_removed)
        # cm1 is still public, so its nested_count doesn't change.
self.assertEqual(cm1.nested_count, 2)
cm3 = XtdComment.objects.get(pk=3)
self.assertFalse(cm3.is_public)
self.assertFalse(cm3.is_removed)
cm4 = XtdComment.objects.get(pk=4)
self.assertFalse(cm4.is_public)
self.assertFalse(cm4.is_removed)
_model = "django_comments_xtd.tests.models.MyComment"
class PublishOrUnpublishNestedComments_2_TestCase(ArticleBaseTestCase):
    # Mock the settings so that the project uses a customized comment model
    # (django_comments_xtd.tests.models.MyComment) and repeat the same logic
    # with MyComment instances. Then remove c1 and verify that c3 and c4 get
    # unpublished.
def setUp(self):
super(PublishOrUnpublishNestedComments_2_TestCase, self).setUp()
thread_test_step_1(self.article_1, model=MyComment,
title="Can't be empty 1")
thread_test_step_2(self.article_1, model=MyComment,
title="Can't be empty 2")
#
# These two lines create the following comments:
#
# ( # content -> cmt.id thread_id parent_id level order nested
# cm1, # -> 1 1 1 0 1 2
# cm3, # -> 3 1 1 1 2 0
# cm4, # -> 4 1 1 1 3 0
# cm2, # -> 2 2 2 0 1 0
# ) = MyComment.objects.all()
def test_all_comments_are_public_and_have_not_been_removed(self):
for cm in MyComment.objects.all():
self.assertTrue(cm.is_public)
self.assertFalse(cm.is_removed)
@patch.multiple('django_comments_xtd.conf.settings',
COMMENTS_XTD_MODEL=_model)
def test_removing_c1_unpublishes_c3_and_c4(self):
        # Register the receiver again. It was registered in apps.py, but we
        # have patched COMMENTS_XTD_MODEL and we do not fake the app's ready()
        # call, so it is easier to register the receiver again and test only
        # what depends on django-comments-xtd.
model_app_label = get_model()._meta.label
pre_save.connect(publish_or_unpublish_on_pre_save,
sender=model_app_label)
cm1 = MyComment.objects.get(pk=1)
cm1.is_removed = True
cm1.save()
self.assertTrue(cm1.is_public)
self.assertTrue(cm1.is_removed)
cm3 = MyComment.objects.get(pk=3)
self.assertFalse(cm3.is_public)
self.assertFalse(cm3.is_removed)
cm4 = MyComment.objects.get(pk=4)
self.assertFalse(cm4.is_public)
self.assertFalse(cm4.is_removed)
|
bsd-2-clause
|
jonypx09/new_kernel_kylessopen
|
scripts/rt-tester/rt-tester.py
|
11005
|
5307
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
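# Illustrative test lines (editorial sketch, not part of the original script),
# in the "command: opcode: threadid: data" format parsed by the loop below:
#
#   C: locknowait: 0: 0   -> write opcode 4 ("locknowait") with lock nr 0 to
#                            thread 0's command file
#   W: locked: 0: 0       -> poll thread 0's status until lock 0 reports the
#                            "locked" state
#   T: opcodeeq: 0: 4     -> check once that thread 0's last opcode equals 4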
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
gpl-2.0
|
caio2k/kernel-n9
|
scripts/tracing/draw_functrace.py
|
14676
|
3560
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual, hierarchical
tree of calls. Only the function names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
	has the name given by func. If no such parent exists,
	create the function as a new child of the root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
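# Illustrative example (editorial addition, not part of the original script):
# a raw ftrace line such as
#
#   bash-4251  [001]  6306.400566: free_pages_and_swap_cache <-unmap_region
#
# is split by parseLine() into ('6306.400566', 'free_pages_and_swap_cache',
# 'unmap_region'), i.e. (call time, callee, caller), which main() then hooks
# into the CallTree.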
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
gpl-2.0
|
ramiro/scrapy
|
scrapy/commands/settings.py
|
33
|
1511
|
from __future__ import print_function
from scrapy.command import ScrapyCommand
class Command(ScrapyCommand):
requires_project = False
default_settings = {'LOG_ENABLED': False}
def syntax(self):
return "[options]"
def short_desc(self):
return "Get settings values"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("--get", dest="get", metavar="SETTING", \
help="print raw setting value")
parser.add_option("--getbool", dest="getbool", metavar="SETTING", \
help="print setting value, intepreted as a boolean")
parser.add_option("--getint", dest="getint", metavar="SETTING", \
help="print setting value, intepreted as an integer")
parser.add_option("--getfloat", dest="getfloat", metavar="SETTING", \
help="print setting value, intepreted as an float")
parser.add_option("--getlist", dest="getlist", metavar="SETTING", \
help="print setting value, intepreted as an float")
def run(self, args, opts):
settings = self.crawler_process.settings
if opts.get:
print(settings.get(opts.get))
elif opts.getbool:
print(settings.getbool(opts.getbool))
elif opts.getint:
print(settings.getint(opts.getint))
elif opts.getfloat:
print(settings.getfloat(opts.getfloat))
elif opts.getlist:
print(settings.getlist(opts.getlist))
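# Illustrative usage (editorial sketch, not part of the original module); the
# setting names are standard Scrapy settings used here only as examples:
#
#   $ scrapy settings --get BOT_NAME
#   $ scrapy settings --getbool HTTPCACHE_ENABLED
#   $ scrapy settings --getint DOWNLOAD_TIMEOUT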
|
bsd-3-clause
|
Leoniela/nipype
|
nipype/interfaces/camino/tests/test_auto_LinRecon.py
|
9
|
1320
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.camino.odf import LinRecon
def test_LinRecon_inputs():
input_map = dict(args=dict(argstr='%s',
),
bgmask=dict(argstr='-bgmask %s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=1,
),
log=dict(argstr='-log',
),
normalize=dict(argstr='-normalize',
),
out_file=dict(argstr='> %s',
genfile=True,
position=-1,
),
qball_mat=dict(argstr='%s',
mandatory=True,
position=3,
),
scheme_file=dict(argstr='%s',
mandatory=True,
position=2,
),
terminal_output=dict(nohash=True,
),
)
inputs = LinRecon.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_LinRecon_outputs():
output_map = dict(recon_data=dict(),
)
outputs = LinRecon.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
bsd-3-clause
|
MattsFleaMarket/python-for-android
|
python3-alpha/python3-src/Lib/multiprocessing/heap.py
|
50
|
8582
|
#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import bisect
import mmap
import tempfile
import os
import sys
import threading
import itertools
import _multiprocessing
from multiprocessing.util import Finalize, info
from multiprocessing.forking import assert_spawning
__all__ = ['BufferWrapper']
#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':
from _multiprocessing import win32
class Arena(object):
_counter = itertools.count()
def __init__(self, size):
self.size = size
self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter))
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == 0, 'tagname already in use'
self._state = (self.size, self.name)
def __getstate__(self):
assert_spawning(self)
return self._state
def __setstate__(self, state):
self.size, self.name = self._state = state
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
else:
class Arena(object):
def __init__(self, size):
self.buffer = mmap.mmap(-1, size)
self.size = size
self.name = None
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
_alignment = 8
def __init__(self, size=mmap.PAGESIZE):
self._lastpid = os.getpid()
self._lock = threading.Lock()
self._size = size
self._lengths = []
self._len_to_seq = {}
self._start_to_block = {}
self._stop_to_block = {}
self._allocated_blocks = set()
self._arenas = []
# list of pending blocks to free - see free() comment below
self._pending_free_blocks = []
@staticmethod
def _roundup(n, alignment):
# alignment must be a power of 2
mask = alignment - 1
return (n + mask) & ~mask
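    # Worked example (editorial note, not part of the original module):
    # _roundup(13, 8) evaluates (13 + 7) & ~7 == 20 & ~7 == 16, i.e. 13 is
    # rounded up to the next multiple of the power-of-two alignment 8.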
def _malloc(self, size):
# returns a large enough block -- it might be much larger
i = bisect.bisect_left(self._lengths, size)
if i == len(self._lengths):
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
self._size *= 2
info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block
def _free(self, block):
# free location and try to merge with neighbours
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_block)
try:
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block
def _absorb(self, block):
# deregister this block so it can be merged with a neighbour
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
length = stop - start
seq = self._len_to_seq[length]
seq.remove(block)
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)
return start, stop
def _free_pending_blocks(self):
# Free all the blocks in the pending list - called with the lock held.
while True:
try:
block = self._pending_free_blocks.pop()
except IndexError:
break
self._allocated_blocks.remove(block)
self._free(block)
def free(self, block):
# free a block returned by malloc()
# Since free() can be called asynchronously by the GC, it could happen
# that it's called while self._lock is held: in that case,
# self._lock.acquire() would deadlock (issue #12352). To avoid that, a
# trylock is used instead, and if the lock can't be acquired
# immediately, the block is added to a list of blocks to be freed
# synchronously sometimes later from malloc() or free(), by calling
# _free_pending_blocks() (appending and retrieving from a list is not
# strictly thread-safe but under cPython it's atomic thanks to the GIL).
assert os.getpid() == self._lastpid
if not self._lock.acquire(False):
# can't acquire the lock right now, add the block to the list of
# pending blocks to free
self._pending_free_blocks.append(block)
else:
# we hold the lock
try:
self._free_pending_blocks()
self._allocated_blocks.remove(block)
self._free(block)
finally:
self._lock.release()
def malloc(self, size):
# return a block of right size (possibly rounded up)
assert 0 <= size < sys.maxsize
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
self._lock.acquire()
self._free_pending_blocks()
try:
size = self._roundup(max(size,1), self._alignment)
(arena, start, stop) = self._malloc(size)
new_stop = start + size
if new_stop < stop:
self._free((arena, new_stop, stop))
block = (arena, start, new_stop)
self._allocated_blocks.add(block)
return block
finally:
self._lock.release()
#
# Class representing a chunk of an mmap -- can be inherited
#
class BufferWrapper(object):
_heap = Heap()
def __init__(self, size):
assert 0 <= size < sys.maxsize
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
Finalize(self, BufferWrapper._heap.free, args=(block,))
def get_address(self):
(arena, start, stop), size = self._state
address, length = _multiprocessing.address_of_buffer(arena.buffer)
assert size <= length
return address + start
def get_size(self):
return self._state[1]
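# Illustrative usage sketch (editorial addition, not part of the stdlib
# module); the size used below is arbitrary:
#
#   wrapper = BufferWrapper(1024)    # carve 1024 bytes out of the shared heap
#   addr = wrapper.get_address()     # address of the block within its arena
#   nbytes = wrapper.get_size()      # 1024
#
# The block is handed back to the Heap automatically by the Finalize callback
# registered in BufferWrapper.__init__ once the wrapper is garbage collected.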
|
apache-2.0
|
jose36/jmdl2
|
servers/facebook.py
|
44
|
2955
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for external Facebook videos
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[facebook.py] get_video_url(page_url='%s')" % page_url)
page_url = page_url.replace("amp;","")
data = scrapertools.cache_page(page_url)
logger.info("data="+data)
video_urls = []
patron = "video_src.*?(http.*?)%22%2C%22video_timestamp"
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for match in matches:
videourl = match
logger.info(match)
videourl = videourl.replace('%5C','')
videourl = urllib.unquote(videourl)
video_urls.append( [ "[facebook]" , videourl ] )
for video_url in video_urls:
logger.info("[facebook.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
    # Facebook for AnimeID src="http://www.facebook.com/v/194008590634623" type="application/x-shockwave-flash"
    # Facebook for Buena Isla src='http://www.facebook.com/v/134004263282552_44773.mp4&video_title=Vid&v=1337'type='application/x-shockwave-flash'
patronvideos = 'http://www.facebook.com/v/([\d]+)'
logger.info("[facebook.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[Facebook]"
url = "http://www.facebook.com/video/external_video.php?v="+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'facebook' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
    # These videos are actually direct links
#http://video.ak.facebook.com/cfs-ak-ash2/33066/239/133241463372257_27745.mp4
patronvideos = '(http://video.ak.facebook.com/.*?\.mp4)'
logger.info("[facebook.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[facebook]"
url = match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'directo' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
def test():
    video_urls = get_video_url("http://www.facebook.com/v/194008590634623")
    return len(video_urls)>0
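# Illustrative usage sketch (editorial addition, not part of the original
# connector); the HTML snippet is hypothetical:
#
#   html = 'src="http://www.facebook.com/v/194008590634623"'
#   for title, url, server in find_videos(html):
#       print title, url, server
#   # -> [Facebook] http://www.facebook.com/video/external_video.php?v=194008590634623 facebook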
|
gpl-2.0
|
alsrgv/tensorflow
|
tensorflow/python/data/experimental/benchmarks/snapshot_dataset_benchmark.py
|
2
|
4301
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.snapshot()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.client import session
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors_impl as errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SnapshotDatasetBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.experimental.snapshot()`."""
def _makeSnapshotDirectory(self):
tmp_dir = test.get_temp_dir()
tmp_dir = os.path.join(tmp_dir, "snapshot")
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
return tmp_dir
def _createSimpleDataset(self, num_elems, tmp_dir=None,
compression=snapshot.COMPRESSION_NONE):
if not tmp_dir:
tmp_dir = self._makeSnapshotDirectory()
dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
dataset = dataset.map(
lambda x: gen_array_ops.broadcast_to(x, [50, 50, 3]))
dataset = dataset.repeat(num_elems)
dataset = dataset.apply(snapshot.snapshot(tmp_dir, compression=compression))
return dataset
def _consumeDataset(self, dataset, num_elems):
dataset = dataset.skip(num_elems)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
try:
sess.run(next_element)
except errors.OutOfRangeError:
pass
def benchmarkWriteSnapshotGzipCompression(self):
num_elems = 500000
dataset = self._createSimpleDataset(
num_elems, compression=snapshot.COMPRESSION_GZIP)
self.run_and_report_benchmark(dataset, num_elems, "write_gzip",
warmup=False, iters=1)
def benchmarkWriteSnapshotSimple(self):
num_elems = 500000
dataset = self._createSimpleDataset(num_elems)
# We only run one iteration here because running multiple iterations will
# cause the later iterations to simply read from the already written
# snapshot rather than write a new one.
self.run_and_report_benchmark(dataset, num_elems, "write_simple",
warmup=False, iters=1)
def benchmarkPassthroughSnapshotSimple(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(num_elems, tmp_dir)
# Consume only 1 element, thus making sure we don't finalize.
self._consumeDataset(dataset, 1)
self.run_and_report_benchmark(dataset, num_elems, "passthrough_simple")
def benchmarkReadSnapshotSimple(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(num_elems, tmp_dir)
# consume all the elements to let snapshot write things to disk
self._consumeDataset(dataset, num_elems)
self.run_and_report_benchmark(dataset, num_elems, "read_simple")
def benchmarkReadSnapshotGzipCompression(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(
num_elems, tmp_dir, compression=snapshot.COMPRESSION_GZIP)
self._consumeDataset(dataset, num_elems)
self.run_and_report_benchmark(dataset, num_elems, "read_gzip")
if __name__ == "__main__":
test.main()
|
apache-2.0
|
melodous/designate
|
designate/openstack/common/fixture/config.py
|
69
|
3062
|
#
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo.config import cfg
import six
class Config(fixtures.Fixture):
"""Allows overriding configuration settings for the test.
`conf` will be reset on cleanup.
"""
def __init__(self, conf=cfg.CONF):
self.conf = conf
def setUp(self):
super(Config, self).setUp()
        # NOTE(morganfainberg): unregister must be added to cleanup before
        # reset because cleanup works in reverse order of registered items,
        # and a reset must occur before options can be unregistered.
self.addCleanup(self._unregister_config_opts)
self.addCleanup(self.conf.reset)
self._registered_config_opts = {}
def config(self, **kw):
"""Override configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a `group` argument is supplied, the overrides are applied to
the specified configuration option group, otherwise the overrides
are applied to the ``default`` group.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
self.conf.set_override(k, v, group)
def _unregister_config_opts(self):
for group in self._registered_config_opts:
self.conf.unregister_opts(self._registered_config_opts[group],
group=group)
def register_opt(self, opt, group=None):
"""Register a single option for the test run.
Options registered in this manner will automatically be unregistered
during cleanup.
If a `group` argument is supplied, it will register the new option
to that group, otherwise the option is registered to the ``default``
group.
"""
self.conf.register_opt(opt, group=group)
self._registered_config_opts.setdefault(group, set()).add(opt)
def register_opts(self, opts, group=None):
"""Register multiple options for the test run.
This works in the same manner as register_opt() but takes a list of
options as the first argument. All arguments will be registered to the
same group if the ``group`` argument is supplied, otherwise all options
will be registered to the ``default`` group.
"""
for opt in opts:
self.register_opt(opt, group=group)
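# Illustrative usage sketch (editorial addition, not part of the original
# module); the test class and option name below are hypothetical:
#
#   class MyTest(testtools.TestCase):
#       def test_override(self):
#           fixture = self.useFixture(Config())
#           fixture.config(verbose=True)
#           ...  # cfg.CONF.verbose is overridden here; reset on cleanup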
|
apache-2.0
|
opax/exist
|
bin/deprecated/query.py
|
18
|
8079
|
#!/usr/bin/python
# eXist xml document repository and xpath implementation
# Copyright (C) 2001, Wolfgang M. Meier ([email protected])
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import httplib, getopt, sys, readline
from string import split, replace, atoi, rfind
import re, time
class eXistClient:
host = '127.0.0.1:8088'
requestFile = ''
xslStyle = ''
display = 1
start = 1
howmany = 15
outfile = ''
indent = 'true'
def __init__(self, args):
optlist, args = getopt.getopt(args[1:], 'hqis:b:p:')
quiet = 0
for i in optlist:
if i[0] == '-h':
self.printUsage()
sys.exit(0)
elif i[0] == '-s':
self.host = i[1]
elif i[0] == '-b':
self.benchmark(i[1])
return
elif i[0] == '-q':
                quiet = 1
elif i[0] == '-i':
self.indent = 'false'
elif i[0] == '-p':
self.parse(i[1], args)
return
if not quiet:
self.printBanner()
if len(args) < 1:
self.interactive()
return
else:
try:
freq = open(args[0], 'r')
except IOError:
print 'Unable to open file ', args[0]
sys.exit(0)
else:
req = freq.read()
freq.close()
self.doQuery(req)
def interactive(self):
print '\npress h or ? for help on available commands.'
while 1:
s = raw_input('exist> ')
line = split(s, ' ', 1)
if line[0] in ('find', 'f'):
r = self.queryRequest(line[1], 1, self.howmany)
print r
resp = self.doQuery(r)
if self.outfile:
o = open(self.outfile, 'w')
o.write(resp)
print 'output written to ', self.outfile
else:
print '\nserver responded:\n'
print resp
elif line[0] in ('get', 'g'):
args = split(line[1])
if len(args) > 1:
self.getRequest(args[0], args[1])
else:
self.getRequest(args[0])
elif line[0] in ('url', 'u'):
self.host = line[1]
print 'set host address = %s' % self.host
elif line[0] in ('display', 'd'):
self.setDisplay(split(line[1]))
elif line[0] in ('output', 'o'):
self.outfile = line[1]
print 'set output file = %s' % self.outfile
elif line[0] in ('bench', 'b'):
self.benchmark(line[1])
elif line[0] in ('remove', 'r'):
self.remove(line[1])
elif line[0] in ('parse', 'p'):
args = split(line[1], ' ', 1)
if len(args) > 1:
self.parse(args[0], [ args[1] ])
else:
self.parse(args[0], [])
elif line[0] in ('help', '?', 'h'):
self.getHelp()
elif line[0] in ('quit', 'q'):
break
else:
print 'unknown command: ' + `line[0]`
def setDisplay(self, line):
self.display = 1
for i in line:
self.setArg(i)
def setArg(self, arg):
if arg in ('summary', 's'):
self.display = 0
print 'summarize = %i' % self.display
elif arg in ('all', 'a'):
self.display = 1
print 'summarize = %i' % self.display
else:
self.howmany = atoi(arg)
print 'howmany = %s' % self.howmany
def getRequest(self, document, gid = ""):
if gid != "":
gid = 'id="%s"'
temp = """
<exist:request xmlns:exist="http://exist.sourceforge.net/NS/exist">
<exist:display indent="%s"/>
<exist:get document="%s" %s/>
</exist:request>
"""
req = temp % (self.indent, document, gid)
print req
resp = self.doQuery(req)
if self.outfile:
o = open(self.outfile, 'w')
o.write(resp)
print 'output written to ', self.outfile
else:
print '\nserver responded:\n'
print resp
def queryRequest(self, query, start, howmany):
temp = """
<exist:request xmlns:exist="http://exist.sourceforge.net/NS/exist">
<exist:query>%s</exist:query>
<exist:%s indent="%s" howmany="%i" start="%i"/>
</exist:request>
"""
if self.display:
disp = "display"
else:
disp = "summarize"
return temp % ( self.escape(query), disp, self.indent, howmany, start)
def remove(self, doc):
temp = """
<exist:request xmlns:exist="http://exist.sourceforge.net/NS/exist">
<exist:remove document="%s"/>
</exist:request>
"""
req = temp % ( doc )
print req
resp = self.doQuery(req)
print resp
def escape(self, str):
n = ''
for c in str:
            if c == '&':
                n = n + '&amp;'
            elif c == '<':
                n = n + '&lt;'
            elif c == '>':
                n = n + '&gt;'
else:
n = n + c
return n
def parse(self, file, args):
p = rfind(file, '/')
if p > -1:
doc = file[p+1:]
else:
doc = file
if(len(args) > 0):
doc = args[0] + "/" + doc
f = open(file, 'r')
print "reading file %s ..." % file
xml = f.read()
f.close()
print "ok.\nsending %s to server ..." % doc
con = httplib.HTTP(self.host)
con.putrequest('PUT', doc)
con.putheader('Accept', 'text/xml')
clen = len(xml)
con.putheader('Content-Length', `clen`)
con.endheaders()
con.send(xml)
errcode, errmsg, headers = con.getreply()
if errcode != 200:
print 'an error occurred: %s' % errmsg
else:
print "ok."
def doQuery(self, request):
con = httplib.HTTP(self.host)
con.putrequest('POST', '/')
con.putheader('Accept', 'text/xml')
clen = len(request)
con.putheader('Content-Length', `clen`)
con.endheaders()
print 'Sending request ...\n'
con.send(request)
errcode, errmsg, headers = con.getreply()
if errcode != 200:
print 'an error occurred: %s' % errmsg
return
f = con.getfile()
data = f.read()
f.close()
return data
def benchmark(self, benchfile):
bench = open(benchfile, 'r')
o = open('benchmark.out', 'w')
queries = bench.readlines()
print '%-10s | %-10s | %-50s' % ("query", "retrieve", "query string")
print '=' * 75
qt = 0.0
rt = 0.0
i = 1
for qu in queries:
start = time.clock()
req = self.queryRequest(qu, 1, 20)
data = self.doQuery(req)
queryTime = re.search('queryTime="([0-9]+)"', data).group(1)
#retrTime = re.search('retrieveTime="([0-9]+)"', data).group(1)
retrTime = 0
print '%-10s | %-10s ==> %-47s' % (queryTime, retrTime, qu[0:50])
i = i + 1
bench.close()
def getHelp(self):
print """
Available commands:
h|help print this help message
g|get docName retrieve document docName from the database
r|remove docName remove document docName from the database
p|parse file [collection]
parse and store file to the repository
f|find expr create query request with expr as query argument
d|display [ [a|all] | [s|summary] ] [howmany]
all : return the actual content of matching nodes
summary: just return a short summary of hits per document
howmany: howmany nodes should be returned at maximum
o|output file write server response to file
u|url host:port set server address to host:port
b|bench file execute queries contained in file and print statistics
"""
def printUsage(self):
print """
Usage: query.py [-h] [-s server] [-b benchmark-file] request-file
-h Display this message
-s Server address (e.g. localhost:8088)
-b Benchmark: Execute queries from benchmark-file and print statistics
-i Switch off indentation of results
"""
def printBanner(self):
print """
eXist version 0.5, Copyright (C) 2001 Wolfgang M. Meier
eXist comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to
redistribute it under certain conditions;
for details read the license file.
"""
c = eXistClient(sys.argv)
|
lgpl-2.1
|
alejo8591/cymetria-php
|
lab9/vendor/phpdocumentor/phpdocumentor/docs/.exts/plantuml.py
|
31
|
3621
|
# -*- coding: utf-8 -*-
"""
sphinxcontrib.plantuml
~~~~~~~~~~~~~~~~~~~~~~
Embed PlantUML diagrams on your documentation.
:copyright: Copyright 2010 by Yuya Nishihara <[email protected]>.
:license: BSD, see LICENSE for details.
"""
import os, subprocess
try:
from hashlib import sha1
except ImportError: # Python<2.5
from sha import sha as sha1
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.errors import SphinxError
from sphinx.util.compat import Directive
from sphinx.util.osutil import ensuredir, ENOENT
class PlantUmlError(SphinxError):
pass
class plantuml(nodes.General, nodes.Element):
pass
class UmlDirective(Directive):
"""Directive to insert PlantUML markup
Example::
.. uml::
:alt: Alice and Bob
Alice -> Bob: Hello
Alice <- Bob: Hi
"""
has_content = True
option_spec = {'alt': directives.unchanged}
def run(self):
node = plantuml()
node['uml'] = '\n'.join(self.content)
node['alt'] = self.options.get('alt', None)
return [node]
def generate_name(self, node):
key = sha1(node['uml'].encode('utf-8')).hexdigest()
fname = 'plantuml-%s.png' % key
imgpath = getattr(self.builder, 'imgpath', None)
if imgpath:
return ('/'.join((self.builder.imgpath, fname)),
os.path.join(self.builder.outdir, '_images', fname))
else:
return fname, os.path.join(self.builder.outdir, fname)
def generate_plantuml_args(self):
if isinstance(self.builder.config.plantuml, basestring):
args = [self.builder.config.plantuml]
else:
args = list(self.builder.config.plantuml)
args.extend('-pipe -charset utf-8'.split())
return args
def render_plantuml(self, node):
refname, outfname = generate_name(self, node)
if os.path.exists(outfname):
return refname # don't regenerate
ensuredir(os.path.dirname(outfname))
f = open(outfname, 'wb')
try:
try:
p = subprocess.Popen(generate_plantuml_args(self), stdout=f,
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, err:
if err.errno != ENOENT:
raise
raise PlantUmlError('plantuml command %r cannot be run'
% self.builder.config.plantuml)
serr = p.communicate(node['uml'].encode('utf-8'))[1]
if p.returncode != 0:
raise PlantUmlError('error while running plantuml\n\n' + serr)
return refname
finally:
f.close()
def html_visit_plantuml(self, node):
try:
refname = render_plantuml(self, node)
except PlantUmlError, err:
self.builder.warn(str(err))
raise nodes.SkipNode
self.body.append(self.starttag(node, 'p', CLASS='plantuml'))
self.body.append('<img src="%s" alt="%s" />\n'
% (self.encode(refname),
self.encode(node['alt'] or node['uml'])))
self.body.append('</p>\n')
raise nodes.SkipNode
def latex_visit_plantuml(self, node):
try:
refname = render_plantuml(self, node)
except PlantUmlError, err:
self.builder.warn(str(err))
raise nodes.SkipNode
self.body.append('\\includegraphics{%s}' % self.encode(refname))
raise nodes.SkipNode
def setup(app):
app.add_node(plantuml,
html=(html_visit_plantuml, None),
latex=(latex_visit_plantuml, None))
app.add_directive('uml', UmlDirective)
app.add_config_value('plantuml', 'plantuml', 'html')
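# Illustrative Sphinx conf.py sketch (editorial addition, not part of the
# original extension), assuming this file's directory is on sys.path; the jar
# path is hypothetical:
#
#   extensions = ['plantuml']
#   # default: run the 'plantuml' executable found on PATH, or invoke the jar
#   # directly using the list form handled by generate_plantuml_args():
#   plantuml = ['java', '-jar', '/path/to/plantuml.jar']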
|
mit
|
nelango/ViralityAnalysis
|
model/lib/nltk/translate/ibm3.py
|
3
|
13875
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: IBM Model 3
#
# Copyright (C) 2001-2013 NLTK Project
# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Translation model that considers how a word can be aligned to
multiple words in another language.
IBM Model 3 improves on Model 2 by directly modeling the phenomenon
where a word in one language may be translated into zero or more words
in another. This is expressed by the fertility probability,
n(phi | source word).
If a source word translates into more than one word, it is possible to
generate sentences that have the same alignment in multiple ways. This
is modeled by a distortion step. The distortion probability, d(j|i,l,m),
predicts a target word position, given its aligned source word's
position. The distortion probability replaces the alignment probability
of Model 2.
The fertility probability is not applicable for NULL. Target words that
align to NULL are assumed to be distributed uniformly in the target
sentence. The existence of these words is modeled by p1, the probability
that a target word produced by a real source word requires another
target word that is produced by NULL.
The EM algorithm used in Model 3 is:
E step - In the training data, collect counts, weighted by prior
probabilities.
(a) count how many times a source language word is translated
into a target language word
(b) count how many times a particular position in the target
sentence is aligned to a particular position in the source
sentence
(c) count how many times a source word is aligned to phi number
of target words
(d) count how many times NULL is aligned to a target word
M step - Estimate new probabilities based on the counts from the E step
Because there are too many possible alignments, only the most probable
ones are considered. First, the best alignment is determined using prior
probabilities. Then, a hill climbing approach is used to find other good
candidates.
Notations:
i: Position in the source sentence
Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
j: Position in the target sentence
Valid values are 1, 2, ..., length of target sentence
l: Number of words in the source sentence, excluding NULL
m: Number of words in the target sentence
s: A word in the source language
t: A word in the target language
phi: Fertility, the number of target words produced by a source word
p1: Probability that a target word produced by a source word is
accompanied by another target word that is aligned to NULL
p0: 1 - p1
References:
Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.
Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
"""
from __future__ import division
from collections import defaultdict
from math import factorial
from nltk.translate import AlignedSent
from nltk.translate import Alignment
from nltk.translate import IBMModel
from nltk.translate import IBMModel2
from nltk.translate.ibm_model import Counts
import warnings
class IBMModel3(IBMModel):
"""
Translation model that considers how a word can be aligned to
multiple words in another language
>>> bitext = []
>>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big']))
>>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
>>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
>>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
>>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
>>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book']))
>>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize']))
>>> ibm3 = IBMModel3(bitext, 5)
>>> print(round(ibm3.translation_table['buch']['book'], 3))
1.0
>>> print(round(ibm3.translation_table['das']['book'], 3))
0.0
>>> print(round(ibm3.translation_table['ja'][None], 3))
1.0
>>> print(round(ibm3.distortion_table[1][1][2][2], 3))
1.0
>>> print(round(ibm3.distortion_table[1][2][2][2], 3))
0.0
>>> print(round(ibm3.distortion_table[2][2][4][5], 3))
0.75
>>> print(round(ibm3.fertility_table[2]['summarize'], 3))
1.0
>>> print(round(ibm3.fertility_table[1]['book'], 3))
1.0
>>> print(ibm3.p1)
0.054...
>>> test_sentence = bitext[2]
>>> test_sentence.words
['das', 'buch', 'ist', 'ja', 'klein']
>>> test_sentence.mots
['the', 'book', 'is', 'small']
>>> test_sentence.alignment
Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)])
"""
def __init__(self, sentence_aligned_corpus, iterations,
probability_tables=None):
"""
Train on ``sentence_aligned_corpus`` and create a lexical
translation model, a distortion model, a fertility model, and a
model for generating NULL-aligned words.
Translation direction is from ``AlignedSent.mots`` to
``AlignedSent.words``.
:param sentence_aligned_corpus: Sentence-aligned parallel corpus
:type sentence_aligned_corpus: list(AlignedSent)
:param iterations: Number of iterations to run training algorithm
:type iterations: int
:param probability_tables: Optional. Use this to pass in custom
probability values. If not specified, probabilities will be
set to a uniform distribution, or some other sensible value.
If specified, all the following entries must be present:
``translation_table``, ``alignment_table``,
``fertility_table``, ``p1``, ``distortion_table``.
See ``IBMModel`` for the type and purpose of these tables.
:type probability_tables: dict[str]: object
"""
super(IBMModel3, self).__init__(sentence_aligned_corpus)
self.reset_probabilities()
if probability_tables is None:
# Get translation and alignment probabilities from IBM Model 2
ibm2 = IBMModel2(sentence_aligned_corpus, iterations)
self.translation_table = ibm2.translation_table
self.alignment_table = ibm2.alignment_table
self.set_uniform_probabilities(sentence_aligned_corpus)
else:
# Set user-defined probabilities
self.translation_table = probability_tables['translation_table']
self.alignment_table = probability_tables['alignment_table']
self.fertility_table = probability_tables['fertility_table']
self.p1 = probability_tables['p1']
self.distortion_table = probability_tables['distortion_table']
for n in range(0, iterations):
self.train(sentence_aligned_corpus)
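# Note (illustrative, not part of the original module): to seed training with
# custom tables instead of the IBM Model 2 bootstrap, callers can pass a dict
# whose keys match the assignments above, e.g. (hypothetical table objects)
#     tables = {'translation_table': my_t, 'alignment_table': my_a,
#               'fertility_table': my_n, 'p1': 0.5, 'distortion_table': my_d}
#     IBMModel3(bitext, 0, probability_tables=tables)
# Each table must follow the structure documented in ``IBMModel``.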
def reset_probabilities(self):
super(IBMModel3, self).reset_probabilities()
self.distortion_table = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
lambda: self.MIN_PROB))))
"""
dict[int][int][int][int]: float. Probability(j | i,l,m).
Values accessed as ``distortion_table[j][i][l][m]``.
"""
def set_uniform_probabilities(self, sentence_aligned_corpus):
# d(j | i,l,m) = 1 / m for all i, j, l, m
l_m_combinations = set()
for aligned_sentence in sentence_aligned_corpus:
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
if (l, m) not in l_m_combinations:
l_m_combinations.add((l, m))
initial_prob = 1 / float(m)
if initial_prob < IBMModel.MIN_PROB:
warnings.warn("A target sentence is too long (" + str(m) +
" words). Results may be less accurate.")
for j in range(1, m + 1):
for i in range(0, l + 1):
self.distortion_table[j][i][l][m] = initial_prob
# simple initialization, taken from GIZA++
self.fertility_table[0] = defaultdict(lambda: 0.2)
self.fertility_table[1] = defaultdict(lambda: 0.65)
self.fertility_table[2] = defaultdict(lambda: 0.1)
self.fertility_table[3] = defaultdict(lambda: 0.04)
MAX_FERTILITY = 10
initial_fert_prob = 0.01 / (MAX_FERTILITY - 4)
for phi in range(4, MAX_FERTILITY):
self.fertility_table[phi] = defaultdict(lambda: initial_fert_prob)
self.p1 = 0.5
def train(self, parallel_corpus):
counts = Model3Counts()
for aligned_sentence in parallel_corpus:
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
# Sample the alignment space
sampled_alignments, best_alignment = self.sample(aligned_sentence)
# Record the most probable alignment
aligned_sentence.alignment = Alignment(
best_alignment.zero_indexed_alignment())
# E step (a): Compute normalization factors to weigh counts
total_count = self.prob_of_alignments(sampled_alignments)
# E step (b): Collect counts
for alignment_info in sampled_alignments:
count = self.prob_t_a_given_s(alignment_info)
normalized_count = count / total_count
for j in range(1, m + 1):
counts.update_lexical_translation(
normalized_count, alignment_info, j)
counts.update_distortion(
normalized_count, alignment_info, j, l, m)
counts.update_null_generation(normalized_count, alignment_info)
counts.update_fertility(normalized_count, alignment_info)
# M step: Update probabilities with maximum likelihood estimates
# If any probability is less than MIN_PROB, clamp it to MIN_PROB
existing_alignment_table = self.alignment_table
self.reset_probabilities()
self.alignment_table = existing_alignment_table # don't retrain
self.maximize_lexical_translation_probabilities(counts)
self.maximize_distortion_probabilities(counts)
self.maximize_fertility_probabilities(counts)
self.maximize_null_generation_probabilities(counts)
def maximize_distortion_probabilities(self, counts):
MIN_PROB = IBMModel.MIN_PROB
for j, i_s in counts.distortion.items():
for i, src_sentence_lengths in i_s.items():
for l, trg_sentence_lengths in src_sentence_lengths.items():
for m in trg_sentence_lengths:
estimate = (counts.distortion[j][i][l][m] /
counts.distortion_for_any_j[i][l][m])
self.distortion_table[j][i][l][m] = max(estimate,
MIN_PROB)
def prob_t_a_given_s(self, alignment_info):
"""
Probability of target sentence and an alignment given the
source sentence
"""
src_sentence = alignment_info.src_sentence
trg_sentence = alignment_info.trg_sentence
l = len(src_sentence) - 1 # exclude NULL
m = len(trg_sentence) - 1
p1 = self.p1
p0 = 1 - p1
probability = 1.0
MIN_PROB = IBMModel.MIN_PROB
# Combine NULL insertion probability
null_fertility = alignment_info.fertility_of_i(0)
probability *= (pow(p1, null_fertility) *
pow(p0, m - 2 * null_fertility))
if probability < MIN_PROB:
return MIN_PROB
# Compute combination (m - null_fertility) choose null_fertility
for i in range(1, null_fertility + 1):
probability *= (m - null_fertility - i + 1) / i
if probability < MIN_PROB:
return MIN_PROB
# Combine fertility probabilities
for i in range(1, l + 1):
fertility = alignment_info.fertility_of_i(i)
probability *= (factorial(fertility) *
self.fertility_table[fertility][src_sentence[i]])
if probability < MIN_PROB:
return MIN_PROB
# Combine lexical and distortion probabilities
for j in range(1, m + 1):
t = trg_sentence[j]
i = alignment_info.alignment[j]
s = src_sentence[i]
probability *= (self.translation_table[t][s] *
self.distortion_table[j][i][l][m])
if probability < MIN_PROB:
return MIN_PROB
return probability
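# For reference, the quantity computed above is (in the notation of the
# module docstring, following Brown et al. 1993):
#   P(t, a | s) = C(m - phi_0, phi_0) * p1^phi_0 * p0^(m - 2*phi_0)
#                 * prod_{i=1..l} phi_i! * n(phi_i | s_i)
#                 * prod_{j=1..m} t(t_j | s_{a(j)}) * d(j | a(j), l, m)
# where phi_i is the fertility of source word i and a(j) is the source
# position aligned to target position j; the running product is clamped to
# MIN_PROB as soon as it underflows.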
class Model3Counts(Counts):
"""
Data object to store counts of various parameters during training.
Includes counts for distortion.
"""
def __init__(self):
super(Model3Counts, self).__init__()
self.distortion = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
lambda: 0.0))))
self.distortion_for_any_j = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))
def update_distortion(self, count, alignment_info, j, l, m):
i = alignment_info.alignment[j]
self.distortion[j][i][l][m] += count
self.distortion_for_any_j[i][l][m] += count
|
mit
|
arshesney/winelauncher
|
setup.py
|
1
|
3808
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='winelauncher',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1.3',
description='WINE commandline wrapper',
long_description=long_description,
# The project's main homepage.
url='https://github.com/arshesney/winelauncher',
# Author details
author='Arshesney',
author_email='[email protected]',
# Choose your license
license='GPLv2',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# What does your project relate to?
keywords='wine cli wrapper',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'scraps']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'winelauncher=winelauncher.main:main',
],
},
)
|
gpl-3.0
|
yvesalexandre/bandicoot
|
bandicoot/tests/testing_tools.py
|
1
|
3237
|
# The MIT License (MIT)
#
# Copyright (c) 2015-2016 Massachusetts Institute of Technology.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import bandicoot as bc
import numpy as np
def parse_dict(path):
with open(path, 'r') as f:
dict_data = json.load(f)
return dict_data
def file_equality(f1, f2):
"""
Returns true if the files are the same, without taking into account
line endings.
"""
with open(f1, "r") as a:
    with open(f2, "r") as b:
        lines_a = a.readlines()
        lines_b = b.readlines()
        # zip() alone would ignore trailing lines in the longer file,
        # so the line counts are compared explicitly as well.
        return len(lines_a) == len(lines_b) and all(
            line_a.strip('\n').strip('\r') == line_b.strip('\n').strip('\r')
            for line_a, line_b in zip(lines_a, lines_b)
        )
def metric_suite(user, answers, decimal=7, **kwargs):
"""
Runs the complete metric suite.
If any of the metrics is different than the expected answer, return False.
"""
results = bc.utils.all(user, **kwargs)
# Never compare the hash of bandicoot source code:
if 'reporting__code_signature' in answers:
answers.pop('reporting__code_signature')
test_result, msg = compare_dict(answers, results, decimal=decimal)
return test_result, msg
def compare_dict(answer, result, decimal=7):
"""
Returns true if two dictionaries are approximately equal.
Returns false otherwise.
"""
flat_answer = bc.utils.flatten(answer)
flat_result = bc.utils.flatten(result)
for key in flat_answer.keys():
if key not in flat_result.keys():
return False, "The key {} was not there.".format(key)
answer_v, result_v = flat_answer[key], flat_result[key]
if isinstance(answer_v, (float, int)) and isinstance(result_v, (float, int)):
try:
np.testing.assert_almost_equal(answer_v, result_v,
decimal=decimal)
except AssertionError:
err_msg = "The key {} produced a different result: expected {}, got {}.".format(key, answer_v, result_v)
return False, err_msg
elif answer_v != result_v:
return False, "The key {} produced a different result: expected {}, got {}.".format(key, answer_v, result_v)
return True, ""
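# A minimal usage sketch (illustrative, not part of the bandicoot test suite):
# it assumes bandicoot and numpy are importable and that bc.utils.flatten
# passes an already-flat dict through unchanged.
if __name__ == '__main__':
    # Values differing only beyond the requested decimal compare as equal.
    ok, msg = compare_dict({'a': 1.00000001}, {'a': 1.00000002})
    print(ok, msg)
    # A key missing from the result is reported with an explanatory message.
    ok, msg = compare_dict({'a': 1.0, 'b': 2.0}, {'a': 1.0})
    print(ok, msg)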
|
mit
|
jmztaylor/android_kernel_htc_m4
|
scripts/gcc-wrapper.py
|
74
|
3544
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"inet_hashtables.h:356",
"mmu.c:602",
"return_address.c:62",
"swab.h:49",
"xc4000.c:1049",
"xc4000.c:1063",
"xt_socket.c:161",
"xt_socket.c:307",
])
# Capture the name of the object file, so we can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
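# For example (illustrative), a diagnostic such as
#     drivers/foo/bar.c:123:4: warning: unused variable 'x'
# matches with m.group(2) == 'bar.c:123', which is the key that is checked
# against the allowed_warnings whitelist above.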
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
|
gpl-2.0
|
bendykst/deluge
|
deluge/configmanager.py
|
3
|
3497
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Andrew Resch <[email protected]>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import logging
import os
import deluge.common
import deluge.log
from deluge.config import Config
log = logging.getLogger(__name__)
class _ConfigManager:
def __init__(self):
log.debug("ConfigManager started..")
self.config_files = {}
self.__config_directory = None
@property
def config_directory(self):
if self.__config_directory is None:
self.__config_directory = deluge.common.get_default_config_dir()
return self.__config_directory
def __del__(self):
del self.config_files
def set_config_dir(self, directory):
"""
Sets the config directory.
:param directory: str, the directory where the config info should be
:returns bool: True if successfully changed directory, False if not
"""
if not directory:
return False
log.info("Setting config directory to: %s", directory)
if not os.path.exists(directory):
# Try to create the config folder if it doesn't exist
try:
os.makedirs(directory)
except OSError as ex:
log.error("Unable to make config directory: %s", ex)
return False
elif not os.path.isdir(directory):
log.error("Config directory needs to be a directory!")
return False
self.__config_directory = directory
# Reset the config_files so we don't get config from old config folder
# XXX: Probably should have it go through the config_files dict and try
# to reload based on the new config directory
self.save()
self.config_files = {}
deluge.log.tweak_logging_levels()
return True
def get_config_dir(self):
return self.config_directory
def close(self, config):
"""Closes a config file."""
try:
del self.config_files[config]
except KeyError:
pass
def save(self):
"""Saves all the configs to disk."""
for value in self.config_files.values():
value.save()
# We need to return True to keep the timer active
return True
def get_config(self, config_file, defaults=None):
"""Get a reference to the Config object for this filename"""
log.debug("Getting config '%s'", config_file)
# Create the config object if not already created
if config_file not in self.config_files.keys():
self.config_files[config_file] = Config(config_file, defaults, self.config_directory)
return self.config_files[config_file]
# Singleton functions
_configmanager = _ConfigManager()
def ConfigManager(config, defaults=None): # NOQA
return _configmanager.get_config(config, defaults)
def set_config_dir(directory):
"""Sets the config directory, else just uses default"""
return _configmanager.set_config_dir(directory)
def get_config_dir(filename=None):
if filename is not None:
return os.path.join(_configmanager.get_config_dir(), filename)
else:
return _configmanager.get_config_dir()
def close(config):
return _configmanager.close(config)
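# Illustrative usage sketch (not part of Deluge): callers typically obtain a
# shared Config object through the module-level helpers, e.g. (hypothetical
# file name and directory)
#     from deluge import configmanager
#     configmanager.set_config_dir('/tmp/deluge-test')
#     prefs = configmanager.ConfigManager('prefs.conf', defaults={'verbose': False})
#     prefs.save()
# Because _ConfigManager is a singleton, a second ConfigManager('prefs.conf')
# call returns the same in-memory Config instance.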
|
gpl-3.0
|
jaddison/ansible-modules-core
|
utilities/logic/wait_for.py
|
82
|
18083
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import socket
import datetime
import time
import sys
import re
import binascii
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
# just because we can import it on Linux doesn't mean we will use it
except ImportError:
pass
DOCUMENTATION = '''
---
module: wait_for
short_description: Waits for a condition before continuing.
description:
- You can wait for a set amount of time C(timeout), this is the default if nothing is specified.
- Waiting for a port to become available is useful for when services
are not immediately available after their init scripts return
which is true of certain Java application servers. It is also
useful when starting guests with the M(virt) module and
needing to pause until they are ready.
- This module can also be used to wait for a regex match on a string to be present in a file.
- In 1.6 and later, this module can also be used to wait for a file to be available or
absent on the filesystem.
- In 1.8 and later, this module can also be used to wait for active
connections to be closed before continuing, useful if a node
is being rotated out of a load balancer pool.
version_added: "0.7"
options:
host:
description:
- A resolvable hostname or IP address to wait for
required: false
default: "127.0.0.1"
timeout:
description:
- maximum number of seconds to wait for
required: false
default: 300
connect_timeout:
description:
- maximum number of seconds to wait for a connection to happen before closing and retrying
required: false
default: 5
delay:
description:
- number of seconds to wait before starting to poll
required: false
default: 0
port:
description:
- port number to poll
required: false
state:
description:
- either C(present), C(started), C(stopped), C(absent), or C(drained)
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections
- When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed
choices: [ "present", "started", "stopped", "absent", "drained" ]
default: "started"
path:
version_added: "1.4"
required: false
description:
- path to a file on the filesystem that must exist before continuing
search_regex:
version_added: "1.4"
required: false
description:
- Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex.
exclude_hosts:
version_added: "1.8"
required: false
description:
- list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state
notes:
- The ability to use search_regex with a port connection was added in 1.7.
requirements: []
author:
- "Jeroen Hoekx (@jhoekx)"
- "John Jarvis (@jarv)"
- "Andrii Radyk (@AnderEnder)"
'''
EXAMPLES = '''
# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds
- wait_for: port=8000 delay=10
# wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds
- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained
# wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts
- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3
# wait until the file /tmp/foo is present before continuing
- wait_for: path=/tmp/foo
# wait until the string "completed" is in the file /tmp/foo before continuing
- wait_for: path=/tmp/foo search_regex=completed
# wait until the lock file is removed
- wait_for: path=/var/lock/file.lock state=absent
# wait until the process is finished and pid was destroyed
- wait_for: path=/proc/3466/status state=absent
# wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable
# and don't start checking for 10 seconds
- local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10
'''
class TCPConnectionInfo(object):
"""
This is a generic TCP Connection Info strategy class that relies
on the psutil module, which is not ideal for targets, but necessary
for cross platform support.
A subclass may wish to override some or all of these methods.
- _get_exclude_ips()
- get_active_connections_count()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
match_all_ips = {
socket.AF_INET: '0.0.0.0',
socket.AF_INET6: '::',
}
connection_states = {
'01': 'ESTABLISHED',
'02': 'SYN_SENT',
'03': 'SYN_RECV',
'04': 'FIN_WAIT1',
'05': 'FIN_WAIT2',
'06': 'TIME_WAIT',
}
def __new__(cls, *args, **kwargs):
return load_platform_subclass(TCPConnectionInfo, args, kwargs)
def __init__(self, module):
self.module = module
(self.family, self.ip) = _convert_host_to_ip(self.module.params['host'])
self.port = int(self.module.params['port'])
self.exclude_ips = self._get_exclude_ips()
if not HAS_PSUTIL:
module.fail_json(msg="psutil module required for wait_for")
def _get_exclude_ips(self):
if self.module.params['exclude_hosts'] is None:
return []
exclude_hosts = self.module.params['exclude_hosts']
return [ _convert_host_to_hex(h)[1] for h in exclude_hosts ]
def get_active_connections_count(self):
active_connections = 0
for p in psutil.process_iter():
connections = p.get_connections(kind='inet')
for conn in connections:
if conn.status not in self.connection_states.values():
continue
(local_ip, local_port) = conn.local_address
if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]:
(remote_ip, remote_port) = conn.remote_address
if remote_ip not in self.exclude_ips:
active_connections += 1
return active_connections
# ===========================================
# Subclass: Linux
class LinuxTCPConnectionInfo(TCPConnectionInfo):
"""
This is a TCP Connection Info evaluation strategy class
that utilizes information from Linux's procfs. While less universal, it
allows Linux targets to avoid requiring an additional library.
"""
platform = 'Linux'
distribution = None
source_file = {
socket.AF_INET: '/proc/net/tcp',
socket.AF_INET6: '/proc/net/tcp6'
}
match_all_ips = {
socket.AF_INET: '00000000',
socket.AF_INET6: '00000000000000000000000000000000',
}
local_address_field = 1
remote_address_field = 2
connection_state_field = 3
def __init__(self, module):
self.module = module
(self.family, self.ip) = _convert_host_to_hex(module.params['host'])
self.port = "%0.4X" % int(module.params['port'])
self.exclude_ips = self._get_exclude_ips()
def _get_exclude_ips(self):
if self.module.params['exclude_hosts'] is None:
return []
exclude_hosts = self.module.params['exclude_hosts']
return [ _convert_host_to_hex(h) for h in exclude_hosts ]
def get_active_connections_count(self):
active_connections = 0
f = open(self.source_file[self.family])
for tcp_connection in f.readlines():
tcp_connection = tcp_connection.strip().split()
if tcp_connection[self.local_address_field] == 'local_address':
continue
if tcp_connection[self.connection_state_field] not in self.connection_states:
continue
(local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]:
(remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':')
if remote_ip not in self.exclude_ips:
active_connections += 1
f.close()
return active_connections
def _convert_host_to_ip(host):
"""
Perform forward DNS resolution on host; an IP address input resolves to itself
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
Tuple containing address family and IP
"""
addrinfo = socket.getaddrinfo(host, 80, 0, 0, socket.SOL_TCP)[0]
return (addrinfo[0], addrinfo[4][0])
def _convert_host_to_hex(host):
"""
Convert the provided host to the format in /proc/net/tcp*
/proc/net/tcp uses little-endian four byte hex for ipv4
/proc/net/tcp6 uses little-endian per 4B word for ipv6
Args:
host: String with either hostname, IPv4, or IPv6 address
Returns:
Tuple containing address family and the little-endian converted host
"""
(family, ip) = _convert_host_to_ip(host)
hexed = binascii.hexlify(socket.inet_pton(family, ip)).upper()
if family == socket.AF_INET:
hexed = _little_endian_convert_32bit(hexed)
elif family == socket.AF_INET6:
# xrange loops through each 8 character (4B) set in the 128bit total
hexed = "".join([ _little_endian_convert_32bit(hexed[x:x+8]) for x in xrange(0, 32, 8) ])
return (family, hexed)
def _little_endian_convert_32bit(block):
"""
Convert to little-endian, effectively transposing
the order of the four byte word
12345678 -> 78563412
Args:
block: String containing a 4 byte hex representation
Returns:
String containing the little-endian converted block
"""
# xrange starts at 6 and decrements by 2, stopping before -2 (6, 4, 2, 0),
# which lets us start at the end of the string block and work to the beginning
return "".join([ block[x:x+2] for x in xrange(6, -2, -2) ])
def _create_connection( (host, port), connect_timeout):
"""
Connect to a 2-tuple (host, port) and return
the socket object.
Args:
2-tuple (host, port) and connection timeout
Returns:
Socket object
"""
if sys.version_info < (2, 6):
(family, _) = _convert_host_to_ip(host)
connect_socket = socket.socket(family, socket.SOCK_STREAM)
connect_socket.settimeout(connect_timeout)
connect_socket.connect( (host, port) )
else:
connect_socket = socket.create_connection( (host, port), connect_timeout)
return connect_socket
def main():
module = AnsibleModule(
argument_spec = dict(
host=dict(default='127.0.0.1'),
timeout=dict(default=300),
connect_timeout=dict(default=5),
delay=dict(default=0),
port=dict(default=None),
path=dict(default=None),
search_regex=dict(default=None),
state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']),
exclude_hosts=dict(default=None, type='list')
),
)
params = module.params
host = params['host']
timeout = int(params['timeout'])
connect_timeout = int(params['connect_timeout'])
delay = int(params['delay'])
if params['port']:
port = int(params['port'])
else:
port = None
state = params['state']
path = params['path']
search_regex = params['search_regex']
if port and path:
module.fail_json(msg="port and path parameter can not both be passed to wait_for")
if path and state == 'stopped':
module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module")
if path and state == 'drained':
module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module")
if params['exclude_hosts'] is not None and state != 'drained':
module.fail_json(msg="exclude_hosts should only be with state=drained")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if not port and not path and state != 'drained':
time.sleep(timeout)
elif state in [ 'stopped', 'absent' ]:
### first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
f = open(path)
f.close()
time.sleep(1)
pass
except IOError:
break
elif port:
try:
s = _create_connection( (host, port), connect_timeout)
s.shutdown(socket.SHUT_RDWR)
s.close()
time.sleep(1)
except:
break
else:
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
if port:
module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
elif path:
module.fail_json(msg="Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
elif state in ['started', 'present']:
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
if path:
try:
os.stat(path)
if search_regex:
try:
f = open(path)
try:
if re.search(search_regex, f.read(), re.MULTILINE):
break
else:
time.sleep(1)
finally:
f.close()
except IOError:
time.sleep(1)
pass
else:
break
except OSError, e:
# File not present
if e.errno == 2:
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
elif port:
try:
s = _create_connection( (host, port), connect_timeout)
if search_regex:
data = ''
matched = False
while 1:
data += s.recv(1024)
if not data:
break
elif re.search(search_regex, data, re.MULTILINE):
matched = True
break
if matched:
s.shutdown(socket.SHUT_RDWR)
s.close()
break
else:
s.shutdown(socket.SHUT_RDWR)
s.close()
break
except:
time.sleep(1)
pass
else:
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
if port:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds)
elif path:
if search_regex:
module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds)
else:
module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds)
elif state == 'drained':
### wait until all active connections are gone
end = start + datetime.timedelta(seconds=timeout)
tcpconns = TCPConnectionInfo(module)
while datetime.datetime.now() < end:
try:
if tcpconns.get_active_connections_count() == 0:
break
except IOError:
pass
time.sleep(1)
else:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds)
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
kennho/oppia
|
core/controllers/editor.py
|
1
|
37827
|
# coding: utf-8
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the editor view."""
import imghdr
import logging
import jinja2
from core.controllers import base
from core.domain import config_domain
from core.domain import dependency_registry
from core.domain import email_manager
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import gadget_registry
from core.domain import interaction_registry
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import rule_domain
from core.domain import stats_services
from core.domain import user_services
from core.domain import value_generators_domain
from core.platform import models
import feconf
import utils
current_user_services = models.Registry.import_current_user_services()
# The frontend template for a new state. It is sent to the frontend when the
# exploration editor page is first loaded, so that new states can be
# added in a way that is completely client-side.
# IMPORTANT: Before adding this state to an existing exploration, the
# state name and the destination of the default rule should first be
# changed to the desired new state name.
NEW_STATE_TEMPLATE = {
'content': [{
'type': 'text',
'value': ''
}],
'interaction': exp_domain.State.NULL_INTERACTION_DICT,
'param_changes': [],
'unresolved_answers': {},
}
MODERATOR_REQUEST_FORUM_URL_DEFAULT_VALUE = (
'https://moderator/request/forum/url')
MODERATOR_REQUEST_FORUM_URL = config_domain.ConfigProperty(
'moderator_request_forum_url', {'type': 'unicode'},
'A link to the forum for nominating explorations to be featured '
'in the gallery',
default_value=MODERATOR_REQUEST_FORUM_URL_DEFAULT_VALUE)
DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR = config_domain.ConfigProperty(
'default_twitter_share_message_editor', {
'type': 'unicode',
},
'Default text for the Twitter share message for the editor',
default_value=(
'Check out this interactive lesson I created on Oppia - a free '
'platform for teaching and learning!'))
def get_value_generators_js():
"""Return a string that concatenates the JS for all value generators."""
all_value_generators = (
value_generators_domain.Registry.get_all_generator_classes())
value_generators_js = ''
for _, generator_cls in all_value_generators.iteritems():
value_generators_js += generator_cls.get_js_template()
return value_generators_js
def _require_valid_version(version_from_payload, exploration_version):
"""Check that the payload version matches the given exploration version."""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != exploration_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_version, version_from_payload))
def require_editor(handler):
"""Decorator that checks if the user can edit the given exploration."""
def test_editor(self, exploration_id, escaped_state_name=None, **kwargs):
"""Gets the user and exploration id if the user can edit it.
Args:
self: the handler instance
exploration_id: the exploration id
escaped_state_name: the URL-escaped state name, if it exists
**kwargs: any other arguments passed to the handler
Returns:
The relevant handler, if the user is authorized to edit this
exploration.
Raises:
self.PageNotFoundException: if no such exploration or state exists.
self.UnauthorizedUserException: if the user exists but does not
have the right credentials.
"""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
if self.username in config_domain.BANNED_USERNAMES.value:
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_edit(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.UnauthorizedUserException(
'You do not have the credentials to edit this exploration.',
self.user_id)
if not escaped_state_name:
return handler(self, exploration_id, **kwargs)
state_name = self.unescape_state_name(escaped_state_name)
if state_name not in exploration.states:
logging.error('Could not find state: %s' % state_name)
logging.error('Available states: %s' % exploration.states.keys())
raise self.PageNotFoundException
return handler(self, exploration_id, state_name, **kwargs)
return test_editor
class EditorHandler(base.BaseHandler):
"""Base class for all handlers for the editor page."""
# The page name to use as a key for generating CSRF tokens.
PAGE_NAME_FOR_CSRF = 'editor'
class ExplorationPage(EditorHandler):
"""The editor page for a single exploration."""
EDITOR_PAGE_DEPENDENCY_IDS = ['codemirror']
def get(self, exploration_id):
"""Handles GET requests."""
if exploration_id in base.DISABLED_EXPLORATIONS.value:
self.render_template(
'error/disabled_exploration.html', iframe_restriction=None)
return
exploration = exp_services.get_exploration_by_id(
exploration_id, strict=False)
if (exploration is None or
not rights_manager.Actor(self.user_id).can_view(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id)):
self.redirect('/')
return
can_edit = (
bool(self.user_id) and
self.username not in config_domain.BANNED_USERNAMES.value and
rights_manager.Actor(self.user_id).can_edit(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id))
interaction_ids = (
interaction_registry.Registry.get_all_interaction_ids())
interaction_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
dependencies_html, additional_angular_modules = (
dependency_registry.Registry.get_deps_html_and_angular_modules(
interaction_dependency_ids + self.EDITOR_PAGE_DEPENDENCY_IDS))
interaction_templates = (
rte_component_registry.Registry.get_html_for_all_components() +
interaction_registry.Registry.get_interaction_html(
interaction_ids))
interaction_validators_html = (
interaction_registry.Registry.get_validators_html(
interaction_ids))
gadget_types = gadget_registry.Registry.get_all_gadget_types()
gadget_templates = (
gadget_registry.Registry.get_gadget_html(gadget_types))
self.values.update({
'GADGET_SPECS': gadget_registry.Registry.get_all_specs(),
'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
'PANEL_SPECS': feconf.PANELS_PROPERTIES,
'DEFAULT_OBJECT_VALUES': rule_domain.get_default_object_values(),
'SHARING_OPTIONS': base.SHARING_OPTIONS.value,
'DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR': (
DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR.value),
'additional_angular_modules': additional_angular_modules,
'can_delete': rights_manager.Actor(
self.user_id).can_delete(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_edit': can_edit,
'can_modify_roles': rights_manager.Actor(
self.user_id).can_modify_roles(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_publicize': rights_manager.Actor(
self.user_id).can_publicize(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_publish': rights_manager.Actor(
self.user_id).can_publish(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_release_ownership': rights_manager.Actor(
self.user_id).can_release_ownership(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_unpublicize': rights_manager.Actor(
self.user_id).can_unpublicize(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'can_unpublish': rights_manager.Actor(
self.user_id).can_unpublish(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id),
'dependencies_html': jinja2.utils.Markup(dependencies_html),
'gadget_templates': jinja2.utils.Markup(gadget_templates),
'interaction_templates': jinja2.utils.Markup(
interaction_templates),
'interaction_validators_html': jinja2.utils.Markup(
interaction_validators_html),
'moderator_request_forum_url': MODERATOR_REQUEST_FORUM_URL.value,
'nav_mode': feconf.NAV_MODE_CREATE,
'value_generators_js': jinja2.utils.Markup(
get_value_generators_js()),
'title': exploration.title,
'ALL_LANGUAGE_CODES': feconf.ALL_LANGUAGE_CODES,
'ALLOWED_GADGETS': feconf.ALLOWED_GADGETS,
'ALLOWED_INTERACTION_CATEGORIES': (
feconf.ALLOWED_INTERACTION_CATEGORIES),
# This is needed for the exploration preview.
'CATEGORIES_TO_COLORS': feconf.CATEGORIES_TO_COLORS,
'INVALID_PARAMETER_NAMES': feconf.INVALID_PARAMETER_NAMES,
'NEW_STATE_TEMPLATE': NEW_STATE_TEMPLATE,
'SHOW_TRAINABLE_UNRESOLVED_ANSWERS': (
feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS),
'TAG_REGEX': feconf.TAG_REGEX,
})
self.render_template('editor/exploration_editor.html')
class ExplorationHandler(EditorHandler):
"""Page with editor data for a single exploration."""
PAGE_NAME_FOR_CSRF = 'editor'
def _get_exploration_data(self, exploration_id, version=None):
"""Returns a description of the given exploration."""
try:
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
except:
raise self.PageNotFoundException
states = {}
for state_name in exploration.states:
state_dict = exploration.states[state_name].to_dict()
state_dict['unresolved_answers'] = (
stats_services.get_top_unresolved_answers_for_default_rule(
exploration_id, state_name))
states[state_name] = state_dict
editor_dict = {
'category': exploration.category,
'exploration_id': exploration_id,
'init_state_name': exploration.init_state_name,
'language_code': exploration.language_code,
'objective': exploration.objective,
'param_changes': exploration.param_change_dicts,
'param_specs': exploration.param_specs_dict,
'rights': rights_manager.get_exploration_rights(
exploration_id).to_dict(),
'show_state_editor_tutorial_on_load': (
self.user_id and not self.has_seen_editor_tutorial),
'skin_customizations': exploration.skin_instance.to_dict()[
'skin_customizations'],
'states': states,
'tags': exploration.tags,
'title': exploration.title,
'version': exploration.version,
}
return editor_dict
def get(self, exploration_id):
"""Gets the data for the exploration overview page."""
if not rights_manager.Actor(self.user_id).can_view(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
version = self.request.get('v', default_value=None)
self.values.update(
self._get_exploration_data(exploration_id, version=version))
self.render_json(self.values)
@require_editor
def put(self, exploration_id):
"""Updates properties of the given exploration."""
exploration = exp_services.get_exploration_by_id(exploration_id)
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
commit_message = self.payload.get('commit_message')
change_list = self.payload.get('change_list')
try:
exp_services.update_exploration(
self.user_id, exploration_id, change_list, commit_message)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
self.values.update(self._get_exploration_data(exploration_id))
self.render_json(self.values)
@require_editor
def delete(self, exploration_id):
"""Deletes the given exploration."""
role = self.request.get('role')
if not role:
role = None
if role == rights_manager.ROLE_ADMIN:
if not self.is_admin:
logging.error(
'%s tried to delete an exploration, but is not an admin.'
% self.user_id)
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration '
'%s' % (self.user_id, exploration_id))
elif role == rights_manager.ROLE_MODERATOR:
if not self.is_moderator:
logging.error(
'%s tried to delete an exploration, but is not a '
'moderator.' % self.user_id)
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration '
'%s' % (self.user_id, exploration_id))
elif role is not None:
raise self.InvalidInputException('Invalid role: %s' % role)
logging.info(
'%s %s tried to delete exploration %s' %
(role, self.user_id, exploration_id))
exploration = exp_services.get_exploration_by_id(exploration_id)
can_delete = rights_manager.Actor(self.user_id).can_delete(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration.id)
if not can_delete:
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration %s' %
(self.user_id, exploration_id))
is_exploration_cloned = rights_manager.is_exploration_cloned(
exploration_id)
exp_services.delete_exploration(
self.user_id, exploration_id, force_deletion=is_exploration_cloned)
logging.info(
'%s %s deleted exploration %s' %
(role, self.user_id, exploration_id))
class ExplorationRightsHandler(EditorHandler):
"""Handles management of exploration editing rights."""
PAGE_NAME_FOR_CSRF = 'editor'
@require_editor
def put(self, exploration_id):
"""Updates the editing rights for the given exploration."""
exploration = exp_services.get_exploration_by_id(exploration_id)
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
is_public = self.payload.get('is_public')
is_publicized = self.payload.get('is_publicized')
is_community_owned = self.payload.get('is_community_owned')
new_member_username = self.payload.get('new_member_username')
new_member_role = self.payload.get('new_member_role')
viewable_if_private = self.payload.get('viewable_if_private')
if new_member_username:
if not rights_manager.Actor(
self.user_id).can_modify_roles(
rights_manager.ACTIVITY_TYPE_EXPLORATION,
exploration_id):
raise self.UnauthorizedUserException(
'Only an owner of this exploration can add or change '
'roles.')
new_member_id = user_services.get_user_id_from_username(
new_member_username)
if new_member_id is None:
raise Exception(
'Sorry, we could not find the specified user.')
rights_manager.assign_role_for_exploration(
self.user_id, exploration_id, new_member_id, new_member_role)
elif is_public is not None:
exploration = exp_services.get_exploration_by_id(exploration_id)
if is_public:
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
exp_services.publish_exploration_and_update_user_profiles(
self.user_id, exploration_id)
exp_services.index_explorations_given_ids([exploration_id])
else:
rights_manager.unpublish_exploration(
self.user_id, exploration_id)
exp_services.delete_documents_from_search_index([
exploration_id])
elif is_publicized is not None:
exploration = exp_services.get_exploration_by_id(exploration_id)
if is_publicized:
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.publicize_exploration(
self.user_id, exploration_id)
else:
rights_manager.unpublicize_exploration(
self.user_id, exploration_id)
elif is_community_owned:
exploration = exp_services.get_exploration_by_id(exploration_id)
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.release_ownership_of_exploration(
self.user_id, exploration_id)
elif viewable_if_private is not None:
rights_manager.set_private_viewability_of_exploration(
self.user_id, exploration_id, viewable_if_private)
else:
raise self.InvalidInputException(
'No change was made to this exploration.')
self.render_json({
'rights': rights_manager.get_exploration_rights(
exploration_id).to_dict()
})
class ExplorationModeratorRightsHandler(EditorHandler):
"""Handles management of exploration rights by moderators."""
PAGE_NAME_FOR_CSRF = 'editor'
@base.require_moderator
def put(self, exploration_id):
"""Updates the publication status of the given exploration, and sends
an email to all its owners.
"""
exploration = exp_services.get_exploration_by_id(exploration_id)
action = self.payload.get('action')
email_body = self.payload.get('email_body')
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
if action not in feconf.VALID_MODERATOR_ACTIONS:
raise self.InvalidInputException('Invalid moderator action.')
# If moderator emails can be sent, check that all the prerequisites are
# satisfied, otherwise do nothing.
if feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION:
if not email_body:
raise self.InvalidInputException(
'Moderator actions should include an email to the '
'recipient.')
email_manager.require_moderator_email_prereqs_are_satisfied()
# Perform the moderator action.
if action == 'unpublish_exploration':
rights_manager.unpublish_exploration(
self.user_id, exploration_id)
exp_services.delete_documents_from_search_index([
exploration_id])
elif action == 'publicize_exploration':
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.publicize_exploration(
self.user_id, exploration_id)
else:
raise self.InvalidInputException(
'No change was made to this exploration.')
exp_rights = rights_manager.get_exploration_rights(exploration_id)
# If moderator emails can be sent, send an email to the all owners of
# the exploration notifying them of the change.
if feconf.REQUIRE_EMAIL_ON_MODERATOR_ACTION:
for owner_id in exp_rights.owner_ids:
email_manager.send_moderator_action_email(
self.user_id, owner_id,
feconf.VALID_MODERATOR_ACTIONS[action]['email_intent'],
exploration.title, email_body)
self.render_json({
'rights': exp_rights.to_dict(),
})
class ResolvedAnswersHandler(EditorHandler):
"""Allows learners' answers for a state to be marked as resolved."""
PAGE_NAME_FOR_CSRF = 'editor'
@require_editor
def put(self, exploration_id, state_name):
"""Marks learners' answers as resolved."""
resolved_answers = self.payload.get('resolved_answers')
if not isinstance(resolved_answers, list):
raise self.InvalidInputException(
'Expected a list of resolved answers; received %s.' %
resolved_answers)
if 'resolved_answers' in self.payload:
event_services.DefaultRuleAnswerResolutionEventHandler.record(
exploration_id, state_name, resolved_answers)
self.render_json({})
class UntrainedAnswersHandler(EditorHandler):
"""Returns answers that learners have submitted, but that Oppia hasn't been
explicitly trained to respond to by an exploration author.
"""
NUMBER_OF_TOP_ANSWERS_PER_RULE = 50
def get(self, exploration_id, escaped_state_name):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
state_name = self.unescape_state_name(escaped_state_name)
if state_name not in exploration.states:
# If trying to access a non-existing state, there is no training
# data associated with it.
self.render_json({'unhandled_answers': []})
return
state = exploration.states[state_name]
# TODO(bhenning): Answers should be bound to a particular exploration
# version or interaction ID.
# TODO(bhenning): If the top 100 answers have already been classified,
# then this handler will always return an empty list.
# TODO(bhenning): This entire function will not work as expected until
# the answers storage backend stores answers in a non-lossy way.
# Currently, answers are stored as HTML strings and they are not able
# to be converted back to the original objects they started as, so the
# normalization calls in this function will not work correctly on those
# strings. Once this happens, this handler should also be tested.
# The total number of possible answers is 100 because it requests the
# top 50 answers matched to the default rule and the top 50 answers
# matched to a fuzzy rule individually.
answers = stats_services.get_top_state_rule_answers(
exploration_id, state_name, [
exp_domain.DEFAULT_RULESPEC_STR, rule_domain.FUZZY_RULE_TYPE],
self.NUMBER_OF_TOP_ANSWERS_PER_RULE)
interaction = state.interaction
unhandled_answers = []
if feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS and interaction.id:
interaction_instance = (
interaction_registry.Registry.get_interaction_by_id(
interaction.id))
try:
# Normalize the answers.
for answer in answers:
answer['value'] = interaction_instance.normalize_answer(
answer['value'])
trained_answers = set()
for answer_group in interaction.answer_groups:
for rule_spec in answer_group.rule_specs:
if rule_spec.rule_type == rule_domain.FUZZY_RULE_TYPE:
trained_answers.update(
interaction_instance.normalize_answer(trained)
for trained
in rule_spec.inputs['training_data'])
# Include all the answers which have been confirmed to be
# associated with the default outcome.
trained_answers.update(set(
interaction_instance.normalize_answer(confirmed)
for confirmed
in interaction.confirmed_unclassified_answers))
unhandled_answers = [
answer for answer in answers
if answer['value'] not in trained_answers
]
except Exception as e:
logging.warning(
'Error loading untrained answers for interaction %s: %s.' %
(interaction.id, e))
self.render_json({
'unhandled_answers': unhandled_answers
})
class ExplorationDownloadHandler(EditorHandler):
"""Downloads an exploration as a zip file, or dict of YAML strings
representing states.
"""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_view(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
version = self.request.get('v', default_value=exploration.version)
output_format = self.request.get('output_format', default_value='zip')
width = int(self.request.get('width', default_value=80))
# If the title of the exploration has changed, we use the new title
filename = 'oppia-%s-v%s' % (
utils.to_ascii(exploration.title.replace(' ', '')), version)
if output_format == feconf.OUTPUT_FORMAT_ZIP:
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.zip' % str(filename))
self.response.write(
exp_services.export_to_zip_file(exploration_id, version))
elif output_format == feconf.OUTPUT_FORMAT_JSON:
self.render_json(exp_services.export_states_to_yaml(
exploration_id, version=version, width=width))
else:
raise self.InvalidInputException(
'Unrecognized output format %s' % output_format)
class StateDownloadHandler(EditorHandler):
"""Downloads a state as a YAML string."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_view(
rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
raise self.PageNotFoundException
version = self.request.get('v', default_value=exploration.version)
width = int(self.request.get('width', default_value=80))
try:
state = self.request.get('state')
except:
raise self.InvalidInputException('State not found')
exploration_dict = exp_services.export_states_to_yaml(
exploration_id, version=version, width=width)
if state not in exploration_dict:
raise self.PageNotFoundException
self.response.write(exploration_dict[state])
class ExplorationResourcesHandler(EditorHandler):
"""Manages assets associated with an exploration."""
@require_editor
def get(self, exploration_id):
"""Handles GET requests."""
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
dir_list = fs.listdir('')
self.render_json({'filepaths': dir_list})
class ExplorationSnapshotsHandler(EditorHandler):
"""Returns the exploration snapshot history."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
snapshots = exp_services.get_exploration_snapshots_metadata(
exploration_id)
except:
raise self.PageNotFoundException
# Patch `snapshots` to use the editor's display name.
for snapshot in snapshots:
if snapshot['committer_id'] != feconf.SYSTEM_COMMITTER_ID:
snapshot['committer_id'] = user_services.get_username(
snapshot['committer_id'])
self.render_json({
'snapshots': snapshots,
})
class ExplorationRevertHandler(EditorHandler):
"""Reverts an exploration to an older version."""
@require_editor
def post(self, exploration_id):
"""Handles POST requests."""
current_version = self.payload.get('current_version')
revert_to_version = self.payload.get('revert_to_version')
if not isinstance(revert_to_version, int):
raise self.InvalidInputException(
'Expected an integer version to revert to; received %s.' %
revert_to_version)
if not isinstance(current_version, int):
raise self.InvalidInputException(
'Expected an integer current version; received %s.' %
current_version)
if revert_to_version < 1 or revert_to_version >= current_version:
raise self.InvalidInputException(
'Cannot revert to version %s from version %s.' %
(revert_to_version, current_version))
exp_services.revert_exploration(
self.user_id, exploration_id, current_version, revert_to_version)
self.render_json({})
class ExplorationStatisticsHandler(EditorHandler):
"""Returns statistics for an exploration."""
def get(self, exploration_id, exploration_version):
"""Handles GET requests."""
try:
exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
self.render_json(stats_services.get_exploration_stats(
exploration_id, exploration_version))
class ExplorationStatsVersionsHandler(EditorHandler):
"""Returns statistics versions for an exploration."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
self.render_json({
'versions': stats_services.get_versions_for_exploration_stats(
exploration_id)})
class StateRulesStatsHandler(EditorHandler):
"""Returns detailed learner answer statistics for a state."""
def get(self, exploration_id, escaped_state_name):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
except:
raise self.PageNotFoundException
state_name = self.unescape_state_name(escaped_state_name)
if state_name not in exploration.states:
logging.error('Could not find state: %s' % state_name)
logging.error('Available states: %s' % exploration.states.keys())
raise self.PageNotFoundException
self.render_json({
'rules_stats': stats_services.get_state_rules_stats(
exploration_id, state_name)
})
class ImageUploadHandler(EditorHandler):
"""Handles image uploads."""
@require_editor
def post(self, exploration_id):
"""Saves an image uploaded by a content creator."""
raw = self.request.get('image')
filename = self.payload.get('filename')
if not raw:
raise self.InvalidInputException('No image supplied')
file_format = imghdr.what(None, h=raw)
if file_format not in feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS:
allowed_formats = ', '.join(
feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS.keys())
raise Exception('Image file not recognized: it should be in '
'one of the following formats: %s.' %
allowed_formats)
if not filename:
raise self.InvalidInputException('No filename supplied')
if '/' in filename or '..' in filename:
raise self.InvalidInputException(
'Filenames should not include slashes (/) or consecutive dot '
'characters.')
if '.' in filename:
dot_index = filename.rfind('.')
primary_name = filename[:dot_index]
extension = filename[dot_index + 1:].lower()
if (extension not in
feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS[file_format]):
raise self.InvalidInputException(
'Expected a filename ending in .%s; received %s' %
(file_format, filename))
else:
primary_name = filename
filepath = '%s.%s' % (primary_name, file_format)
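        # For example, an upload named (hypothetically) 'logo.PNG' whose bytes are
        # detected as 'png' keeps 'logo' as the primary name; assuming 'png' is
        # listed in feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS['png'], it is
        # stored below as 'logo.png'.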
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
if fs.isfile(filepath):
raise self.InvalidInputException(
'A file with the name %s already exists. Please choose a '
'different name.' % filepath)
fs.commit(self.user_id, filepath, raw)
self.render_json({'filepath': filepath})
class ChangeListSummaryHandler(EditorHandler):
"""Returns a summary of a changelist applied to a given exploration."""
@require_editor
def post(self, exploration_id):
"""Handles POST requests."""
change_list = self.payload.get('change_list')
version = self.payload.get('version')
current_exploration = exp_services.get_exploration_by_id(
exploration_id)
if version != current_exploration.version:
# TODO(sll): Improve this.
self.render_json({
'error': (
'Sorry! Someone else has edited and committed changes to '
'this exploration while you were editing it. We suggest '
'opening another browser tab -- which will load the new '
'version of the exploration -- then transferring your '
'changes there. We will try to make this easier in the '
'future -- we have not done it yet because figuring out '
'how to merge different people\'s changes is hard. '
'(Trying to edit version %s, but the current version is '
'%s.).' % (version, current_exploration.version)
)
})
else:
utils.recursively_remove_key(change_list, '$$hashKey')
summary = exp_services.get_summary_of_change_list(
current_exploration, change_list)
updated_exploration = exp_services.apply_change_list(
exploration_id, change_list)
warning_message = ''
try:
updated_exploration.validate(strict=True)
except utils.ValidationError as e:
warning_message = unicode(e)
self.render_json({
'summary': summary,
'warning_message': warning_message
})
class StartedTutorialEventHandler(EditorHandler):
"""Records that this user has started the state editor tutorial."""
    def post(self):
        """Handles POST requests."""
user_services.record_user_started_state_editor_tutorial(self.user_id)
|
apache-2.0
|
alistairlow/tensorflow
|
tensorflow/contrib/slim/python/slim/nets/resnet_v2.py
|
31
|
14548
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Typical use:
from tensorflow.contrib.slim.python.slim.nets import
resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
resnet_arg_scope = resnet_utils.resnet_arg_scope
@add_arg_scope
def bottleneck(inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None):
"""Bottleneck residual unit variant with BN before convolutions.
This is the full preactivation residual unit variant proposed in [2]. See
Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
variant which has an extra bottleneck layer.
When putting together two consecutive ResNet blocks that use this unit, one
should use stride = 2 in the last unit of the first block.
Args:
inputs: A tensor of size [batch, height, width, channels].
depth: The depth of the ResNet unit output.
depth_bottleneck: The depth of the bottleneck layers.
stride: The ResNet unit's stride. Determines the amount of downsampling of
the units output compared to its input.
rate: An integer, rate for atrous convolution.
outputs_collections: Collection to add the ResNet unit output.
scope: Optional variable_scope.
Returns:
The ResNet unit's output.
"""
with variable_scope.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = layers.batch_norm(
inputs, activation_fn=nn_ops.relu, scope='preact')
if depth == depth_in:
shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = layers_lib.conv2d(
preact,
depth, [1, 1],
stride=stride,
normalizer_fn=None,
activation_fn=None,
scope='shortcut')
residual = layers_lib.conv2d(
preact, depth_bottleneck, [1, 1], stride=1, scope='conv1')
residual = resnet_utils.conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope='conv2')
residual = layers_lib.conv2d(
residual,
depth, [1, 1],
stride=1,
normalizer_fn=None,
activation_fn=None,
scope='conv3')
output = shortcut + residual
return utils.collect_named_outputs(outputs_collections, sc.name, output)
def resnet_v2(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope=None):
"""Generator for v2 (preactivation) ResNet models.
This function generates a family of ResNet v2 models. See the resnet_v2_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks. If None
we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it. If excluded, `inputs` should be the
results of an activation-less convolution.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is None, then
net is the output of the last ResNet block, potentially after global
average pooling. If num_classes is not None, net contains the pre-softmax
activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with variable_scope.variable_scope(
scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with arg_scope(
[layers_lib.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with arg_scope([layers.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
# We do not include batch normalization or activation functions in
# conv1 because the first ResNet unit will perform these. Cf.
# Appendix of [2].
with arg_scope(
[layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = layers.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
# This is needed because the pre-activation variant does not have batch
# normalization or activation functions in the residual unit output. See
# Appendix of [2].
net = layers.batch_norm(
net, activation_fn=nn_ops.relu, scope='postnorm')
if global_pool:
# Global average pooling.
net = math_ops.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
if num_classes is not None:
net = layers_lib.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='logits')
# Convert end_points_collection into a dictionary of end_points.
end_points = utils.convert_collection_to_dict(end_points_collection)
if num_classes is not None:
end_points['predictions'] = layers.softmax(net, scope='predictions')
return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_block(scope, base_depth, num_units, stride):
"""Helper function for creating a resnet_v2 bottleneck block.
Args:
scope: The scope of the block.
base_depth: The depth of the bottleneck layer for each unit.
num_units: The number of units in the block.
stride: The stride of the block, implemented as a stride in the last unit.
All other units have stride=1.
Returns:
A resnet_v2 bottleneck block.
"""
return resnet_utils.Block(scope, bottleneck, [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': 1
}] * (num_units - 1) + [{
'depth': base_depth * 4,
'depth_bottleneck': base_depth,
'stride': stride
}])
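# A minimal usage sketch, not part of the original module: assuming the helpers
# above, it shows how resnet_v2_block() instances compose into a custom network.
# The name `tiny_resnet_v2` and the block sizes are hypothetical.
def tiny_resnet_v2(inputs,
                   num_classes=None,
                   is_training=True,
                   scope='tiny_resnet_v2'):
  """Toy two-block preactivation ResNet built from resnet_v2_block()."""
  blocks = [
      resnet_v2_block('block1', base_depth=16, num_units=2, stride=2),
      resnet_v2_block('block2', base_depth=32, num_units=2, stride=1),
  ]
  return resnet_v2(
      inputs,
      blocks,
      num_classes,
      is_training,
      global_pool=True,
      include_root_block=True,
      scope=scope)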
def resnet_v2_50(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_50'):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=6, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_101(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_101'):
"""ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=23, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_152(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_152'):
"""ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=8, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
def resnet_v2_200(inputs,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
reuse=None,
scope='resnet_v2_200'):
"""ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v2_block('block2', base_depth=128, num_units=24, stride=2),
resnet_v2_block('block3', base_depth=256, num_units=36, stride=2),
resnet_v2_block('block4', base_depth=512, num_units=3, stride=1),
]
return resnet_v2(
inputs,
blocks,
num_classes,
is_training,
global_pool,
output_stride,
include_root_block=True,
reuse=reuse,
scope=scope)
|
apache-2.0
|
alirizakeles/zato
|
code/zato-web-admin/src/zato/admin/web/views/load_balancer.py
|
1
|
11755
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import json, logging
from traceback import format_exc
from xmlrpclib import Fault
# OrderedDict is new in 2.7
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
# Django
from django.http import HttpResponse, HttpResponseServerError
from django.template.response import TemplateResponse
# Zato
from zato.admin.web import from_utc_to_user
from zato.admin.web.forms.load_balancer import ManageLoadBalancerForm, RemoteCommandForm, \
ManageLoadBalancerSourceCodeForm
from zato.admin.web.views import get_lb_client, method_allowed
from zato.common.haproxy import haproxy_stats, Config
from zato.common.odb.model import Cluster
logger = logging.getLogger(__name__)
def _haproxy_alive(client, lb_use_tls):
    """ Checks whether HAProxy is up and running. Sets 'status' to True if
    HAProxy is alive; otherwise sets 'status' to False and fills in the
    'error' attribute with the details of the exception caught.
"""
haproxy_alive = {}
try:
client.is_haproxy_alive(lb_use_tls)
except Exception, e:
haproxy_alive["status"] = False
haproxy_alive["error"] = format_exc(e)
else:
haproxy_alive["status"] = True
return haproxy_alive
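# For example, a successful check returns {"status": True}, while a failed one
# returns {"status": False, "error": "<formatted traceback>"}.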
def _haproxy_stat_config(client=None, lb_config=None):
""" Return the configuration of the HAProxy HTTP stats interface.
"""
if not lb_config:
lb_config = client.get_config()
# Stats URI is optional
try:
stats_uri = lb_config["defaults"]["stats_uri"]
except KeyError:
return None, None
else:
stats_port = lb_config["frontend"]["front_http_plain"]["bind"]["port"]
return stats_uri, stats_port
def _get_validate_save_flag(cluster_id, req_post):
    """ A convenience function for checking whether we were told to validate &
    save a config or only to validate it.
"""
if "validate_save" in req_post:
save = True
elif "validate" in req_post:
save = False
else:
msg = "Expected a flag indicating what to do with input data. cluster_id:[{cluster_id}] req.POST:[{post}]"
msg = msg.format(cluster_id=cluster_id, post=req_post)
logger.error(msg)
raise Exception(msg)
return save
def _client_validate_save(req, func, *args):
""" A convenience function for validating or validating & saving a config
file on a remote SSL XML-RPC server.
"""
save = args[1]
has_error = False
try:
func(*args)
except Fault, e:
msg = e.faultString
has_error = True
except Exception, e:
msg = format_exc(e)
has_error = True
if has_error:
msg = 'Caught an exception while invoking the load-balancer agent, e:`{}`'.format(msg)
logger.error(msg)
return HttpResponseServerError(msg)
else:
if save:
return HttpResponse('Config validated and saved successfully')
else:
return HttpResponse("Config is valid, it's safe to save it")
@method_allowed("GET", "POST")
def remote_command(req, cluster_id):
""" Execute a HAProxy command.
"""
cluster = req.zato.odb.query(Cluster).filter_by(id=cluster_id).one()
client = get_lb_client(cluster)
haproxy_alive = _haproxy_alive(client, req.zato.lb_use_tls)
cluster.stats_uri, cluster.stats_port = _haproxy_stat_config(client=client)
# We need to know the HAProxy version before we can build up the select box
# on the form.
commands = haproxy_stats[("1", "3")]
version_info = tuple(client.haproxy_version_info())
if version_info >= ("1", "4"):
commands.update(haproxy_stats[("1", "4")])
if req.method == "POST":
result = client.execute_command(req.POST["command"], req.POST["timeout"], req.POST.get("extra", ""))
if not result.strip():
result = "(empty result)"
initial={"result":result}
for k, v in req.POST.items():
if k != "result":
initial[k] = v
form = RemoteCommandForm(commands, initial)
else:
form = RemoteCommandForm(commands)
return_data = {"form":form, "cluster":cluster, "haproxy_alive":haproxy_alive, "lb_use_tls": req.zato.lb_use_tls}
return TemplateResponse(req, 'zato/load_balancer/remote_command.html', return_data)
@method_allowed("GET")
def manage(req, cluster_id):
""" GUI for managing HAProxy configuration.
"""
cluster = req.zato.odb.query(Cluster).filter_by(id=cluster_id).one()
client = get_lb_client(cluster)
lb_start_time = from_utc_to_user(client.get_uptime_info(), req.zato.user_profile)
lb_config = client.get_config()
lb_work_config = client.get_work_config()
lb_work_config['verify_fields'] = ', '.join(['%s=%s' % (k,v) for (k, v) in sorted(lb_work_config['verify_fields'].items())])
form_data = {
'global_log_host': lb_config['global_']['log']['host'],
'global_log_port': lb_config['global_']['log']['port'],
'global_log_level': lb_config['global_']['log']['level'],
'global_log_facility': lb_config['global_']['log']['facility'],
'timeout_connect': lb_config['defaults']['timeout_connect'],
'timeout_client': lb_config['defaults']['timeout_client'],
'timeout_server': lb_config['defaults']['timeout_server'],
'http_plain_bind_address':lb_config['frontend']['front_http_plain']['bind']['address'],
'http_plain_bind_port':lb_config['frontend']['front_http_plain']['bind']['port'],
'http_plain_log_http_requests':lb_config['frontend']['front_http_plain']['log_http_requests'],
'http_plain_maxconn':lb_config['frontend']['front_http_plain']['maxconn'],
'http_plain_monitor_uri':lb_config['frontend']['front_http_plain']['monitor_uri'],
}
backends = {}
for backend_type in lb_config['backend']:
for name in lb_config['backend'][backend_type]:
# Is it a server?
if 'address' in lb_config['backend'][backend_type][name]:
if not name in backends:
backends[name] = {}
backends[name][backend_type] = {}
backends[name][backend_type]['address'] = lb_config['backend'][backend_type][name]['address']
backends[name][backend_type]['port'] = lb_config['backend'][backend_type][name]['port']
backends[name][backend_type]['extra'] = lb_config['backend'][backend_type][name]['extra']
backends = OrderedDict(sorted(backends.items(), key=lambda t: t[0]))
form = ManageLoadBalancerForm(initial=form_data)
haproxy_alive = _haproxy_alive(client, req.zato.lb_use_tls)
cluster.stats_uri, cluster.stats_port = _haproxy_stat_config(lb_config=lb_config)
servers_state = client.get_servers_state()
return_data = {'cluster':cluster, 'lb_start_time':lb_start_time,
'lb_config':lb_config, 'lb_work_config':lb_work_config,
'form':form, 'backends':backends, 'haproxy_alive':haproxy_alive,
'servers_state':servers_state, 'lb_use_tls': req.zato.lb_use_tls}
return TemplateResponse(req, 'zato/load_balancer/manage.html', return_data)
@method_allowed("POST")
def validate_save(req, cluster_id):
""" A common handler for both validating and saving a HAProxy config using
a pretty GUI form.
"""
save = _get_validate_save_flag(cluster_id, req.POST)
cluster = req.zato.odb.query(Cluster).filter_by(id=cluster_id).one()
client = get_lb_client(cluster)
lb_config = Config()
lb_config.global_["log"] = {}
lb_config.frontend["front_http_plain"] = {}
lb_config.frontend["front_http_plain"]["bind"] = {}
lb_config.global_["log"]["host"] = req.POST["global_log_host"]
lb_config.global_["log"]["port"] = req.POST["global_log_port"]
lb_config.global_["log"]["level"] = req.POST["global_log_level"]
lb_config.global_["log"]["facility"] = req.POST["global_log_facility"]
lb_config.defaults["timeout_connect"] = req.POST["timeout_connect"]
lb_config.defaults["timeout_client"] = req.POST["timeout_client"]
lb_config.defaults["timeout_server"] = req.POST["timeout_server"]
lb_config.frontend["front_http_plain"]["bind"]["address"] = req.POST["http_plain_bind_address"]
lb_config.frontend["front_http_plain"]["bind"]["port"] = req.POST["http_plain_bind_port"]
lb_config.frontend["front_http_plain"]["log_http_requests"] = req.POST["http_plain_log_http_requests"]
lb_config.frontend["front_http_plain"]["maxconn"] = req.POST["http_plain_maxconn"]
lb_config.frontend["front_http_plain"]["monitor_uri"] = req.POST["http_plain_monitor_uri"]
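    # For example (hypothetical field name): a POST key such as
    # "bck_http_plain_address_server1" starts with "bck_http" and splits on the
    # token "address" into ("bck_http_plain_", "_server1"); after stripping the
    # leftover underscores below, its value is stored under
    # lb_config.backend["bck_http_plain"]["server1"]["address"].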
for key, value in req.POST.items():
if key.startswith("bck_http"):
for token in("address", "port", "extra"):
splitted = key.split(token)
if splitted[0] == key:
continue # We don't have the token in that key.
backend_type, backend_name = splitted
# Get rid of underscores left over from the .split above.
backend_type = backend_type[:-1]
backend_name = backend_name[1:]
lb_config.backend.setdefault(backend_type, {})
lb_config.backend[backend_type].setdefault(backend_name, {})
lb_config.backend[backend_type][backend_name][token] = value
# Invoke the LB agent
return _client_validate_save(req, client.validate_save, lb_config, save)
@method_allowed("GET")
def manage_source_code(req, cluster_id):
""" Source code view for managing HAProxy configuration.
"""
cluster = req.zato.odb.query(Cluster).filter_by(id=cluster_id).one()
client = get_lb_client(cluster)
cluster.stats_uri, cluster.stats_port = _haproxy_stat_config(client=client)
haproxy_alive = _haproxy_alive(client, req.zato.lb_use_tls)
source_code = client.get_config_source_code()
form = ManageLoadBalancerSourceCodeForm(initial={"source_code":source_code})
return_data = {"form": form, "haproxy_alive":haproxy_alive, "cluster":cluster, "lb_use_tls": req.zato.lb_use_tls}
return TemplateResponse(req, 'zato/load_balancer/manage_source_code.html', return_data)
@method_allowed("POST")
def validate_save_source_code(req, cluster_id):
""" A common handler for both validating and saving a HAProxy config using
the raw HAProxy config file's view.
"""
cluster = req.zato.odb.query(Cluster).filter_by(id=cluster_id).one()
save = _get_validate_save_flag(cluster_id, req.POST)
# Invoke the LB agent
client = get_lb_client(cluster)
return _client_validate_save(req, client.validate_save_source_code, req.POST["source_code"], save)
@method_allowed("GET")
def get_addresses(req, cluster_id):
""" Return JSON-formatted addresses known to HAProxy.
"""
cluster = req.zato.odb.query(Cluster).filter_by(id=cluster_id).one()
client = get_lb_client(cluster)
addresses = {}
addresses["cluster"] = {"lb_host": cluster.lb_host, "lb_agent_port":cluster.lb_agent_port}
try:
lb_config = client.get_config()
except Exception, e:
msg = "Could not get load balancer's config, client:[{client!r}], e:[{e}]".format(client=client,
e=format_exc(e))
logger.error(msg)
lb_config = None
addresses["cluster"]["lb_config"] = lb_config
return HttpResponse(json.dumps(addresses))
|
gpl-3.0
|
hapylestat/appcore
|
apputils/views.py
|
1
|
4436
|
# coding=utf-8
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# Copyright (c) 2018 Reishin <[email protected]>
class BaseView(object):
"""
  BaseView is a basic class which provides Object-to-Dict and Dict-to-Object conversion
"""
def __init__(self, serialized_obj=None, ignore_non_existing=False, **kwargs):
"""
    :type serialized_obj dict|None
:type ignore_non_existing bool
"""
if len(kwargs) > 0:
self.deserialize(kwargs, ignore_non_existing=ignore_non_existing)
if serialized_obj:
self.deserialize(serialized_obj, ignore_non_existing=ignore_non_existing)
@classmethod
def _isclass(cls, obj, clazz=object):
try:
return issubclass(obj, clazz)
except TypeError:
return False
@classmethod
def deserialize_dict(cls, obj=None, ignore_non_existing=False):
"""
:type obj dict|None
:type ignore_non_existing bool
:rtype cls
"""
return cls().deserialize(obj, ignore_non_existing)
def deserialize(self, obj=None, ignore_non_existing=False):
"""
:type obj dict|None
:type ignore_non_existing bool
"""
if not isinstance(obj, dict):
if ignore_non_existing:
return
raise TypeError("Wrong data '{}' passed for '{}' deserialization".format(obj, self.__class__.__name__))
definitions = {k: v for k, v in self.__class__.__dict__.items() if k[:1] != "_"}
def_property_keys = set(definitions.keys())
property_keys = set(obj.keys())
existing_keys = def_property_keys & property_keys
non_defined_keys = property_keys - def_property_keys
non_existing_keys = def_property_keys - property_keys
if not ignore_non_existing and non_defined_keys:
raise TypeError(self.__class__.__name__ + " doesn't contain properties: {}".format(", ".join(non_defined_keys)))
for k in existing_keys:
v = obj[k]
attr_type = definitions[k]
try:
if isinstance(attr_type, list) and self._isclass(attr_type[0], BaseView):
if isinstance(v, list):
obj_list = [attr_type[0](serialized_obj=v_item, ignore_non_existing=ignore_non_existing) for v_item in v]
else:
obj_list = [attr_type[0](serialized_obj=v, ignore_non_existing=ignore_non_existing)]
self.__setattr__(k, obj_list)
elif self._isclass(attr_type, BaseView):
self.__setattr__(k, attr_type(v))
else:
self.__setattr__(k, v)
except IndexError:
self.__setattr__(k, v) # check test_empty_view_deserialization test suite for test-case
for k in non_existing_keys:
attr_type = definitions[k]
if attr_type is None:
self.__setattr__(k, None)
elif isinstance(attr_type, (list, set, tuple, dict)) and len(attr_type) == 0:
self.__setattr__(k, attr_type.__class__())
elif isinstance(attr_type, (list, set, tuple)) and self._isclass(attr_type[0], BaseView):
self.__setattr__(k, attr_type.__class__())
else:
self.__setattr__(k, attr_type.__class__(attr_type))
return self
def serialize(self, null_values=False):
"""
:type null_values bool
:rtype: dict
"""
ret = {}
property_dict = dict(self.__class__.__dict__) # contain view definition defaults
property_dict.update(self.__dict__) # overrides done at runtime
for k in property_dict.keys():
if k[:1] == "_" or (property_dict[k] is None and not null_values):
continue
v = property_dict[k]
if isinstance(v, list):
v_items = []
for v_item in v:
if self._isclass(v_item.__class__, BaseView):
v_item_val = v_item.serialize(null_values=null_values)
if not null_values and len(v_item_val) == 0:
continue
v_items.append(v_item_val)
          elif self._isclass(v_item, BaseView): # when a class was passed instead of an instance
pass
else:
v_items.append(v_item)
ret[k] = v_items
elif self._isclass(v.__class__, BaseView):
ret[k] = v.serialize(null_values=null_values)
      elif self._isclass(v, BaseView): # when a class was passed instead of an instance
pass
else:
ret[k] = v
return ret
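# A minimal usage sketch, not part of the original module: it assumes only the
# BaseView class defined above; the view classes, field names, and sample data
# below are hypothetical and exist purely for illustration.
class _AddressView(BaseView):
  city = None
  street = None
class _PersonView(BaseView):
  name = None
  age = 0
  addresses = [_AddressView]
def _base_view_example():
  """Round-trips a dict through _PersonView deserialization/serialization."""
  data = {"name": "Alice", "age": 30, "addresses": [{"city": "Oslo", "street": "Main"}]}
  person = _PersonView(serialized_obj=data)
  assert person.addresses[0].city == "Oslo"
  return person.serialize()  # -> dict with the same "name", "age" and "addresses"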
|
lgpl-3.0
|
caisq/tensorflow
|
tensorflow/python/eager/graph_callable.py
|
2
|
17077
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Decorator that produces a callable object that executes a TensorFlow graph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import tape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _default_initializer(name, shape, dtype):
"""The default initializer for variables."""
# pylint: disable=protected-access
store = variable_scope._get_default_variable_store()
initializer = store._get_default_initializer(name, shape=shape, dtype=dtype)
# pylint: enable=protected-access
return initializer[0]
class _CapturedVariable(object):
"""Variable captured by graph_callable.
Internal to the implementation of graph_callable. Created only by
_VariableCapturingScope and used only to read the variable values when calling
the function after the variables are initialized.
"""
def __init__(self, name, initializer, shape, dtype, trainable):
self.name = name
if initializer is None:
initializer = _default_initializer(name, shape, dtype)
initial_value = lambda: initializer(shape, dtype=dtype)
with context.eager_mode():
self.variable = resource_variable_ops.ResourceVariable(
initial_value=initial_value, name=name, dtype=dtype,
trainable=trainable)
self.shape = shape
self.dtype = dtype
self.placeholder = None
self.trainable = trainable
def read(self, want_gradients=True):
if want_gradients and self.trainable:
v = tape.watch_variable(self.variable)
else:
v = self.variable
return v.read_value()
class _VariableCapturingScope(object):
"""Variable-scope-like object which captures tf.get_variable calls.
This is responsible for the main difference between the initialization version
of a function object and the calling version of a function object.
capturing_scope replaces calls to tf.get_variable with placeholder tensors to
be fed the variable's current value. TODO(apassos): these placeholders should
instead be objects implementing a similar API to tf.Variable, for full
compatibility.
initializing_scope replaces calls to tf.get_variable with creation of
variables and initialization of their values. This allows eventual support of
initialized_value and friends.
TODO(apassos): once the eager mode layers API is implemented support eager
func-to-object as well.
"""
def __init__(self):
self.variables = {}
self.tf_variables = {}
@contextlib.contextmanager
def capturing_scope(self):
"""Context manager to capture variable creations.
Replaces variable accesses with placeholders.
Yields:
nothing
"""
# TODO(apassos) ignoring the regularizer and partitioner here; figure out
# how to deal with these.
def _custom_getter( # pylint: disable=missing-docstring
getter=None,
name=None,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None, # pylint: disable=redefined-outer-name
partitioner=None,
validate_shape=True,
use_resource=None,
aggregation=variable_scope.VariableAggregation.NONE,
synchronization=variable_scope.VariableSynchronization.AUTO):
del getter, regularizer, partitioner, validate_shape, use_resource, dtype
del collections, initializer, trainable, reuse, caching_device, shape
del aggregation, synchronization
assert name in self.variables
v = self.variables[name]
return v.variable
scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
yield
@contextlib.contextmanager
def initializing_scope(self):
"""Context manager to capture variable creations.
Forcibly initializes all created variables.
Yields:
nothing
"""
# TODO(apassos) ignoring the regularizer and partitioner here; figure out
# how to deal with these.
def _custom_getter( # pylint: disable=missing-docstring
getter=None,
name=None,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None, # pylint: disable=redefined-outer-name
partitioner=None,
validate_shape=True,
use_resource=None,
aggregation=variable_scope.VariableAggregation.NONE,
synchronization=variable_scope.VariableSynchronization.AUTO):
del getter, regularizer, collections, caching_device, partitioner
del use_resource, validate_shape, aggregation, synchronization
if name in self.tf_variables:
if reuse:
return self.tf_variables[name].initialized_value()
else:
raise ValueError("Specified reuse=%s but tried to reuse variables."
% reuse)
# TODO(apassos): ensure this is on the same device as above
v = _CapturedVariable(name, initializer, shape, dtype, trainable)
self.variables[name] = v
graph_mode_resource = v.variable.handle
if initializer is None:
initializer = _default_initializer(name, shape, dtype)
resource_variable_ops.shape_safe_assign_variable_handle(
graph_mode_resource, v.variable.shape, initializer(shape, dtype))
return v.variable
scope = variable_scope.get_variable_scope()
with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
yield
class _InitializingFunctionObject(object):
"""Responsible for deciding which version of func-to-object to call.
call_fn is the version which calls the function with the current values of the
variables and init_fn is the version which calls the function to initialize
all variables.
TODO(apassos): figure out a way to support initializing only _some_
variables. This requires a way to pull out a variable's initialization code
from the graph, which might not be possible in general.
"""
def __init__(self, call_fn, init_fn, shape_and_dtypes):
self._init_fn = init_fn
self._call_fn = call_fn
self.shape_and_dtypes = shape_and_dtypes
self.flattened_shapes = [tensor_shape.as_shape(sd.shape) for sd in
nest.flatten(self.shape_and_dtypes)]
@property
def variables(self):
return self._call_fn.variables
def __call__(self, *args):
nest.assert_same_structure(self.shape_and_dtypes, args, check_types=False)
if not all([
shape.is_compatible_with(arg.shape)
for shape, arg in zip(self.flattened_shapes, nest.flatten(args))
]):
raise ValueError(
"Declared shapes do not match argument shapes: Expected %s, found %s."
% (self.flattened_shapes, [arg.shape for arg in nest.flatten(args)]))
initialized = [resource_variable_ops.var_is_initialized_op(
v.handle).numpy() for v in self._call_fn.variables]
if all(x for x in initialized):
for v in self._call_fn.variables:
if v.trainable:
tape.watch_variable(v)
return self._call_fn(*args)
elif all(not x for x in initialized):
return self._init_fn(*args)
else:
raise ValueError("Some, but not all, variables are initialized.")
def _get_graph_callable_inputs(shape_and_dtypes):
"""Maps specified shape_and_dtypes to graph inputs."""
ret = []
for x in shape_and_dtypes:
if isinstance(x, ShapeAndDtype):
ret.append(array_ops.placeholder(x.dtype, x.shape))
elif isinstance(x, (tuple, list)):
ret.append(_get_graph_callable_inputs(x))
else:
raise errors.InvalidArgumentError(
None, None, "Expected the argument to @graph_callable to be a "
"(possibly nested) list or tuple of ShapeAndDtype objects, "
"but got an object of type: %s" % type(x))
return tuple(ret) if isinstance(shape_and_dtypes, tuple) else ret
def _graph_callable_internal(func, shape_and_dtypes):
"""Defines and returns a template version of func.
Under the hood we make two function objects, each wrapping a different version
of the graph-mode code. One version immediately runs variable initialization
before making the variable's Tensors available for use, while the other
version replaces the Variables with placeholders which become function
arguments and get the current variable's value.
Limitations in (2) and (4) are because this does not implement a graph-mode
  Variable class which has a convert_to_tensor(as_ref=True) method and an
initialized_value method. This is fixable.
Args:
func: The tfe Python function to compile.
shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype objects.
Raises:
ValueError: If any one of func's outputs is not a Tensor.
Returns:
Callable graph object.
"""
container = tf_ops.get_default_graph()._container # pylint: disable=protected-access
graph_key = tf_ops.get_default_graph()._graph_key # pylint: disable=protected-access
with context.graph_mode():
# This graph will store both the initialization and the call version of the
# wrapped function. It will later be used by the backprop code to build the
# backprop graph, if necessary.
captures = {}
tmp_graph = function.CapturingGraph(captures)
# Inherit the graph key from the original graph to ensure optimizers don't
# misbehave.
tmp_graph._container = container # pylint: disable=protected-access
tmp_graph._graph_key = graph_key # pylint: disable=protected-access
with tmp_graph.as_default():
# Placeholders for the non-variable inputs.
func_inputs = _get_graph_callable_inputs(shape_and_dtypes)
func_num_args = len(tf_inspect.getargspec(func).args)
if len(func_inputs) != func_num_args:
raise TypeError("The number of arguments accepted by the decorated "
"function `%s` (%d) must match the number of "
"ShapeAndDtype objects passed to the graph_callable() "
"decorator (%d)." %
(func.__name__, func_num_args, len(func_inputs)))
# First call the function to generate a graph which can initialize all
# variables. As a side-effect this will populate the variable capturing
# scope's view of which variables exist.
variable_captures = _VariableCapturingScope()
with variable_captures.initializing_scope(
), function.AutomaticControlDependencies() as a:
func_outputs = func(*func_inputs)
outputs_list = nest.flatten(func_outputs)
for i, x in enumerate(outputs_list):
if x is not None:
outputs_list[i] = a.mark_as_return(x)
if len(outputs_list) == 1 and outputs_list[0] is None:
outputs_list = []
output_shapes = [x.shape for x in outputs_list]
if not all(isinstance(x, tf_ops.Tensor) for x in outputs_list):
raise ValueError("Found non-tensor output in %s" % str(outputs_list))
initializing_operations = tmp_graph.get_operations()
# Call the function again, now replacing usages of variables with
# placeholders. This assumes the variable capturing scope created above
# knows about all variables.
tmp_graph.clear_resource_control_flow_state()
with variable_captures.capturing_scope(
), function.AutomaticControlDependencies() as a:
captured_outputs = func(*func_inputs)
captured_outlist = nest.flatten(captured_outputs)
for i, x in enumerate(captured_outlist):
if x is not None:
captured_outlist[i] = a.mark_as_return(x)
capturing_operations = tmp_graph.get_operations()[
len(initializing_operations):]
sorted_variables = sorted(variable_captures.variables.values(),
key=lambda x: x.name)
ids = list(sorted(captures.keys()))
if ids:
extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
else:
extra_inputs = []
extra_placeholders = []
flat_inputs = [x for x in nest.flatten(func_inputs)
if isinstance(x, tf_ops.Tensor)]
  placeholder_inputs = flat_inputs + list(extra_placeholders)
func_def_outputs = [x for x in outputs_list if isinstance(x, tf_ops.Tensor)]
initialization_name = function._inference_name(func.__name__) # pylint: disable=protected-access
# TODO(ashankar): Oh lord, forgive me for this lint travesty.
# Also, what about the gradient registry of these functions? Those need to be
# addressed as well.
for f in tmp_graph._functions.values(): # pylint: disable=protected-access
function._register(f._c_func.func) # pylint: disable=protected-access
initializer_function = function.GraphModeFunction(
initialization_name,
placeholder_inputs,
extra_inputs,
tmp_graph,
initializing_operations,
func_def_outputs,
func_outputs,
output_shapes)
capture_func_def_outputs = [
x for x in captured_outlist if isinstance(x, tf_ops.Tensor)]
captured_function_name = function._inference_name(func.__name__) # pylint: disable=protected-access
captured_function = function.GraphModeFunction(
captured_function_name,
placeholder_inputs,
extra_inputs,
tmp_graph,
capturing_operations,
capture_func_def_outputs,
captured_outputs,
output_shapes,
variables=[x.variable for x in sorted_variables])
return _InitializingFunctionObject(captured_function, initializer_function,
shape_and_dtypes)
class ShapeAndDtype(object):
"""Data type that packages together shape and type information.
Used for arguments to graph callables. See graph_callable() for an example.
"""
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
def graph_callable(shape_and_dtypes):
"""Decorator that produces a callable that executes a TensorFlow graph.
When applied on a function that constructs a TensorFlow graph, this decorator
produces a callable object that:
1. Executes the graph when invoked. The first call will initialize any
variables defined in the graph.
2. Provides a .variables() method to return the list of TensorFlow variables
defined in the graph.
Note that the wrapped function is not allowed to change the values of the
variables, just use them.
The return value of the wrapped function must be one of the following:
(1) None, (2) a Tensor, or (3) a possibly nested sequence of Tensors.
Example:
```python
  @tfe.graph_callable([tfe.ShapeAndDtype(shape=(), dtype=dtypes.float32)])
def foo(x):
v = tf.get_variable('v', initializer=tf.ones_initializer(), shape=())
return v + x
ret = foo(tfe.Tensor(2.0)) # `ret` here is a Tensor with value 3.0.
foo.variables[0].assign(7.0) # Modify the value of variable `v`.
ret = foo(tfe.Tensor(2.0)) # `ret` here now is a Tensor with value 9.0.
```
Args:
shape_and_dtypes: A possibly nested list or tuple of ShapeAndDtype objects
that specifies shape and type information for each of the callable's
arguments. The length of this list must be equal to the number of
arguments accepted by the wrapped function.
Returns:
A callable graph object.
"""
# TODO(alive,apassos): support initialized_value and friends from tf.Variable.
assert context.executing_eagerly(), (
"graph_callable can only be used when Eager execution is enabled.")
def decorator(func):
return tf_decorator.make_decorator(func,
_graph_callable_internal(
func, shape_and_dtypes))
return decorator
|
apache-2.0
|
imron/scalyr-agent-2
|
scripts/circleci/upload_circleci_artifacts.py
|
1
|
13293
|
#!/usr/bin/env python
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Script which triggers circleci pipeline for particular git branch and uploads its job artifacts after.
#
# It depends on the following environment variables being set:
# - CIRCLE_API_TOKEN - CircleCI API access token.
# - AGENT_PROJECT_NAME - The name of the github project.
#
# Usage:
#
# The script expects '--workflow', '--job', and '--artifact-path' for each desired artifact file in order to find it.
#
# python upload_circleci_artifacts.py --branch master --output-path <path>\
# --workflow=package-tests --job=build-windows-package --artifact-path=".*\.msi" \
# --workflow=package-tests --job=build-linux-packages --artifact-path=".*\.rpm" \
# --workflow=package-tests --job=build-linux-packages --artifact-path=".*\.deb" \
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from scalyr_agent import compat
from io import open
from six.moves import zip
if False:
from typing import Dict
from typing import List
from typing import Any
from typing import Generator
from typing import Tuple
import argparse
import os
import itertools
import operator
import datetime
import time
import re
import json
import requests
CIRCLE_API_URL = "https://circleci.com/api/v2"
# 15 min
CIRCLE_WAIT_TIMEOUT = 60 * 15
try:
CIRCLE_API_TOKEN = compat.os_environ_unicode["CIRCLE_API_TOKEN"]
except KeyError:
print("Environment variable 'CIRCLE_API_TOKEN' is not specified.")
raise
try:
AGENT_REPO_NAME = compat.os_environ_unicode["AGENT_REPO_NAME"]
except KeyError:
print("Environment variable 'AGENT_REPO_NAME' is not specified.")
raise
CIRCLE_API_PROJECT_URL = CIRCLE_API_URL + "/project/gh/scalyr/" + AGENT_REPO_NAME
def _do_request(method, url, **kwargs):
# type: (str, str, **Any) -> requests.Response
headers = kwargs.get("headers", dict())
headers["Circle-Token"] = CIRCLE_API_TOKEN
headers["Accept"] = "application/json"
kwargs["headers"] = headers
with requests.Session() as session:
resp = session.request(method=method, url=url, **kwargs)
resp.raise_for_status()
return resp
def get_request(url, **kwargs):
# type: (str, Dict) -> Dict
resp = _do_request("GET", url, **kwargs)
return resp.json()
def post_request(url, **kwargs):
# type: (str, Dict) -> requests.Response
return _do_request("POST", url, **kwargs)
def get_paginated_collection(url):
# type: (str) -> Generator
"""
    Generator which fetches elements from a CircleCI collection with pagination.
:param url: Url to the collection.
:return: Yields next element in the collection.
"""
next_page_token = None
while True:
resp = get_request(url=url, params={"page-token": next_page_token})
for item in resp["items"]:
yield item
next_page_token = resp["next_page_token"]
if next_page_token is None:
            return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+.
def get_paginated_list(url):
# type: (str) -> List
"""
Fetch the circleci paginated collection as list.
"""
return list(get_paginated_collection(url))
def get_paginated_dict(url, key):
# type: (str, str) -> Dict
"""
Fetch the circleci paginated collection as dict.
:param key: Name of the field of the element which will be used as key for the dictionary.
"""
return {item[key]: item for item in get_paginated_collection(url)}
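# Usage sketch (hypothetical workflow id and job names): fetching the jobs of a
# workflow keyed by their names looks like
#   get_paginated_dict(CIRCLE_API_URL + "/workflow/" + workflow_id + "/job", key="name")
# and returns something of the form {"build-windows-package": {...}, ...}.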
def download_artifact_file(artifact_info, output_path):
# type: (Dict, str) -> None
"""
Download circleci job artifact.
:param artifact_info: Contains information about artifact.
:param output_path: Base output path.
:return:
"""
artifact_output_path = os.path.join(
output_path, os.path.basename(artifact_info["path"]),
)
resp = _do_request(
"GET", url=artifact_info["url"], allow_redirects=True, stream=True
)
with open(artifact_output_path, "wb") as file:
for chunk in resp.iter_content(chunk_size=8192):
file.write(chunk)
def trigger_pipeline(branch_name, pipeline_parameters):
    # type: (str, Dict) -> Dict
"""
Trigger new CircleCI pipeline for the specified branch.
:return: General information about started pipeline from CircleCI.
"""
resp = post_request(
url=CIRCLE_API_PROJECT_URL + "/pipeline",
json={"branch": branch_name, "parameters": pipeline_parameters},
)
resp.raise_for_status()
pipeline_info = resp.json()
return pipeline_info
def wait_for_workflow(workflow_id, timeout_time):
# type: (str, datetime.datetime) -> Dict
"""
    Wait until the specified workflow has finished. If the workflow did not finish successfully, raise an error.
:param timeout_time:
:return: General information about workflow from CircleCI.
"""
while True:
workflow_info = get_request(
url=CIRCLE_API_URL + "/workflow/" + str(workflow_id),
)
workflow_status = workflow_info["status"]
workflow_name = workflow_info["name"]
print("Status: ", workflow_status)
if workflow_status == "success":
print("Workflow '{0}' finished successfully.".format(workflow_name))
return workflow_info
if workflow_status != "running":
raise RuntimeError(
"Workflow '{0}' failed with status '{1}'.".format(
workflow_name, workflow_status
)
)
if datetime.datetime.utcnow() >= timeout_time:
raise RuntimeError(
"Timeout. Can not wait more for workflow '{0}'.".format(workflow_name)
)
print("Wait for workflow '{0}'.".format(workflow_name))
time.sleep(10)
def discard_outdated_workflows(workflow_infos):
# type: (List) -> Dict
"""
    Find workflows with the same name and keep only the latest one.
    A pipeline can contain multiple workflows with the same name
    (this can happen, for example, if a workflow was restarted manually),
    so we need to keep only the latest one.
"""
result = dict()
    # itertools.groupby only groups consecutive items, so sort by name first.
    for name, group in itertools.groupby(
        sorted(workflow_infos, key=operator.itemgetter("name")),
        key=operator.itemgetter("name")
    ):
# get workflow with the 'biggest' time.
latest_workflow = max(group, key=operator.itemgetter("created_at"))
result[name] = latest_workflow
return result
def wait_for_pipeline(pipeline_number):
# type: (int) -> Dict
"""
    Wait until all workflows in the pipeline specified by 'pipeline_number' have finished.
:return: General information about all workflows from CircleCI.
"""
# wait until the 'state' field of the pipeline is 'created'.
timeout_time = datetime.datetime.utcnow() + datetime.timedelta(
seconds=CIRCLE_WAIT_TIMEOUT
)
while True:
# get information about the pipeline.
pipeline_info = get_request(
url=CIRCLE_API_PROJECT_URL + "/pipeline/" + str(pipeline_number)
)
pipeline_state = pipeline_info["state"]
if pipeline_state == "created":
break
if pipeline_state != "pending":
raise RuntimeError(
"Pipeline has a wrong state: {0}.".format(pipeline_state)
)
if datetime.datetime.utcnow() >= timeout_time:
raise RuntimeError(
"Timeout. Can not wait more for pipeline: '{0}'".format(pipeline_number)
)
time.sleep(10)
    # not a great idea, but it looks like we can get an incomplete list of workflows,
# even if we wait for pipeline status = 'created'.
# so we just wait a little to be sure that everything is created.
time.sleep(10)
pipeline_id = pipeline_info["id"]
# get pipeline workflows
pipeline_workflows = get_paginated_list(
url=CIRCLE_API_URL + "/pipeline/" + str(pipeline_id) + "/workflow",
)
# remove duplicated workflows and keep latest ones.
latest_workflows = discard_outdated_workflows(pipeline_workflows)
finished_workflows = dict()
    # wait for each workflow to finish successfully.
for name, workflow in latest_workflows.items():
# If any of the workflows is not successful 'wait_for_workflow' will raise error.
timeout_time = datetime.datetime.utcnow() + datetime.timedelta(
seconds=CIRCLE_WAIT_TIMEOUT
)
finished_workflows[name] = wait_for_workflow(
workflow_id=workflow["id"], timeout_time=timeout_time
)
return finished_workflows
def download_artifacts(artifacts_to_fetch, workflow_infos, output_path):
# type: (Dict, Dict, str) -> None
cached_job_infos = dict() # type: Dict[str, Any]
cached_artifact_infos = dict() # type: Dict[Tuple, Any]
for workflow_name, job_name, artifact_pattern in artifacts_to_fetch:
workflow_info = workflow_infos.get(workflow_name)
if workflow_info is None:
raise RuntimeError(
"Can not find workflow with name '{0}'".format(workflow_name)
)
        # if job infos for this workflow were already fetched, reuse them from the cache.
job_infos = cached_job_infos.get(workflow_name)
if job_infos is None:
            # job infos for this workflow have not been fetched yet. Fetch them from CircleCI and cache them for future use.
job_infos = get_paginated_dict(
url=CIRCLE_API_URL + "/workflow/" + workflow_info["id"] + "/job",
key="name",
)
cached_job_infos[workflow_name] = job_infos
job_info = job_infos.get(job_name)
if job_info is None:
raise RuntimeError("Can not find job with name '{0}'".format(job_name))
artifact_infos = cached_artifact_infos.get((workflow_name, job_name))
if artifact_infos is None:
artifact_infos = get_paginated_dict(
url=CIRCLE_API_PROJECT_URL
+ "/"
+ str(job_info["job_number"])
+ "/artifacts",
key="path",
)
cached_artifact_infos[(workflow_name, job_name)] = artifact_infos
for artifact_info in artifact_infos.values():
artifact_path = artifact_info["path"]
if re.match(artifact_pattern, artifact_path):
download_artifact_file(artifact_info, output_path)
print("Artifact '{0}'is downloaded.".format(artifact_path))
break
else:
raise RuntimeError(
"Can not find artifact with path '{0}'".format(artifact_pattern)
)
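# Illustrative shape of 'artifacts_to_fetch' (hypothetical values): a list of
# (workflow name, job name, artifact path regex) triples, for example
#   [("build", "package", r"dist/.*\.whl"), ("docs", "build-docs", r"html/.*")]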
def main(branch_name, artifacts_to_fetch, output_path, pipeline_parameters):
pipeline_trigger_info = trigger_pipeline(
branch_name=branch_name, pipeline_parameters=pipeline_parameters
)
pipeline_number = pipeline_trigger_info["number"]
    # wait until the whole pipeline has finished and get all workflows.
workflow_infos = wait_for_pipeline(pipeline_number=pipeline_number)
# download artifacts.
download_artifacts(artifacts_to_fetch, workflow_infos, output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--branch", required=True, type=str, help="Branch name."),
parser.add_argument(
"--workflow",
required=True,
type=str,
help="Name of the CircleCI workflow.",
action="append",
    )
parser.add_argument(
"--job",
required=True,
type=str,
help="Name of the CircleCI job.",
action="append",
)
parser.add_argument(
"--artifact-path",
required=True,
type=str,
help="The Regular expression for the path of the job artifact.",
action="append",
)
parser.add_argument(
"--output-path",
required=True,
type=str,
help="Output path for all uploaded artifacts.",
)
parser.add_argument(
"--pipeline-parameters",
type=str,
help="Text string with an encoded json with CircleCI pipeline parameters.",
)
args = parser.parse_args()
    if not (len(args.workflow) == len(args.job) == len(args.artifact_path)):
        raise ValueError(
            "Options '--workflow', '--job' and '--artifact-path' must be specified for each artifact."
        )
main(
branch_name=args.branch,
artifacts_to_fetch=list(zip(args.workflow, args.job, args.artifact_path)),
output_path=args.output_path,
        # '--pipeline-parameters' is optional, so only parse it when it was provided.
        pipeline_parameters=json.loads(args.pipeline_parameters)
        if args.pipeline_parameters
        else None,
)
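# Example invocation (hypothetical values; the option names come from the argparse
# definitions above, everything else is illustrative):
#
#   python fetch_circleci_artifacts.py \
#       --branch my-feature-branch \
#       --workflow build --job package --artifact-path 'dist/.*\.whl' \
#       --output-path ./artifacts \
#       --pipeline-parameters '{"run_release": true}'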
|
apache-2.0
|
barseghyanartur/django-security
|
conf.py
|
5
|
6433
|
# -*- coding: utf-8 -*-
#
# django-security documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 24 18:23:00 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.dirname(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testing.settings")
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-security'
copyright = u'2013, SD Elements'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_patterns = ['_build/*', '.tox/*']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-securitydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-security.tex', u'django-security Documentation',
u'Pawel Krawczyk', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
bsd-3-clause
|
oneminot/xbmc
|
lib/gtest/test/gtest_xml_outfiles_test.py
|
2526
|
5340
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "[email protected] (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
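    # For example, on a POSIX host os.path.join("/tmp/gtest", GTEST_OUTPUT_SUBDIR, "")
    # evaluates to "/tmp/gtest/xml_outfiles/" (illustrative path; the real temp
    # directory is whatever GetTempDir() returns).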
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO([email protected]): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
    # gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
|
gpl-2.0
|
jwhui/openthread
|
tests/scripts/thread-cert/border_router/test_dnssd_server.py
|
3
|
14851
|
#!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ipaddress
import json
import logging
import unittest
import config
import thread_cert
# Test description:
# This test verifies DNS-SD server works on a BR and is accessible from a Host.
#
# Topology:
#     ----------------(eth)--------------------
#            |                        |
#     BR1 (Leader, Server)           HOST
#        /        \
#   CLIENT1     CLIENT2
SERVER = BR1 = 1
CLIENT1, CLIENT2 = 2, 3
HOST = 4
DIGGER = HOST
DOMAIN = 'default.service.arpa.'
SERVICE = '_testsrv._udp'
SERVICE_FULL_NAME = f'{SERVICE}.{DOMAIN}'
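# e.g. SERVICE_FULL_NAME evaluates to '_testsrv._udp.default.service.arpa.'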
VALID_SERVICE_NAMES = [
'_abc._udp.default.service.arpa.',
'_abc._tcp.default.service.arpa.',
]
WRONG_SERVICE_NAMES = [
'_testsrv._udp.default.service.xxxx.',
'_testsrv._txp,default.service.arpa.',
]
class TestDnssdServerOnBr(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
BR1: {
'name': 'SERVER',
'is_otbr': True,
'version': '1.2',
},
CLIENT1: {
'name': 'CLIENT1',
},
CLIENT2: {
'name': 'CLIENT2',
},
HOST: {
'name': 'Host',
'is_host': True
},
}
def test(self):
self.nodes[HOST].start(start_radvd=False)
self.simulator.go(5)
self.nodes[BR1].start()
self.simulator.go(5)
self.assertEqual('leader', self.nodes[BR1].get_state())
self.nodes[SERVER].srp_server_set_enabled(True)
self.nodes[CLIENT1].start()
self.simulator.go(5)
self.assertEqual('router', self.nodes[CLIENT1].get_state())
self.nodes[CLIENT2].start()
self.simulator.go(5)
self.assertEqual('router', self.nodes[CLIENT2].get_state())
self.simulator.go(10)
server_addr = self.nodes[SERVER].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]
        # BR1 can ping to/from the Host on the infra link.
self.assertTrue(self.nodes[BR1].ping(self.nodes[HOST].get_ip6_address(config.ADDRESS_TYPE.ONLINK_ULA)[0],
backbone=True))
self.assertTrue(self.nodes[HOST].ping(self.nodes[BR1].get_ip6_address(config.ADDRESS_TYPE.OMR)[0],
backbone=True))
client1_addrs = [
self.nodes[CLIENT1].get_mleid(), self.nodes[CLIENT1].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]
]
self._config_srp_client_services(CLIENT1, 'ins1', 'host1', 11111, 1, 1, client1_addrs)
client2_addrs = [
self.nodes[CLIENT2].get_mleid(), self.nodes[CLIENT2].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]
]
self._config_srp_client_services(CLIENT2, 'ins2', 'host2', 22222, 2, 2, client2_addrs)
ins1_full_name = f'ins1.{SERVICE_FULL_NAME}'
ins2_full_name = f'ins2.{SERVICE_FULL_NAME}'
host1_full_name = f'host1.{DOMAIN}'
host2_full_name = f'host2.{DOMAIN}'
EMPTY_TXT = {}
# check if PTR query works
dig_result = self.nodes[DIGGER].dns_dig(server_addr, SERVICE_FULL_NAME, 'PTR')
self._assert_dig_result_matches(
dig_result, {
'QUESTION': [(SERVICE_FULL_NAME, 'IN', 'PTR')],
'ANSWER': [(SERVICE_FULL_NAME, 'IN', 'PTR', f'ins1.{SERVICE_FULL_NAME}'),
(SERVICE_FULL_NAME, 'IN', 'PTR', f'ins2.{SERVICE_FULL_NAME}')],
'ADDITIONAL': [
(ins1_full_name, 'IN', 'SRV', 1, 1, 11111, host1_full_name),
(ins1_full_name, 'IN', 'TXT', EMPTY_TXT),
(host1_full_name, 'IN', 'AAAA', client1_addrs[0]),
(host1_full_name, 'IN', 'AAAA', client1_addrs[1]),
(ins2_full_name, 'IN', 'SRV', 2, 2, 22222, host2_full_name),
(ins2_full_name, 'IN', 'TXT', EMPTY_TXT),
(host2_full_name, 'IN', 'AAAA', client2_addrs[0]),
(host2_full_name, 'IN', 'AAAA', client2_addrs[1]),
],
})
# check if SRV query works
dig_result = self.nodes[DIGGER].dns_dig(server_addr, ins1_full_name, 'SRV')
self._assert_dig_result_matches(
dig_result, {
'QUESTION': [(ins1_full_name, 'IN', 'SRV')],
'ANSWER': [(ins1_full_name, 'IN', 'SRV', 1, 1, 11111, host1_full_name),],
'ADDITIONAL': [
(host1_full_name, 'IN', 'AAAA', client1_addrs[0]),
(host1_full_name, 'IN', 'AAAA', client1_addrs[1]),
],
})
dig_result = self.nodes[DIGGER].dns_dig(server_addr, ins2_full_name, 'SRV')
self._assert_dig_result_matches(
dig_result, {
'QUESTION': [(ins2_full_name, 'IN', 'SRV')],
'ANSWER': [(ins2_full_name, 'IN', 'SRV', 2, 2, 22222, host2_full_name),],
'ADDITIONAL': [
(host2_full_name, 'IN', 'AAAA', client2_addrs[0]),
(host2_full_name, 'IN', 'AAAA', client2_addrs[1]),
],
})
# check if TXT query works
dig_result = self.nodes[DIGGER].dns_dig(server_addr, ins1_full_name, 'TXT')
self._assert_dig_result_matches(dig_result, {
'QUESTION': [(ins1_full_name, 'IN', 'TXT')],
'ANSWER': [(ins1_full_name, 'IN', 'TXT', EMPTY_TXT),],
})
dig_result = self.nodes[DIGGER].dns_dig(server_addr, ins2_full_name, 'TXT')
self._assert_dig_result_matches(dig_result, {
'QUESTION': [(ins2_full_name, 'IN', 'TXT')],
'ANSWER': [(ins2_full_name, 'IN', 'TXT', EMPTY_TXT),],
})
# check if AAAA query works
dig_result = self.nodes[DIGGER].dns_dig(server_addr, host1_full_name, 'AAAA')
self._assert_dig_result_matches(
dig_result, {
'QUESTION': [(host1_full_name, 'IN', 'AAAA'),],
'ANSWER': [
(host1_full_name, 'IN', 'AAAA', client1_addrs[0]),
(host1_full_name, 'IN', 'AAAA', client1_addrs[1]),
],
})
dig_result = self.nodes[DIGGER].dns_dig(server_addr, host2_full_name, 'AAAA')
self._assert_dig_result_matches(
dig_result, {
'QUESTION': [(host2_full_name, 'IN', 'AAAA'),],
'ANSWER': [
(host2_full_name, 'IN', 'AAAA', client2_addrs[0]),
(host2_full_name, 'IN', 'AAAA', client2_addrs[1]),
],
})
# check some invalid queries
for qtype in ['A', 'CNAME']:
dig_result = self.nodes[DIGGER].dns_dig(server_addr, host1_full_name, qtype)
self._assert_dig_result_matches(dig_result, {
'status': 'NOTIMP',
})
for service_name in WRONG_SERVICE_NAMES:
dig_result = self.nodes[DIGGER].dns_dig(server_addr, service_name, 'PTR')
self._assert_dig_result_matches(dig_result, {
'status': 'NXDOMAIN',
})
# verify Discovery Proxy works for _meshcop._udp
self._verify_discovery_proxy_meshcop(server_addr)
def _verify_discovery_proxy_meshcop(self, server_addr):
dp_service_name = '_meshcop._udp.default.service.arpa.'
network_name = self.nodes[SERVER].get_network_name()
dp_instance_name = f'{network_name}._meshcop._udp.default.service.arpa.'
dp_hostname = lambda x: x.endswith('.default.service.arpa.')
def check_border_agent_port(port):
return 0 < port <= 65535
dig_result = self.nodes[DIGGER].dns_dig(server_addr, dp_service_name, 'PTR')
self._assert_dig_result_matches(
dig_result, {
'QUESTION': [(dp_service_name, 'IN', 'PTR'),],
'ANSWER': [(dp_service_name, 'IN', 'PTR', dp_instance_name),],
'ADDITIONAL': [
(dp_instance_name, 'IN', 'SRV', 0, 0, check_border_agent_port, dp_hostname),
(dp_instance_name, 'IN', 'TXT', lambda txt: (isinstance(txt, dict) and txt.get(
'nn') == network_name and 'xp' in txt and 'tv' in txt and 'dd' in txt)),
],
})
# Find the actual host name and IPv6 address
dp_ip6_address = None
for rr in dig_result['ADDITIONAL']:
if rr[3] == 'SRV':
dp_hostname = rr[7]
elif rr[3] == 'AAAA':
dp_ip6_address = rr[4]
assert isinstance(dp_hostname, str), dig_result
dig_result = self.nodes[DIGGER].dns_dig(server_addr, dp_instance_name, 'SRV')
self._assert_dig_result_matches(
dig_result, {
'QUESTION': [(dp_instance_name, 'IN', 'SRV'),],
'ANSWER': [(dp_instance_name, 'IN', 'SRV', 0, 0, check_border_agent_port, dp_hostname),],
'ADDITIONAL': [(dp_instance_name, 'IN', 'TXT', lambda txt: (isinstance(txt, dict) and txt.get(
'nn') == network_name and 'xp' in txt and 'tv' in txt and 'dd' in txt)),],
})
dig_result = self.nodes[DIGGER].dns_dig(server_addr, dp_instance_name, 'TXT')
self._assert_dig_result_matches(
dig_result, {
'QUESTION': [(dp_instance_name, 'IN', 'TXT'),],
'ANSWER': [(dp_instance_name, 'IN', 'TXT', lambda txt: (isinstance(txt, dict) and txt.get(
'nn') == network_name and 'xp' in txt and 'tv' in txt and 'dd' in txt)),],
'ADDITIONAL': [(dp_instance_name, 'IN', 'SRV', 0, 0, check_border_agent_port, dp_hostname),],
})
if dp_ip6_address is not None:
dig_result = self.nodes[DIGGER].dns_dig(server_addr, dp_hostname, 'AAAA')
self._assert_dig_result_matches(dig_result, {
'QUESTION': [(dp_hostname, 'IN', 'AAAA'),],
'ANSWER': [(dp_hostname, 'IN', 'AAAA', dp_ip6_address),],
})
def _config_srp_client_services(self, client, instancename, hostname, port, priority, weight, addrs):
self.nodes[client].netdata_show()
srp_server_port = self.nodes[client].get_srp_server_port()
self.nodes[client].srp_client_start(self.nodes[SERVER].get_mleid(), srp_server_port)
self.nodes[client].srp_client_set_host_name(hostname)
self.nodes[client].srp_client_set_host_address(*addrs)
self.nodes[client].srp_client_add_service(instancename, SERVICE, port, priority, weight)
self.simulator.go(5)
self.assertEqual(self.nodes[client].srp_client_get_host_state(), 'Registered')
def _assert_have_question(self, dig_result, question):
for dig_question in dig_result['QUESTION']:
if self._match_record(dig_question, question):
return
self.fail((dig_result, question))
def _assert_have_answer(self, dig_result, record, additional=False):
for dig_answer in dig_result['ANSWER' if not additional else 'ADDITIONAL']:
dig_answer = list(dig_answer)
dig_answer[1:2] = [] # remove TTL from answer
record = list(record)
# convert IPv6 addresses to `ipaddress.IPv6Address` before matching
if dig_answer[2] == 'AAAA':
dig_answer[3] = ipaddress.IPv6Address(dig_answer[3])
if record[2] == 'AAAA':
record[3] = ipaddress.IPv6Address(record[3])
if self._match_record(dig_answer, record):
return
print('not match: ', dig_answer, record,
list(a == b or (callable(b) and b(a)) for a, b in zip(dig_answer, record)))
self.fail((record, dig_result))
def _match_record(self, record, match):
assert not any(callable(elem) for elem in record), record
if record == match:
return True
return all(a == b or (callable(b) and b(a)) for a, b in zip(record, match))
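    # Illustrative sketch (hypothetical record): callables in the expected tuple act as
    # predicates on the corresponding field, so an expected entry such as
    #   ('ins1...', 'IN', 'SRV', 0, 0, lambda port: 0 < port <= 65535, 'host1...')
    # matches any SRV record whose port falls within that range.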
def _assert_dig_result_matches(self, dig_result, expected_result):
self.assertEqual(dig_result['opcode'], expected_result.get('opcode', 'QUERY'), dig_result)
self.assertEqual(dig_result['status'], expected_result.get('status', 'NOERROR'), dig_result)
if 'QUESTION' in expected_result:
self.assertEqual(len(dig_result['QUESTION']), len(expected_result['QUESTION']), dig_result)
for question in expected_result['QUESTION']:
self._assert_have_question(dig_result, question)
if 'ANSWER' in expected_result:
self.assertEqual(len(dig_result['ANSWER']), len(expected_result['ANSWER']), dig_result)
for record in expected_result['ANSWER']:
self._assert_have_answer(dig_result, record, additional=False)
if 'ADDITIONAL' in expected_result:
self.assertGreaterEqual(len(dig_result['ADDITIONAL']), len(expected_result['ADDITIONAL']), dig_result)
for record in expected_result['ADDITIONAL']:
self._assert_have_answer(dig_result, record, additional=True)
logging.info("dig result matches:\r%s", json.dumps(dig_result, indent=True))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
timoschwarzer/blendworks
|
BlendWorks Server/python/Lib/encodings/mbcs.py
|
860
|
1211
|
""" Python 'mbcs' Codec for Windows
Cloned by Mark Hammond ([email protected]) from ascii.py,
which was written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
encode = mbcs_encode
class StreamReader(codecs.StreamReader):
decode = mbcs_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
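# Example usage (Windows only, since the 'mbcs' codec wraps the ANSI code page of the
# running system; the exact bytes therefore depend on that code page):
#
#   >>> 'abc'.encode('mbcs')
#   b'abc'
#   >>> import codecs
#   >>> codecs.decode(b'abc', 'mbcs')
#   'abc'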
|
gpl-2.0
|