repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 classes) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 classes) | hash (int64) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
jjmleiro/hue | desktop/core/ext-py/openpyxl-2.3.0-b2/openpyxl/reader/worksheet.py | 13 | 13133 | from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
"""Reader for a single worksheet."""
from io import BytesIO
from warnings import warn
# compatibility imports
from openpyxl.xml.functions import iterparse
# package imports
from openpyxl.cell import Cell
from openpyxl.cell.read_only import _cast_number
from openpyxl.worksheet import Worksheet, ColumnDimension, RowDimension
from openpyxl.worksheet.page import PageMargins, PrintOptions, PrintPageSetup
from openpyxl.worksheet.protection import SheetProtection
from openpyxl.worksheet.views import SheetView
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.xml.constants import (
SHEET_MAIN_NS,
REL_NS,
EXT_TYPES
)
from openpyxl.xml.functions import safe_iterator
from openpyxl.styles import Color
from openpyxl.formatting import ConditionalFormatting, Rule
from openpyxl.formula.translate import Translator
from openpyxl.worksheet.properties import WorksheetProperties
from openpyxl.utils import (
coordinate_from_string,
get_column_letter,
column_index_from_string,
coordinate_to_tuple,
)
from openpyxl.descriptors.excel import ExtensionList, Extension
def _get_xml_iter(xml_source):
"""
    Possible inputs: strings, bytes, members of zipfile, temporary file.
    Always returns a file-like object.
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
pass
return xml_source
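# Illustrative sketch (added for clarity; not part of the original module).
# _get_xml_iter accepts raw XML or an open file-like object, e.g.:
#     _get_xml_iter(b"<worksheet/>")            # wrapped in BytesIO
#     _get_xml_iter("<worksheet/>")             # encoded to UTF-8, then BytesIO
#     _get_xml_iter(open("sheet1.xml", "rb"))   # same object, seeked back to 0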
class WorkSheetParser(object):
COL_TAG = '{%s}col' % SHEET_MAIN_NS
ROW_TAG = '{%s}row' % SHEET_MAIN_NS
CELL_TAG = '{%s}c' % SHEET_MAIN_NS
VALUE_TAG = '{%s}v' % SHEET_MAIN_NS
FORMULA_TAG = '{%s}f' % SHEET_MAIN_NS
MERGE_TAG = '{%s}mergeCell' % SHEET_MAIN_NS
INLINE_STRING = "{%s}is/{%s}t" % (SHEET_MAIN_NS, SHEET_MAIN_NS)
INLINE_RICHTEXT = "{%s}is/{%s}r/{%s}t" % (SHEET_MAIN_NS, SHEET_MAIN_NS, SHEET_MAIN_NS)
def __init__(self, wb, title, xml_source, shared_strings):
self.ws = wb.create_sheet(title=title)
self.source = xml_source
self.shared_strings = shared_strings
self.guess_types = wb._guess_types
self.data_only = wb.data_only
self.styles = self.ws.parent._cell_styles
self.differential_styles = wb._differential_styles
self.keep_vba = wb.vba_archive is not None
self.shared_formula_masters = {} # {si_str: Translator()}
def parse(self):
dispatcher = {
'{%s}mergeCells' % SHEET_MAIN_NS: self.parse_merge,
'{%s}col' % SHEET_MAIN_NS: self.parse_column_dimensions,
'{%s}row' % SHEET_MAIN_NS: self.parse_row_dimensions,
'{%s}printOptions' % SHEET_MAIN_NS: self.parse_print_options,
'{%s}pageMargins' % SHEET_MAIN_NS: self.parse_margins,
'{%s}pageSetup' % SHEET_MAIN_NS: self.parse_page_setup,
'{%s}headerFooter' % SHEET_MAIN_NS: self.parse_header_footer,
'{%s}conditionalFormatting' % SHEET_MAIN_NS: self.parser_conditional_formatting,
'{%s}autoFilter' % SHEET_MAIN_NS: self.parse_auto_filter,
'{%s}sheetProtection' % SHEET_MAIN_NS: self.parse_sheet_protection,
'{%s}dataValidations' % SHEET_MAIN_NS: self.parse_data_validation,
'{%s}sheetPr' % SHEET_MAIN_NS: self.parse_properties,
'{%s}legacyDrawing' % SHEET_MAIN_NS: self.parse_legacy_drawing,
'{%s}sheetViews' % SHEET_MAIN_NS: self.parse_sheet_views,
'{%s}extLst' % SHEET_MAIN_NS: self.parse_extensions,
}
tags = dispatcher.keys()
stream = _get_xml_iter(self.source)
it = iterparse(stream, tag=tags)
for _, element in it:
tag_name = element.tag
if tag_name in dispatcher:
dispatcher[tag_name](element)
element.clear()
self.ws._current_row = self.ws.max_row
def parse_cell(self, element):
value = element.find(self.VALUE_TAG)
if value is not None:
value = value.text
formula = element.find(self.FORMULA_TAG)
data_type = element.get('t', 'n')
coordinate = element.get('r')
style_id = element.get('s')
# assign formula to cell value unless only the data is desired
if formula is not None and not self.data_only:
data_type = 'f'
if formula.text:
value = "=" + formula.text
else:
value = "="
formula_type = formula.get('t')
if formula_type:
if formula_type != "shared":
self.ws.formula_attributes[coordinate] = dict(formula.attrib)
else:
si = formula.get('si') # Shared group index for shared formulas
# The spec (18.3.1.40) defines shared formulae in
# terms of the following:
#
# `master`: "The first formula in a group of shared
# formulas"
# `ref`: "Range of cells which the formula applies
# to." It's a required attribute on the master
# cell, forbidden otherwise.
# `shared cell`: "A cell is shared only when si is
# used and t is `shared`."
#
# Whether to use the cell's given formula or the
# master's depends on whether the cell is shared,
# whether it's in the ref, and whether it defines its
# own formula, as follows:
#
# Shared? Has formula? | In ref Not in ref
# ========= ==============|======== ===============
# Yes Yes | master impl. defined
# No Yes | own own
# Yes No | master impl. defined
# No No | ?? N/A
#
# The ?? is because the spec is silent on this issue,
# though my inference is that the cell does not
# receive a formula at all.
#
# For this implementation, we are using the master
# formula in the two "impl. defined" cases and no
# formula in the "??" case. This choice of
# implementation allows us to disregard the `ref`
# parameter altogether, and does not require
# computing expressions like `C5 in A1:D6`.
# Presumably, Excel does not generate spreadsheets
# with such contradictions.
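# Illustrative example (added for clarity; not part of the original
# source): if the master cell B2 holds "=A1*2" with si="0", a later
# cell B3 with t="shared", si="0" and no formula text of its own is
# resolved through Translator to "=A2*2".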
if si in self.shared_formula_masters:
trans = self.shared_formula_masters[si]
value = trans.translate_formula(coordinate)
else:
self.shared_formula_masters[si] = Translator(value, coordinate)
style_array = None
if style_id is not None:
style_id = int(style_id)
style_array = self.styles[style_id]
row, column = coordinate_to_tuple(coordinate)
cell = Cell(self.ws, row=row, col_idx=column, style_array=style_array)
self.ws._cells[(row, column)] = cell
if value is not None:
if data_type == 'n':
value = _cast_number(value)
elif data_type == 'b':
value = bool(int(value))
elif data_type == 's':
value = self.shared_strings[int(value)]
elif data_type == 'str':
data_type = 's'
else:
if data_type == 'inlineStr':
data_type = 's'
child = element.find(self.INLINE_STRING)
if child is None:
child = element.find(self.INLINE_RICHTEXT)
if child is not None:
value = child.text
if self.guess_types or value is None:
cell.value = value
else:
cell._value=value
cell.data_type=data_type
def parse_merge(self, element):
for mergeCell in safe_iterator(element, ('{%s}mergeCell' % SHEET_MAIN_NS)):
self.ws.merge_cells(mergeCell.get('ref'))
def parse_column_dimensions(self, col):
attrs = dict(col.attrib)
column = get_column_letter(int(attrs['min']))
attrs['index'] = column
if 'style' in attrs:
attrs['style'] = self.styles[int(attrs['style'])]
dim = ColumnDimension(self.ws, **attrs)
self.ws.column_dimensions[column] = dim
def parse_row_dimensions(self, row):
attrs = dict(row.attrib)
keys = set(attrs)
for key in keys:
if key == "s":
attrs['s'] = self.styles[int(attrs['s'])]
elif key.startswith('{'):
del attrs[key]
keys = set(attrs)
if keys != set(['r', 'spans']) and keys != set(['r']):
# don't create dimension objects unless they have relevant information
dim = RowDimension(self.ws, **attrs)
self.ws.row_dimensions[dim.index] = dim
for cell in safe_iterator(row, self.CELL_TAG):
self.parse_cell(cell)
def parse_print_options(self, element):
self.ws.print_options = PrintOptions.from_tree(element)
def parse_margins(self, element):
self.page_margins = PageMargins.from_tree(element)
def parse_page_setup(self, element):
self.ws.page_setup = PrintPageSetup.from_tree(element)
def parse_header_footer(self, element):
oddHeader = element.find('{%s}oddHeader' % SHEET_MAIN_NS)
if oddHeader is not None and oddHeader.text is not None:
self.ws.header_footer.setHeader(oddHeader.text)
oddFooter = element.find('{%s}oddFooter' % SHEET_MAIN_NS)
if oddFooter is not None and oddFooter.text is not None:
self.ws.header_footer.setFooter(oddFooter.text)
def parser_conditional_formatting(self, element):
range_string = element.get('sqref')
cfRules = element.findall('{%s}cfRule' % SHEET_MAIN_NS)
self.ws.conditional_formatting.cf_rules[range_string] = []
for node in cfRules:
rule = Rule.from_tree(node)
if rule.dxfId is not None:
rule.dxf = self.differential_styles[rule.dxfId]
self.ws.conditional_formatting.cf_rules[range_string].append(rule)
def parse_auto_filter(self, element):
self.ws.auto_filter.ref = element.get("ref")
for fc in safe_iterator(element, '{%s}filterColumn' % SHEET_MAIN_NS):
filters = fc.find('{%s}filters' % SHEET_MAIN_NS)
if filters is None:
continue
vals = [f.get("val") for f in safe_iterator(filters, '{%s}filter' % SHEET_MAIN_NS)]
blank = filters.get("blank")
self.ws.auto_filter.add_filter_column(fc.get("colId"), vals, blank=blank)
for sc in safe_iterator(element, '{%s}sortCondition' % SHEET_MAIN_NS):
self.ws.auto_filter.add_sort_condition(sc.get("ref"), sc.get("descending"))
def parse_sheet_protection(self, element):
self.ws.protection = SheetProtection.from_tree(element)
password = element.get("password")
if password is not None:
self.ws.protection.set_password(password, True)
def parse_data_validation(self, element):
for node in safe_iterator(element, "{%s}dataValidation" % SHEET_MAIN_NS):
dv = DataValidation.from_tree(node)
self.ws._data_validations.append(dv)
def parse_properties(self, element):
self.ws.sheet_properties = WorksheetProperties.from_tree(element)
def parse_legacy_drawing(self, element):
if self.keep_vba:
# Create an id that will not clash with any other ids that will
# be generated.
self.ws.vba_controls = 'vbaControlId'
def parse_sheet_views(self, element):
for el in element.findall("{%s}sheetView" % SHEET_MAIN_NS):
# according to the specification the last view wins
pass
self.ws.sheet_view = SheetView.from_tree(el)
def parse_extensions(self, element):
extLst = ExtensionList.from_tree(element)
for e in extLst.ext:
ext_type = EXT_TYPES.get(e.uri.upper(), "Unknown")
msg = "{0} extension is not supported and will be removed".format(ext_type)
warn(msg)
def fast_parse(xml_source, parent, sheet_title, shared_strings):
parser = WorkSheetParser(parent, sheet_title, xml_source, shared_strings)
parser.parse()
return parser.ws
| apache-2.0 | -5,987,100,388,995,258,000 | 39.285276 | 95 | 0.570928 | false |
cysuncn/python | spark/crm/PROC_A_SUBJECT_D002017.py | 1 | 3651 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_SUBJECT_D002017').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
#dates used in processing
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#first day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#10-character date (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
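# Illustrative values (added for clarity; assuming a sample run date): for
# etl_date = '20240315' the variables above evaluate to V_DT='20240315',
# V_DT_LD='20240314', V_DT_FMD='20240301', V_DT_LMD='20240229' and
# V_DT10='2024-03-15'.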
V_STEP = 0
OCRM_F_CI_CUSTLNAINFO = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUSTLNAINFO/*')
OCRM_F_CI_CUSTLNAINFO.registerTempTable("OCRM_F_CI_CUSTLNAINFO")
# Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT A.FR_ID AS FR_ID
,A.CUST_ID AS CUST_ID
,MAX(A.EXP_DATE) AS EXP_DATE
       FROM OCRM_F_CI_CUSTLNAINFO A                               -- customer credit information
WHERE EXP_DATE > V_DT
GROUP BY FR_ID
,CUST_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
TMP_CUSTLNA_INFO_01 = sqlContext.sql(sql)
TMP_CUSTLNA_INFO_01.registerTempTable("TMP_CUSTLNA_INFO_01")
dfn="TMP_CUSTLNA_INFO_01/"+V_DT+".parquet"
TMP_CUSTLNA_INFO_01.cache()
nrows = TMP_CUSTLNA_INFO_01.count()
TMP_CUSTLNA_INFO_01.write.save(path=hdfs + '/' + dfn, mode='overwrite')
TMP_CUSTLNA_INFO_01.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/TMP_CUSTLNA_INFO_01/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert TMP_CUSTLNA_INFO_01 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
# Task [21] 001-02::
V_STEP = V_STEP + 1
sql = """
SELECT A.CUST_ID AS CUST_ID
,'' AS ORG_ID
,'D002017' AS INDEX_CODE
,A.CREDIT_LINE AS INDEX_VALUE
,SUBSTR(V_DT, 1, 7) AS YEAR_MONTH
,V_DT AS ETL_DATE
,A.CUST_TYPE AS CUST_TYPE
,A.FR_ID AS FR_ID
       FROM OCRM_F_CI_CUSTLNAINFO A                               -- customer credit information
      INNER JOIN TMP_CUSTLNA_INFO_01 B                            -- customer credit information (temp table 01)
ON A.FR_ID = B.FR_ID
AND A.CUST_ID = B.CUST_ID
AND A.EXP_DATE = B.EXP_DATE """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_TARGET_D002017 = sqlContext.sql(sql)
ACRM_A_TARGET_D002017.registerTempTable("ACRM_A_TARGET_D002017")
dfn="ACRM_A_TARGET_D002017/"+V_DT+".parquet"
ACRM_A_TARGET_D002017.cache()
nrows = ACRM_A_TARGET_D002017.count()
ACRM_A_TARGET_D002017.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_A_TARGET_D002017.unpersist()
OCRM_F_CI_CUSTLNAINFO.unpersist()
TMP_CUSTLNA_INFO_01.unpersist()
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_TARGET_D002017/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_TARGET_D002017 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 | -5,784,959,972,438,862,000 | 38.388889 | 173 | 0.591255 | false |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_8_0/system_backup_broker.py | 15 | 13541 | from ..broker import Broker
class SystemBackupBroker(Broker):
controller = "system_backup"
def create_archive(self, **kwargs):
"""Creates backup of current system database.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include_date: Defines whether include date in file name or not.
:type include_date: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param init: Defines whether to initially create the archive.
:type init: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param async_ind: When false, backup creating will be run synchronously, and the API call will block until it is complete. When true, backup creating id will be returned to use for subsequent calls
:type async_ind: Boolean
**Outputs**
"""
return self.api_request(self._get_method_fullname("create_archive"), kwargs)
def create_archive_status(self, **kwargs):
"""Backup database status.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("create_archive_status"), kwargs)
def ssh_authentication_test(self, **kwargs):
"""Test SSH authentication.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param host: Host name or IP address of the system where archive will be copied.
:type host: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param port: Number of open SSH port on the system where archive will be delivered. Default value is 22 (used if no port number specified).
:type port: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param user_name: Name of the existing user on the system where archive will be copied.
:type user_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param password: User password on the system where archive will be copied.
:type password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param use_ssh_keys: Specifies whether to use SSH keys.
:type use_ssh_keys: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param directory: Remote host directory where archive will be stored.
:type directory: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("ssh_authentication_test"), kwargs)
def move_archive_to_remote_host(self, **kwargs):
"""Moves database archive to remote host via SSH. Note that archive will be removed from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param host: Host name or IP address of the system where archive will be copied. Required if init is set to true.
:type host: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param port: Number of open SSH port on the system where archive will be delivered. Default value is 22 (used if no port number specified).
:type port: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param user_name: Name of the existing user on the system where archive will be copied. Required if init is set to true.
:type user_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param password: User password on the system where archive will be copied. Required if init is set to true.
:type password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param use_ssh_keys: Specifies whether to use SSH keys.
:type use_ssh_keys: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param directory: Specifies directory where archive will be stored on remote host. Default is user home directory.
:type directory: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param init: Set to true to initialize moving archive
:type init: Boolean
**Outputs**
"""
return self.api_request(self._get_method_fullname("move_archive_to_remote_host"), kwargs)
def download_archive(self, **kwargs):
"""Download database archive.
**Inputs**
**Outputs**
"""
return self.api_mixed_request(self._get_method_fullname("download_archive"), kwargs)
def download_archive_md5_sum(self, **kwargs):
"""Download database archive md5 checksum.
**Inputs**
**Outputs**
"""
return self.api_mixed_request(self._get_method_fullname("download_archive_md5_sum"), kwargs)
def remove_archive(self, **kwargs):
"""Database archive is stored in temporary directory on NetMRI. It's removed on schedule but you may choose to force remove it.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("remove_archive"), kwargs)
def schedule_archiving(self, **kwargs):
"""Schedule NetMRI database archiving. Archive will be stored on up to 2 systems supporting SCP.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enable: Specifies whether scheduled archiving should be enabled or not. If parameter is not specified then scheduled archiving is set disabled.
:type enable: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param host_1: Host name or IP address of the system where archive will be copied.
:type host_1: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param port_1: Number of open SSH port on the system where archive will be delivered. Default value is 22 (used if no port number specified).
:type port_1: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param user_name_1: Name of the existing user on the system where archive will be copied.
:type user_name_1: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param password_1: User password on the system where archive will be copied.
:type password_1: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param use_ssh_keys_1: Specifies whether to use SSH keys.
:type use_ssh_keys_1: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param directory_1: Specifies directory where archive will be stored on remote host. Default is user home directory.
:type directory_1: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param host_2: Host name or IP address of the system where archive will be copied.
:type host_2: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param port_2: Number of open SSH port on the system where archive will be delivered. Default value is 22 (used if no port number specified).
:type port_2: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param user_name_2: Name of the existing user on the system where archive will be copied.
:type user_name_2: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:``
:param password_2: User password on the system where archive will be copied.
:type password_2: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param use_ssh_keys_2: Specifies whether to use SSH keys.
:type use_ssh_keys_2: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param directory_2: Specifies directory where archive will be stored on remote host. Default is user home directory.
:type directory_2: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include_date_1: Specifies whether to put current date into archive file name or not while saving on remote host 1.
:type include_date_1: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include_date_2: Specifies whether to put current date into archive file name or not while saving on remote host 2.
:type include_date_2: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule_cron: Cron schedule string.
:type schedule_cron: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule_json: NetMRI internal parameters generated by 'cronscheduler' form transmitted in json format for setting cron schedule string.
:type schedule_json: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param force_save: If true, changes will be saved even if credentials test failed
:type force_save: Boolean
**Outputs**
"""
return self.api_request(self._get_method_fullname("schedule_archiving"), kwargs)
def upload_archive(self, **kwargs):
"""Upload database archive to NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param archive: NetMRI database archive file.
:type archive: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param md5: NetMRI database archive MD5 checksum file.
:type md5: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("upload_archive"), kwargs)
def restore_database(self, **kwargs):
"""Restores database from the archive which should have been uploaded to NetMRI.
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("restore_database"), kwargs)
| apache-2.0 | -8,656,417,111,616,167,000 | 32.600496 | 210 | 0.52426 | false |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/singledispatch.py | 45 | 8292 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['singledispatch']
from functools import update_wrapper
from weakref import WeakKeyDictionary
from singledispatch_helpers import MappingProxyType, get_cache_token
################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################
def _c3_merge(sequences):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
raise RuntimeError("Inconsistent hierarchy")
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0]
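# Worked example (added for clarity; not part of the original module): for
# ``class A(object)``, ``class B(object)``, ``class C(A, B)`` the call
#     _c3_merge([[C], [A, object], [B, object], [A, B]])
# returns [C, A, B, object], matching C.__mro__.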
def _c3_mro(cls, abcs=None):
"""Computes the method resolution order using extended C3 linearization.
If no *abcs* are given, the algorithm works exactly like the built-in C3
linearization used for method resolution.
If given, *abcs* is a list of abstract base classes that should be inserted
into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
result. The algorithm inserts ABCs where their functionality is introduced,
i.e. issubclass(cls, abc) returns True for the class itself but returns
False for all its direct base classes. Implicit ABCs for a given class
(either registered or inferred from the presence of a special method like
__len__) are inserted directly after the last ABC explicitly listed in the
MRO of said class. If two implicit ABCs end up next to each other in the
resulting MRO, their ordering depends on the order of types in *abcs*.
"""
for i, base in enumerate(reversed(cls.__bases__)):
if hasattr(base, '__abstractmethods__'):
boundary = len(cls.__bases__) - i
break # Bases up to the last explicit ABC are considered first.
else:
boundary = 0
abcs = list(abcs) if abcs else []
explicit_bases = list(cls.__bases__[:boundary])
abstract_bases = []
other_bases = list(cls.__bases__[boundary:])
for base in abcs:
if issubclass(cls, base) and not any(
issubclass(b, base) for b in cls.__bases__
):
# If *cls* is the class that introduces behaviour described by
# an ABC *base*, insert said ABC to its MRO.
abstract_bases.append(base)
for base in abstract_bases:
abcs.remove(base)
explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
return _c3_merge(
[[cls]] +
explicit_c3_mros + abstract_c3_mros + other_c3_mros +
[explicit_bases] + [abstract_bases] + [other_bases]
)
def _compose_mro(cls, types):
"""Calculates the method resolution order for a given class *cls*.
Includes relevant abstract base classes (with their respective bases) from
the *types* iterable. Uses a modified C3 linearization algorithm.
"""
bases = set(cls.__mro__)
# Remove entries which are already present in the __mro__ or unrelated.
def is_related(typ):
return (typ not in bases and hasattr(typ, '__mro__')
and issubclass(cls, typ))
types = [n for n in types if is_related(n)]
# Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway).
def is_strict_base(typ):
for other in types:
if typ != other and typ in other.__mro__:
return True
return False
types = [n for n in types if not is_strict_base(n)]
# Subclasses of the ABCs in *types* which are also implemented by
# *cls* can be used to stabilize ABC ordering.
type_set = set(types)
mro = []
for typ in types:
found = []
for sub in typ.__subclasses__():
if sub not in bases and issubclass(cls, sub):
found.append([s for s in sub.__mro__ if s in type_set])
if not found:
mro.append(typ)
continue
# Favor subclasses with the biggest number of useful bases
found.sort(key=len, reverse=True)
for sub in found:
for subcls in sub:
if subcls not in mro:
mro.append(subcls)
return _c3_mro(cls, abcs=mro)
def _find_impl(cls, registry):
"""Returns the best matching implementation from *registry* for type *cls*.
Where there is no registered implementation for a specific type, its method
resolution order is used to find a more generic implementation.
Note: if *registry* does not contain an implementation for the base
*object* type, this function may return None.
"""
mro = _compose_mro(cls, registry.keys())
match = None
for t in mro:
if match is not None:
# If *match* is an implicit ABC but there is another unrelated,
# equally matching implicit ABC, refuse the temptation to guess.
if (t in registry and t not in cls.__mro__
and match not in cls.__mro__
and not issubclass(match, t)):
raise RuntimeError("Ambiguous dispatch: {0} or {1}".format(
match, t))
break
if t in registry:
match = t
return registry.get(match)
def singledispatch(func):
"""Single-dispatch generic function decorator.
Transforms a function into a generic function, which can have different
behaviours depending upon the type of its first argument. The decorated
function acts as the default implementation, and additional
implementations can be registered using the register() attribute of the
generic function.
"""
registry = {}
dispatch_cache = WeakKeyDictionary()
def ns(): pass
ns.cache_token = None
def dispatch(cls):
"""generic_func.dispatch(cls) -> <function implementation>
Runs the dispatch algorithm to return the best available implementation
for the given *cls* registered on *generic_func*.
"""
if ns.cache_token is not None:
current_token = get_cache_token()
if ns.cache_token != current_token:
dispatch_cache.clear()
ns.cache_token = current_token
try:
impl = dispatch_cache[cls]
except KeyError:
try:
impl = registry[cls]
except KeyError:
impl = _find_impl(cls, registry)
dispatch_cache[cls] = impl
return impl
def register(cls, func=None):
"""generic_func.register(cls, func) -> func
Registers a new implementation for the given *cls* on a *generic_func*.
"""
if func is None:
return lambda f: register(cls, f)
registry[cls] = func
if ns.cache_token is None and hasattr(cls, '__abstractmethods__'):
ns.cache_token = get_cache_token()
dispatch_cache.clear()
return func
def wrapper(*args, **kw):
return dispatch(args[0].__class__)(*args, **kw)
registry[object] = func
wrapper.register = register
wrapper.dispatch = dispatch
wrapper.registry = MappingProxyType(registry)
wrapper._clear_cache = dispatch_cache.clear
update_wrapper(wrapper, func)
return wrapper
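# Illustrative usage sketch (added for clarity; not part of the original
# backport). It relies only on the public ``singledispatch`` defined above;
# the demo function names are arbitrary.
if __name__ == '__main__':
    @singledispatch
    def describe(obj):
        # default implementation, used when no more specific type is registered
        return 'object: {0!r}'.format(obj)

    @describe.register(int)
    def _describe_int(obj):
        return 'int: {0}'.format(obj)

    @describe.register(list)
    def _describe_list(obj):
        return 'list of {0} items'.format(len(obj))

    assert describe(3) == 'int: 3'
    assert describe([1, 2]) == 'list of 2 items'
    assert describe('x') == "object: 'x'"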
| mit | -3,261,028,177,306,718,000 | 36.863014 | 80 | 0.6048 | false |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/music21/figuredBass/resolution.py | 1 | 31355 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: resolution.py
# Purpose: Defines standard resolutions for possibility instances
# Authors: Jose Cabal-Ugaz
#
# Copyright: Copyright © 2011 Michael Scott Cuthbert and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
.. note:: The terminology (V43, viio, iv, etc.) is explained more fully in *The Music Theory Handbook*
by Marjorie Merryman.
This module contains methods which can properly resolve
`dominant seventh <http://en.wikipedia.org/wiki/Dominant_seventh_chord>`_,
`diminished seventh <http://en.wikipedia.org/wiki/Diminished_seventh_chord>`_, and
`augmented sixth <http://en.wikipedia.org/wiki/Augmented_sixth_chord>`_
chords expressed as possibilities (See :mod:`~music21.figuredBass.possibility`).
Although these methods can stand alone, they are speed-enhanced for instances
of :class:`~music21.figuredBass.segment.Segment`, where there are many
possibilities formed around the same chord. If provided with additional
arguments, the methods only :meth:`~music21.pitch.Pitch.transpose` each
:class:`~music21.pitch.Pitch` in a possibility by the appropriate interval.
'''
import unittest
from music21 import exceptions21
from music21 import chord
from music21 import note
from music21 import stream
def augmentedSixthToDominant(augSixthPossib, augSixthType = None, augSixthChordInfo = None):
'''
Resolves French (augSixthType = 1), German (augSixthType = 2), and Swiss (augSixthType = 3)
augmented sixth chords to the root position dominant triad.
Proper Italian augmented sixth resolutions not supported within this method.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> Bb2 = pitch.Pitch('B-2')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> Es4 = pitch.Pitch('E#4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> Gs4 = pitch.Pitch('G#4')
>>> iv6 = (G4, D4, D4, Bb2)
>>> itAug6 = (Gs4, D4, D4, Bb2)
>>> frAug6 = (Gs4, E4, D4, Bb2)
>>> grAug6 = (Gs4, F4, D4, Bb2)
>>> swAug6 = (Gs4, Es4, D4, Bb2)
>>> frRes = resolution.augmentedSixthToDominant(frAug6)
>>> frRes
(<music21.pitch.Pitch A4>, <music21.pitch.Pitch E4>, <music21.pitch.Pitch C#4>, <music21.pitch.Pitch A2>)
>>> [str(p) for p in frRes]
['A4', 'E4', 'C#4', 'A2']
>>> grRes = resolution.augmentedSixthToDominant(grAug6)
>>> [str(p) for p in grRes]
['A4', 'E4', 'C#4', 'A2']
>>> swRes = resolution.augmentedSixthToDominant(swAug6)
>>> [str(p) for p in swRes]
['A4', 'E4', 'C#4', 'A2']
>>> #_DOCS_SHOW resolution.showResolutions(frAug6, frRes, grAug6, grRes, swAug6, swRes)
.. image:: images/figuredBass/fbResolution_a6toV.*
:width: 700
Above: French, German, and Swiss resolutions, respectively.
'''
if augSixthChordInfo == None:
augSixthChord = chord.Chord(augSixthPossib)
if not augSixthChord.isAugmentedSixth():
raise ResolutionException("Possibility is not an augmented sixth chord.")
augSixthChordInfo = _unpackSeventhChord(chord.Chord(augSixthPossib))
if augSixthType == None:
if augSixthChord.isItalianAugmentedSixth():
raise ResolutionException("Italian augmented sixth resolution not supported in this method.")
elif augSixthChord.isFrenchAugmentedSixth():
augSixthType = 1
elif augSixthChord.isGermanAugmentedSixth():
augSixthType = 2
elif augSixthChord.isSwissAugmentedSixth():
augSixthType = 3
if augSixthType == 1 or augSixthType == 3:
[bass, other, root, unused_third, fifth] = augSixthChordInfo # other == sixth
elif augSixthType == 2:
[bass, root, unused_third, fifth, other] = augSixthChordInfo # other == seventh
howToResolve = \
[(lambda p: p.name == bass.name, '-m2'),
(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == fifth.name, '-m2'),
(lambda p: p.name == other.name and augSixthType == 3, 'd1'),
(lambda p: p.name == other.name and augSixthType == 2, '-m2')]
return _resolvePitches(augSixthPossib, howToResolve)
def augmentedSixthToMajorTonic(augSixthPossib, augSixthType = None, augSixthChordInfo = None):
'''
Resolves French (augSixthType = 1), German (augSixthType = 2), and Swiss (augSixthType = 3)
augmented sixth chords to the major tonic 6,4.
Proper Italian augmented sixth resolutions not supported within this method.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> Bb2 = pitch.Pitch('B-2')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> Es4 = pitch.Pitch('E#4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> Gs4 = pitch.Pitch('G#4')
>>> iv6 = (G4, D4, D4, Bb2)
>>> itAug6 = (Gs4, D4, D4, Bb2)
>>> frAug6 = (Gs4, E4, D4, Bb2)
>>> grAug6 = (Gs4, F4, D4, Bb2)
>>> swAug6 = (Gs4, Es4, D4, Bb2)
>>> frRes = resolution.augmentedSixthToMajorTonic(frAug6)
>>> [str(p) for p in frRes]
['A4', 'F#4', 'D4', 'A2']
>>> grRes = resolution.augmentedSixthToMajorTonic(grAug6)
>>> [str(p) for p in grRes]
['A4', 'F#4', 'D4', 'A2']
>>> swRes = resolution.augmentedSixthToMajorTonic(swAug6)
>>> [str(p) for p in swRes]
['A4', 'F#4', 'D4', 'A2']
>>> #_DOCS_SHOW resolution.showResolutions(frAug6, frRes, grAug6, grRes, swAug6, swRes)
.. image:: images/figuredBass/fbResolution_a6toI.*
:width: 700
Above: French, German, and Swiss resolutions, respectively.
'''
if augSixthChordInfo == None:
augSixthChord = chord.Chord(augSixthPossib)
if not augSixthChord.isAugmentedSixth():
raise ResolutionException("Possibility is not an augmented sixth chord.")
augSixthChordInfo = _unpackSeventhChord(chord.Chord(augSixthPossib))
if augSixthType == None:
if augSixthChord.isItalianAugmentedSixth():
raise ResolutionException("Italian augmented sixth resolution not supported in this method.")
elif augSixthChord.isFrenchAugmentedSixth():
augSixthType = 1
elif augSixthChord.isGermanAugmentedSixth():
augSixthType = 2
elif augSixthChord.isSwissAugmentedSixth():
augSixthType = 3
if augSixthType == 1 or augSixthType == 3:
[bass, other, root, unused_third, fifth] = augSixthChordInfo # other == sixth
elif augSixthType == 2:
[bass, root, unused_third, fifth, other] = augSixthChordInfo # other == seventh
howToResolve = \
[(lambda p: p.name == bass.name, '-m2'),
(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == fifth.name, 'P1'),
(lambda p: p.name == other.name and augSixthType == 1, 'M2'),
(lambda p: p.name == other.name and augSixthType == 2, 'A1'),
(lambda p: p.name == other.name and augSixthType == 3, 'm2')]
return _resolvePitches(augSixthPossib, howToResolve)
def augmentedSixthToMinorTonic(augSixthPossib, augSixthType = None, augSixthChordInfo = None):
'''
Resolves French (augSixthType = 1), German (augSixthType = 2), and Swiss (augSixthType = 3)
augmented sixth chords to the minor tonic 6,4.
Proper Italian augmented sixth resolutions not supported within this method.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> Bb2 = pitch.Pitch('B-2')
>>> D4 = pitch.Pitch('D4')
>>> E4 = pitch.Pitch('E4')
>>> Es4 = pitch.Pitch('E#4')
>>> F4 = pitch.Pitch('F4')
>>> G4 = pitch.Pitch('G4')
>>> Gs4 = pitch.Pitch('G#4')
>>> iv6 = (G4, D4, D4, Bb2)
>>> itAug6 = (Gs4, D4, D4, Bb2)
>>> frAug6 = (Gs4, E4, D4, Bb2)
>>> grAug6 = (Gs4, F4, D4, Bb2)
>>> swAug6 = (Gs4, Es4, D4, Bb2)
>>> frRes = resolution.augmentedSixthToMinorTonic(frAug6)
>>> [str(p) for p in frRes]
['A4', 'F4', 'D4', 'A2']
>>> grRes = resolution.augmentedSixthToMinorTonic(grAug6)
>>> [str(p) for p in grRes]
['A4', 'F4', 'D4', 'A2']
>>> swRes = resolution.augmentedSixthToMinorTonic(swAug6)
>>> [str(p) for p in swRes]
['A4', 'F4', 'D4', 'A2']
>>> #_DOCS_SHOW resolution.showResolutions(frAug6, frRes, grAug6, grRes, swAug6, swRes)
.. image:: images/figuredBass/fbResolution_a6toIm.*
:width: 700
Above: French, German, and Swiss resolutions, respectively.
'''
if augSixthChordInfo == None:
augSixthChord = chord.Chord(augSixthPossib)
if not augSixthChord.isAugmentedSixth():
raise ResolutionException("Possibility is not an augmented sixth chord.")
augSixthChordInfo = _unpackSeventhChord(chord.Chord(augSixthPossib))
if augSixthType == None:
if augSixthChord.isItalianAugmentedSixth():
raise ResolutionException("Italian augmented sixth resolution not supported in this method.")
elif augSixthChord.isFrenchAugmentedSixth():
augSixthType = 1
elif augSixthChord.isGermanAugmentedSixth():
augSixthType = 2
elif augSixthChord.isSwissAugmentedSixth():
augSixthType = 3
if augSixthType == 1 or augSixthType == 3:
[bass, other, root, unused_third, fifth] = augSixthChordInfo # other == sixth
elif augSixthType == 2:
[bass, root, unused_third, fifth, other] = augSixthChordInfo # other == seventh
howToResolve = \
[(lambda p: p.name == bass.name, '-m2'),
(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == fifth.name, 'P1'),
(lambda p: p.name == other.name and augSixthType == 1, 'm2'),
(lambda p: p.name == other.name and augSixthType == 3, 'd2')]
return _resolvePitches(augSixthPossib, howToResolve)
def dominantSeventhToMajorTonic(domPossib, resolveV43toI6 = False, domChordInfo = None):
'''
Resolves a dominant seventh chord in root position or any of its
inversions to the major tonic, in root position or first inversion.
The second inversion (4,3) dominant seventh chord can resolve to
the tonic in either inversion. This is controlled by
resolveV43toI6, and is set to True by :meth:`~music21.figuredBass.segment.Segment.resolveDominantSeventhSegment`
only when the :attr:`~music21.figuredBass.segment.Segment.segmentChord`
of a :class:`~music21.figuredBass.segment.Segment`
spells out a dominant seventh chord in second inversion.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> G2 = pitch.Pitch('G2')
>>> C3 = pitch.Pitch('C3')
>>> E3 = pitch.Pitch('E3')
>>> G3 = pitch.Pitch('G3')
>>> Bb3 = pitch.Pitch('B-3')
>>> B3 = pitch.Pitch('B3')
>>> C4 = pitch.Pitch('C4')
>>> F4 = pitch.Pitch('F4')
>>> Bb4 = pitch.Pitch('B-4')
>>> D5 = pitch.Pitch('D5')
>>> E5 = pitch.Pitch('E5')
>>> domPossibA1 = (D5, F4, B3, G2)
>>> resPossibA1 = resolution.dominantSeventhToMajorTonic(domPossibA1)
>>> resPossibA1
(<music21.pitch.Pitch C5>, <music21.pitch.Pitch E4>, <music21.pitch.Pitch C4>, <music21.pitch.Pitch C3>)
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA1, resPossibA1)
.. image:: images/figuredBass/fbResolution_V7toI_1.*
:width: 150
>>> domPossibA2 = (Bb3, G3, E3, C3)
>>> resPossibA2 = resolution.dominantSeventhToMajorTonic(domPossibA2)
>>> [str(p) for p in resPossibA2]
['A3', 'F3', 'F3', 'F3']
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA2, resPossibA2)
.. image:: images/figuredBass/fbResolution_V7toI_2.*
:width: 150
>>> domPossibA3 = (E5, Bb4, C4, G3)
>>> resPossibA3a = resolution.dominantSeventhToMajorTonic(domPossibA3, False)
>>> [str(p) for p in resPossibA3a]
['F5', 'A4', 'C4', 'F3']
>>> resPossibA3b = resolution.dominantSeventhToMajorTonic(domPossibA3, True)
>>> [str(p) for p in resPossibA3b]
['F5', 'C5', 'C4', 'A3']
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA3, resPossibA3a, domPossibA3, resPossibA3b)
.. image:: images/figuredBass/fbResolution_V7toI_3.*
:width: 200
'''
if domChordInfo == None:
domChord = chord.Chord(domPossib)
if not domChord.isDominantSeventh():
raise ResolutionException("Possibility is not a dominant seventh chord.")
domChordInfo = _unpackSeventhChord(chord.Chord(domPossib))
[bass, root, third, fifth, seventh] = domChordInfo
howToResolve = \
[(lambda p: p.name == root.name and p == bass, 'P4'),
(lambda p: p.name == third.name, 'm2'),
(lambda p: p.name == fifth.name and resolveV43toI6, 'M2'),
(lambda p: p.name == fifth.name, '-M2'),
(lambda p: p.name == seventh.name and resolveV43toI6, 'M2'),
(lambda p: p.name == seventh.name, '-m2')]
return _resolvePitches(domPossib, howToResolve)
def dominantSeventhToMinorTonic(domPossib, resolveV43toi6 = False, domChordInfo = None):
'''
Resolves a dominant seventh chord in root position or any of its
inversions to the minor tonic, in root position or first inversion,
accordingly.
The second inversion (4,3) dominant seventh chord can resolve to
the tonic in either inversion. This is controlled by
resolveV43toi6, and is set to True by :meth:`~music21.figuredBass.segment.Segment.resolveDominantSeventhSegment`
only when the :attr:`~music21.figuredBass.segment.Segment.segmentChord`
of a :class:`~music21.figuredBass.segment.Segment`
spells out a dominant seventh chord in second inversion.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> G2 = pitch.Pitch('G2')
>>> C3 = pitch.Pitch('C3')
>>> E3 = pitch.Pitch('E3')
>>> G3 = pitch.Pitch('G3')
>>> Bb3 = pitch.Pitch('B-3')
>>> B3 = pitch.Pitch('B3')
>>> C4 = pitch.Pitch('C4')
>>> F4 = pitch.Pitch('F4')
>>> Bb4 = pitch.Pitch('B-4')
>>> D5 = pitch.Pitch('D5')
>>> E5 = pitch.Pitch('E5')
>>> domPossibA1 = (D5, F4, B3, G2)
>>> resPossibA1 = resolution.dominantSeventhToMinorTonic(domPossibA1)
>>> [str(p) for p in resPossibA1]
['C5', 'E-4', 'C4', 'C3']
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA1, resPossibA1)
.. image:: images/figuredBass/fbResolution_V7toIm_1.*
:width: 150
>>> domPossibA2 = (Bb3, G3, E3, C3)
>>> resPossibA2 = resolution.dominantSeventhToMinorTonic(domPossibA2)
>>> ', '.join([str(p) for p in resPossibA2])
'A-3, F3, F3, F3'
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA2, resPossibA2)
.. image:: images/figuredBass/fbResolution_V7toIm_2.*
:width: 150
>>> domPossibA3 = (E5, Bb4, C4, G3)
>>> resPossibA3a = resolution.dominantSeventhToMinorTonic(domPossibA3, False)
>>> [str(p) for p in resPossibA3a]
['F5', 'A-4', 'C4', 'F3']
>>> resPossibA3b = resolution.dominantSeventhToMinorTonic(domPossibA3, True)
>>> [str(p) for p in resPossibA3b]
['F5', 'C5', 'C4', 'A-3']
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA3, resPossibA3a, domPossibA3, resPossibA3b)
.. image:: images/figuredBass/fbResolution_V7toIm_3.*
:width: 200
'''
if domChordInfo == None:
domChord = chord.Chord(domPossib)
if not domChord.isDominantSeventh():
raise ResolutionException("Possibility is not a dominant seventh chord.")
domChordInfo = _unpackSeventhChord(chord.Chord(domPossib))
[bass, root, third, fifth, seventh] = domChordInfo
howToResolve = \
[(lambda p: p.name == root.name and p == bass, 'P4'),
(lambda p: p.name == third.name, 'm2'),
(lambda p: p.name == fifth.name and resolveV43toi6, 'm2'),
(lambda p: p.name == fifth.name, '-M2'),
(lambda p: p.name == seventh.name and resolveV43toi6, 'M2'),
(lambda p: p.name == seventh.name, '-M2')]
return _resolvePitches(domPossib, howToResolve)
def dominantSeventhToMajorSubmediant(domPossib, domChordInfo = None):
'''
Resolves a dominant seventh chord in root position to the
major submediant (VI) in root position.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> G2 = pitch.Pitch('G2')
>>> B3 = pitch.Pitch('B3')
>>> F4 = pitch.Pitch('F4')
>>> D5 = pitch.Pitch('D5')
>>> domPossibA1 = (D5, F4, B3, G2)
>>> resPossibA1 = resolution.dominantSeventhToMajorSubmediant(domPossibA1)
>>> [p.nameWithOctave for p in resPossibA1]
['C5', 'E-4', 'C4', 'A-2']
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA1, resPossibA1)
.. image:: images/figuredBass/fbResolution_V7toVI.*
:width: 150
'''
if domChordInfo == None:
domChord = chord.Chord(domPossib)
if not domChord.isDominantSeventh():
raise ResolutionException("Possibility is not a dominant seventh chord.")
domChordInfo = _unpackSeventhChord(chord.Chord(domPossib))
if not domChord.inversion() == 0:
raise ResolutionException("Possibility must be in root position.")
[unused_bass, root, third, fifth, seventh] = domChordInfo
howToResolve = \
[(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == third.name, 'm2'),
(lambda p: p.name == fifth.name, '-M2'),
(lambda p: p.name == seventh.name, '-M2')]
return _resolvePitches(domPossib, howToResolve)
def dominantSeventhToMinorSubmediant(domPossib, domChordInfo = None):
'''
Resolves a dominant seventh chord in root position to the
minor submediant (vi) in root position.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> G2 = pitch.Pitch('G2')
>>> B3 = pitch.Pitch('B3')
>>> F4 = pitch.Pitch('F4')
>>> D5 = pitch.Pitch('D5')
>>> domPossibA1 = (D5, F4, B3, G2)
>>> resPossibA1 = resolution.dominantSeventhToMinorSubmediant(domPossibA1)
>>> [p.nameWithOctave for p in resPossibA1]
['C5', 'E4', 'C4', 'A2']
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA1, resPossibA1)
.. image:: images/figuredBass/fbResolution_V7toVIm.*
:width: 150
'''
if domChordInfo == None:
domChord = chord.Chord(domPossib)
if not domChord.isDominantSeventh():
raise ResolutionException("Possibility is not a dominant seventh chord.")
domChordInfo = _unpackSeventhChord(chord.Chord(domPossib))
if not domChord.inversion() == 0:
raise ResolutionException("Possibility must be in root position.")
[unused_bass, root, third, fifth, seventh] = domChordInfo
howToResolve = \
[(lambda p: p.name == root.name, 'M2'),
(lambda p: p.name == third.name, 'm2'),
(lambda p: p.name == fifth.name, '-M2'),
(lambda p: p.name == seventh.name, '-m2')]
return _resolvePitches(domPossib, howToResolve)
def dominantSeventhToMajorSubdominant(domPossib, domChordInfo = None):
'''
Resolves a dominant seventh chord in root position
to the major subdominant (IV) in first inversion.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> G2 = pitch.Pitch('G2')
>>> B3 = pitch.Pitch('B3')
>>> F4 = pitch.Pitch('F4')
>>> D5 = pitch.Pitch('D5')
>>> domPossibA1 = (D5, F4, B3, G2)
>>> resPossibA1 = resolution.dominantSeventhToMajorSubdominant(domPossibA1)
>>> [p.nameWithOctave for p in resPossibA1]
['C5', 'F4', 'C4', 'A2']
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA1, resPossibA1)
.. image:: images/figuredBass/fbResolution_V7toIV.*
:width: 150
'''
if domChordInfo == None:
domChord = chord.Chord(domPossib)
if not domChord.isDominantSeventh():
raise ResolutionException("Possibility is not a dominant seventh chord.")
domChordInfo = _unpackSeventhChord(chord.Chord(domPossib))
if not domChord.inversion() == 0:
raise ResolutionException("Possibility must be in root position.")
[unused_bass, root, third, fifth, unused_seventh] = domChordInfo
howToResolve = \
[(lambda p: p.name == root.name, 'M2'),
(lambda p: p.name == third.name, 'm2'),
(lambda p: p.name == fifth.name, '-M2')]
return _resolvePitches(domPossib, howToResolve)
def dominantSeventhToMinorSubdominant(domPossib, domChordInfo = None):
'''
Resolves a dominant seventh chord in root position
to the minor subdominant (iv) in first inversion.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> G2 = pitch.Pitch('G2')
>>> B3 = pitch.Pitch('B3')
>>> F4 = pitch.Pitch('F4')
>>> D5 = pitch.Pitch('D5')
>>> domPossibA1 = (D5, F4, B3, G2)
>>> resPossibA1 = resolution.dominantSeventhToMinorSubdominant(domPossibA1)
>>> [p.nameWithOctave for p in resPossibA1]
['C5', 'F4', 'C4', 'A-2']
>>> #_DOCS_SHOW resolution.showResolutions(domPossibA1, resPossibA1)
.. image:: images/figuredBass/fbResolution_V7toIVm.*
:width: 150
'''
if domChordInfo == None:
domChord = chord.Chord(domPossib)
if not domChord.isDominantSeventh():
raise ResolutionException("Possibility is not a dominant seventh chord.")
domChordInfo = _unpackSeventhChord(chord.Chord(domPossib))
if not domChord.inversion() == 0:
raise ResolutionException("Possibility must be in root position.")
[unused_bass, root, third, fifth, unused_seventh] = domChordInfo
howToResolve = \
[(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == third.name, 'm2'),
(lambda p: p.name == fifth.name, '-M2')]
return _resolvePitches(domPossib, howToResolve)
def diminishedSeventhToMajorTonic(dimPossib, doubledRoot = False, dimChordInfo = None):
'''
Resolves a fully diminished seventh chord to the major tonic,
in root position or either inversion.
The resolution of the diminished seventh chord can have a
doubled third (standard resolution) or a doubled root
(alternate resolution), because the third of the diminished
chord can either rise or fall. The desired resolution is
attained using doubledRoot, and is set by
:meth:`~music21.figuredBass.segment.Segment.resolveDiminishedSeventhSegment`.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> Cs3 = pitch.Pitch('C#3')
>>> G3 = pitch.Pitch('G3')
>>> E4 = pitch.Pitch('E4')
>>> Bb4 = pitch.Pitch('B-4')
>>> dimPossibA = (Bb4, E4, G3, Cs3)
>>> resPossibAa = resolution.diminishedSeventhToMajorTonic(dimPossibA, False)
>>> [str(p) for p in resPossibAa]
['A4', 'F#4', 'F#3', 'D3']
>>> resPossibAb = resolution.diminishedSeventhToMajorTonic(dimPossibA, True)
>>> [p.nameWithOctave for p in resPossibAb]
['A4', 'D4', 'F#3', 'D3']
>>> #_DOCS_SHOW resolution.showResolutions(dimPossibA, resPossibAa, dimPossibA, resPossibAb)
.. image:: images/figuredBass/fbResolution_vii7toI.*
:width: 200
'''
if dimChordInfo == None:
dimChord = chord.Chord(dimPossib)
if not dimChord.isDiminishedSeventh():
raise ResolutionException("Possibility is not a fully diminished seventh chord.")
dimChordInfo = _unpackSeventhChord(chord.Chord(dimPossib))
[unused_bass, root, third, fifth, seventh] = dimChordInfo
howToResolve = \
[(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == third.name and doubledRoot, '-M2'),
(lambda p: p.name == third.name, 'M2'),
(lambda p: p.name == fifth.name, '-m2'),
(lambda p: p.name == seventh.name, '-m2')]
return _resolvePitches(dimPossib, howToResolve)
def diminishedSeventhToMinorTonic(dimPossib, doubledRoot = False, dimChordInfo = None):
'''
Resolves a fully diminished seventh chord to the minor tonic,
in root position or either inversion.
The resolution of the diminished seventh chord can have a
doubled third (standard resolution) or a doubled root
(alternate resolution), because the third of the diminished
chord can either rise or fall. The desired resolution is
attained using doubledRoot, and is set by
:meth:`~music21.figuredBass.segment.Segment.resolveDiminishedSeventhSegment`.
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> Cs3 = pitch.Pitch('C#3')
>>> G3 = pitch.Pitch('G3')
>>> E4 = pitch.Pitch('E4')
>>> Bb4 = pitch.Pitch('B-4')
>>> dimPossibA = (Bb4, E4, G3, Cs3)
>>> resPossibAa = resolution.diminishedSeventhToMinorTonic(dimPossibA, False)
>>> [p.nameWithOctave for p in resPossibAa]
['A4', 'F4', 'F3', 'D3']
>>> resPossibAb = resolution.diminishedSeventhToMinorTonic(dimPossibA, True)
>>> [p.nameWithOctave for p in resPossibAb]
['A4', 'D4', 'F3', 'D3']
>>> #_DOCS_SHOW resolution.showResolutions(dimPossibA, resPossibAa, dimPossibA, resPossibAb)
.. image:: images/figuredBass/fbResolution_vii7toIm.*
:width: 200
'''
if dimChordInfo == None:
dimChord = chord.Chord(dimPossib)
if not dimChord.isDiminishedSeventh():
raise ResolutionException("Possibility is not a fully diminished seventh chord.")
dimChordInfo = _unpackSeventhChord(chord.Chord(dimPossib))
[unused_bass, root, third, fifth, seventh] = dimChordInfo
howToResolve = \
[(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == third.name and doubledRoot, '-M2'),
(lambda p: p.name == third.name, 'm2'),
(lambda p: p.name == fifth.name, '-M2'),
(lambda p: p.name == seventh.name, '-m2')]
return _resolvePitches(dimPossib, howToResolve)
def diminishedSeventhToMajorSubdominant(dimPossib, dimChordInfo = None):
'''
Resolves a fully diminished seventh chord to the
major subdominant (IV).
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> Cs3 = pitch.Pitch('C#3')
>>> G3 = pitch.Pitch('G3')
>>> E4 = pitch.Pitch('E4')
>>> Bb4 = pitch.Pitch('B-4')
>>> dimPossibA = (Bb4, E4, G3, Cs3)
>>> resPossibA = resolution.diminishedSeventhToMajorSubdominant(dimPossibA)
>>> [str(p) for p in resPossibA]
['B4', 'D4', 'G3', 'D3']
>>> #_DOCS_SHOW resolution.showResolutions(dimPossibA, resPossibA)
.. image:: images/figuredBass/fbResolution_vii7toIV.*
:width: 150
'''
if dimChordInfo == None:
dimChord = chord.Chord(dimPossib)
if not dimChord.isDiminishedSeventh():
raise ResolutionException("Possibility is not a fully diminished seventh chord.")
dimChordInfo = _unpackSeventhChord(chord.Chord(dimPossib))
[unused_bass, root, third, unused_fifth, seventh] = dimChordInfo
howToResolve = \
[(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == third.name, '-M2'),
(lambda p: p.name == seventh.name, 'A1')]
return _resolvePitches(dimPossib, howToResolve)
def diminishedSeventhToMinorSubdominant(dimPossib, dimChordInfo = None):
'''
Resolves a fully diminished seventh chord to the
minor subdominant (iv).
>>> from music21 import pitch
>>> from music21.figuredBass import resolution
>>> Cs3 = pitch.Pitch('C#3')
>>> G3 = pitch.Pitch('G3')
>>> E4 = pitch.Pitch('E4')
>>> Bb4 = pitch.Pitch('B-4')
>>> dimPossibA = (Bb4, E4, G3, Cs3)
>>> resPossibA = resolution.diminishedSeventhToMinorSubdominant(dimPossibA)
>>> [str(p) for p in resPossibA]
['B-4', 'D4', 'G3', 'D3']
>>> #_DOCS_SHOW resolution.showResolutions(dimPossibA, resPossibA)
.. image:: images/figuredBass/fbResolution_vii7toIVm.*
:width: 150
'''
if dimChordInfo == None:
dimChord = chord.Chord(dimPossib)
if not dimChord.isDiminishedSeventh():
raise ResolutionException("Possibility is not a fully diminished seventh chord.")
dimChordInfo = _unpackSeventhChord(chord.Chord(dimPossib))
[unused_bass, root, third, unused_fifth, unused_seventh] = dimChordInfo
howToResolve = \
[(lambda p: p.name == root.name, 'm2'),
(lambda p: p.name == third.name, '-M2')]
return _resolvePitches(dimPossib, howToResolve)
def showResolutions(*allPossib):
'''
Takes in possibilities as arguments and adds them in order
to a :class:`~music21.stream.Score` which is then displayed
in external software.
'''
upperParts = stream.Part()
bassLine = stream.Part()
for possibA in allPossib:
chordA = chord.Chord(possibA[0:-1])
chordA.quarterLength = 2.0
bassA = note.Note(possibA[-1])
bassA.quarterLength = 2.0
upperParts.append(chordA)
bassLine.append(bassA)
score = stream.Score()
score.insert(0, upperParts)
score.insert(0, bassLine)
score.show()
#----------------------------------------------
# INTERNAL METHODS
def _transpose(samplePitch, intervalString):
return samplePitch.transpose(intervalString)
def _resolvePitches(possibToResolve, howToResolve):
'''
    Takes in a possibility to resolve and a list of (lambda function, intervalString)
    pairs, and transposes each pitch by the intervalString corresponding to the first
    lambda function that returns True when applied to the pitch.
'''
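    # Illustrative sketch (not part of the original module): with
    #   howToResolve = [(lambda p: p.name == 'C', 'm2')]
    # a pitch C4 is transposed up a minor second to D-4, while any pitch that
    # matches none of the rules falls through to the ('P1') catch-all appended
    # below and is returned unchanged.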
howToResolve.append((lambda p: True, 'P1'))
resPitches = []
for samplePitch in possibToResolve:
for (expression, intervalString) in howToResolve:
if expression(samplePitch):
resPitches.append(_transpose(samplePitch, intervalString))
break
return tuple(resPitches)
def _unpackSeventhChord(seventhChord):
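    # Returns [bass, root, third, fifth, seventh]; chord steps absent from the
    # chord come back as None from getChordStep().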
bass = seventhChord.bass()
root = seventhChord.root()
third = seventhChord.getChordStep(3)
fifth = seventhChord.getChordStep(5)
seventh = seventhChord.getChordStep(7)
seventhChordInfo = [bass, root, third, fifth, seventh]
return seventhChordInfo
_DOC_ORDER = [augmentedSixthToDominant,
augmentedSixthToMajorTonic, augmentedSixthToMinorTonic,
dominantSeventhToMajorTonic, dominantSeventhToMinorTonic,
dominantSeventhToMajorSubmediant, dominantSeventhToMinorSubmediant,
dominantSeventhToMajorSubdominant, dominantSeventhToMinorSubdominant,
diminishedSeventhToMajorTonic, diminishedSeventhToMinorTonic,
diminishedSeventhToMajorSubdominant, diminishedSeventhToMinorSubdominant]
#-------------------------------------------------------------------------------
class ResolutionException(exceptions21.Music21Exception):
pass
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
def runTest(self):
pass
if __name__ == "__main__":
import music21
music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof
| mit | -3,378,788,628,264,093,000 | 39.404639 | 116 | 0.636059 | false |
openmips/stbgui | lib/python/Plugins/SystemPlugins/SkinSelector/plugin.py | 3 | 4380 | # -*- coding: iso-8859-1 -*-
# (c) 2006 Stephan Reichholf
# This Software is Free, use it where you want, when you want for whatever you want and modify it if you want but don't remove my copyright!
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Components.SystemInfo import SystemInfo
from Plugins.Plugin import PluginDescriptor
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from enigma import eEnv
import os
SKINXML = "skin.xml"
DEFAULTSKIN = "<Default Skin>"
class SkinSelector(Screen):
skinlist = []
root = os.path.join(eEnv.resolve("${datadir}"),"enigma2")
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.setTitle(_("Select your Skin"))
self.skinlist = []
self.previewPath = ""
if os.path.exists(os.path.join(self.root, SKINXML)):
self.skinlist.append(DEFAULTSKIN)
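		# Walk the skin directory once; 1080p skins (xres="1920"/yres="1080" in
		# skin.xml) are only listed on boxes with full-HD skin support.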
for root, dirs, files in os.walk(self.root, followlinks=True):
for subdir in dirs:
file = os.path.join(os.path.join(root, subdir), SKINXML)
if os.path.exists(file) and (SystemInfo["HasFullHDSkinSupport"] or not all(x in open(file, "r").read() for x in ('yres="1080"', 'xres="1920"'))):
self.skinlist.append(subdir)
dirs = []
self["key_red"] = StaticText(_("Close"))
self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
self.skinlist.sort()
self["SkinList"] = MenuList(self.skinlist)
self["Preview"] = Pixmap()
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions"],
{
"ok": self.ok,
"back": self.close,
"red": self.close,
"up": self.up,
"down": self.down,
"left": self.left,
"right": self.right,
"info": self.info,
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
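		# Pre-select the list entry matching config.skin.primary_skin and show
		# its preview image.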
tmp = config.skin.primary_skin.value.find("/"+SKINXML)
if tmp != -1:
tmp = config.skin.primary_skin.value[:tmp]
idx = 0
for skin in self.skinlist:
if skin == tmp:
break
idx += 1
if idx < len(self.skinlist):
self["SkinList"].moveToIndex(idx)
self.loadPreview()
def up(self):
self["SkinList"].up()
self.loadPreview()
def down(self):
self["SkinList"].down()
self.loadPreview()
def left(self):
self["SkinList"].pageUp()
self.loadPreview()
def right(self):
self["SkinList"].pageDown()
self.loadPreview()
def info(self):
aboutbox = self.session.open(MessageBox,_("STB-GUI Skinselector\n\nIf you experience any problems please contact\[email protected]\n\n\xA9 2006 - Stephan Reichholf"), MessageBox.TYPE_INFO)
aboutbox.setTitle(_("About..."))
def ok(self):
if self["SkinList"].getCurrent() == DEFAULTSKIN:
self.skinfile = "."
else:
self.skinfile = self["SkinList"].getCurrent()
self.skinfile = os.path.join(self.skinfile, SKINXML)
print "Skinselector: Selected Skin: "+self.root+self.skinfile
restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to restart the GUI now?"), MessageBox.TYPE_YESNO)
restartbox.setTitle(_("Restart GUI now?"))
def loadPreview(self):
if self["SkinList"].getCurrent() == DEFAULTSKIN:
pngpath = "."
else:
pngpath = self["SkinList"].getCurrent()
pngpath = os.path.join(os.path.join(self.root, pngpath), "prev.png")
if not os.path.exists(pngpath):
pngpath = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/SkinSelector/noprev.png")
if self.previewPath != pngpath:
self.previewPath = pngpath
self["Preview"].instance.setPixmapFromFile(self.previewPath)
def restartGUI(self, answer):
if answer is True:
config.skin.primary_skin.value = self.skinfile
config.skin.primary_skin.save()
self.session.open(TryQuitMainloop, 3)
def SkinSelMain(session, **kwargs):
session.open(SkinSelector)
def SkinSelSetup(menuid, **kwargs):
if menuid == "ui_menu":
return [(_("Skin"), SkinSelMain, "skin_selector", None)]
else:
return []
def Plugins(**kwargs):
return PluginDescriptor(name=_("Skin"), description= _("Select your Skin"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=SkinSelSetup)
| gpl-2.0 | -8,129,415,933,970,107,000 | 30.73913 | 195 | 0.707306 | false |
Nosferatul/coala | tests/bearlib/aspects/ClassTest.py | 12 | 2722 | from coalib.bearlib.aspects import Root, AspectTypeError as aspectTypeError
from coalib.bearlib.aspects.meta import isaspect, assert_aspect, issubaspect
import pytest
class AspectClassTest:
def test_subaspect_without_definition(self, RootAspect):
with pytest.raises(TypeError):
@RootAspect.subaspect
class SubAspect:
pass
def test_subaspect_without_docs(self, RootAspect):
@RootAspect.subaspect
class SubAspect:
"""
Definition
"""
assert not SubAspect.docs.check_consistency()
def test_subaspect_without_enough_docs(self, RootAspect):
@RootAspect.subaspect
class SubAspect:
"""
Description
"""
class docs:
example = 'Example'
assert not SubAspect.docs.check_consistency()
class IsaspectFunctionTest:
def test_isaspect(self, RootAspect):
@RootAspect.subaspect
class SubAspect:
"""
Description
"""
assert isaspect(RootAspect)
assert isaspect(SubAspect)
assert isaspect(SubAspect('Python'))
assert isaspect(Root('py'))
assert not isaspect('String')
class Assert_aspectFunctionTest:
def test_assert_aspect(self, RootAspect):
@RootAspect.subaspect
class SubAspect:
"""
Description
"""
assert assert_aspect(RootAspect) == RootAspect
assert assert_aspect(SubAspect) == SubAspect
assert assert_aspect(Root) == Root
with pytest.raises(aspectTypeError) as exc:
assert_aspect('String')
assert (str(exc.value) == "'String' is not an aspectclass or "
'an instance of an aspectclass')
class IssubaspectFunctionTest:
def test_issubaspect(self, RootAspect):
@RootAspect.subaspect
class SubAspect:
"""
Description
"""
assert issubaspect(SubAspect, RootAspect)
assert not issubaspect(Root, RootAspect)
assert issubaspect(RootAspect, RootAspect)
with pytest.raises(aspectTypeError) as exc:
issubaspect('String', SubAspect)
assert not isaspect('String')
assert (str(exc.value) == "'String' is not an aspectclass or "
'an instance of an aspectclass')
with pytest.raises(aspectTypeError) as exc:
issubaspect(RootAspect, str)
assert not isaspect(str)
assert (str(exc.value) == "<class 'str'> is not an aspectclass or "
'an instance of an aspectclass')
assert issubaspect(SubAspect('Python'), RootAspect)
| agpl-3.0 | -3,647,443,995,309,548,500 | 28.268817 | 76 | 0.60507 | false |
hesseltuinhof/mxnet | tests/nightly/test_tutorial.py | 2 | 3198 | #pylint: disable=no-member, too-many-locals, too-many-branches, no-self-use, broad-except, lost-exception, too-many-nested-blocks, too-few-public-methods, invalid-name
"""
This script converts all Python tutorials into Python scripts
and tests whether any warning or error occurs.
After running each python script, it also converts the corresponding markdown
file to a notebook to make sure notebook execution raises no error.
"""
import os
import warnings
import imp
import traceback
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
fail_dict = {}
def test_tutorial(file_path):
"""Run tutorial python script and save any error or warning.
If no error or warning occurs, run notebook.
Parameters
----------
file_path : str
path of tutorial markdown file
"""
with warnings.catch_warnings(record=True) as w:
tutorial_name = os.path.basename(file_path)
print file_path + '.py'
try:
imp.load_source('tutorial', file_path + '.py')
if len(w) > 0:
err_msg = "%s.py has %d warnings.\n" % (tutorial_name, len(w))
fail_dict[tutorial_name] = err_msg
else:
test_tutorial_nb(file_path)
except Exception:
err_msg = "%s.py has error:\n%s" % (tutorial_name, traceback.format_exc())
fail_dict[tutorial_name] = err_msg
def test_tutorial_nb(file_path):
"""Run tutorial jupyter notebook to catch any execution error.
Parameters
----------
file_path : str
path of tutorial markdown file
"""
tutorial_name = os.path.basename(file_path)
notebook = nbformat.read(file_path + '.ipynb', as_version=4)
eprocessor = ExecutePreprocessor(timeout=1800)
try:
eprocessor.preprocess(notebook, {'metadata': {}})
except Exception as err:
err_msg = str(err)
fail_dict[tutorial_name] = err_msg
finally:
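        # Always write the executed notebook to output.txt and scan it for
        # warning lines, whether or not execution raised.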
output_nb = open("output.txt", mode='w')
nbformat.write(notebook, output_nb)
output_nb.close()
output_nb = open("output.txt", mode='r')
for line in output_nb:
if "Warning:" in line:
fail_dict[tutorial_name] = "%s has warning." % (tutorial_name)
return
if __name__ == "__main__":
tutorial_dir = '../../docs/_build/html/tutorials/'
with open('test_tutorial_config.txt') as config_file:
tutorial_list = []
for line in config_file:
tutorial_list.append(line.lstrip().rstrip())
file_dir = tutorial_dir + line.lstrip().rstrip()
test_tutorial_nb(file_dir)
fail_num = len(fail_dict)
success_num = len(tutorial_list) - fail_num
print "Test Summary Start"
print "%d tutorials tested:" % (len(tutorial_list))
for tutorial in tutorial_list:
print tutorial
print "\n%d tests failed:" % (fail_num)
for tutorial, msg in fail_dict.items():
print tutorial + ":"
print msg
print "Test Summary End"
print "Stats start"
print "[Passed: %d of %d]" % (success_num, len(tutorial_list))
print "Stats end"
| apache-2.0 | 1,592,265,985,950,845,000 | 34.142857 | 167 | 0.604128 | false |
drawquest/drawquest-web | website/canvas/templatetags/canvas_tags.py | 2 | 4205 | # -*- coding: utf-8 -*-
import bleach
import datetime
import os.path
import re
import time
import urllib
import urlparse
import uuid
from django import template
from django.conf import settings
from django.template.defaultfilters import pluralize, yesno
from django.utils.functional import memoize
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from canvas import util, economy, stickers
from canvas.models import SpecialCategory, Category, Comment, Visibility, CommentFlag
from services import Services
from django.conf import settings
register = template.Library()
@register.filter
def get_value(dict_object, key):
""" Looks things up in a dictionary. """
if not dict_object:
return None
return dict_object.get(key, None)
@register.filter
def js_bool(value):
""" Similar to yesno:"true,false,false" """
if not value:
return "false"
return "true"
# Encode all '<'s as \u003c and '>'s as \u003e to prevent <!-- ... --> and </script> from breaking our pages
@register.filter
def to_json(things):
"""
    If the model/object defines a "to_client" method, call it first.
    This way objects can implement the "to_client" interface to return a dictionary
    representation of themselves to be serialized as JSON.
"""
return util.js_safety(util.client_dumps(things))
@register.filter
def ellipsis_after(text, length):
""" Truncates text and adds ellipses at the end. Does not truncate words in the middle. """
if not text or len(text) <= length:
return text
else:
return text[:length].rsplit(' ', 1)[0]+u"\u2026"
@register.filter
def is_in_experiment(request, experiment_name):
try:
experiment_name, branch = experiment_name.split(',')
except ValueError:
branch = 'experimental'
return request.experiments.is_in(experiment_name, branch_name=branch)
@register.filter
def get_labs(kv, key):
""" We need this filter because lab values have semicolons in them. """
return int(kv.get("labs:"+str(key), 0))
@register.simple_tag
def news_img(url):
token = os.path.basename(urlparse.urlparse(url).path)
if "reply" in url:
post_id = int(token)
else:
post_id = util.base36decode_or_404(token)
img_url = Comment.details_by_id(post_id)()['reply_content']['thumbnail']['name']
return "<a href='%s'><img src='http://example.com/ugc/%s'></a>" % (url, img_url)
@register.filter
def sub_header(subheader):
substitutions = {
"hot": "popular",
"active": "new"
}
return substitutions.get(str(subheader).lower(), None) or subheader
@register.inclusion_tag('widget/stickers.django.html', takes_context=True)
def sticker_palette(context):
context['store_items']= stickers.get_purchasable(context['request'].user)
return context
@register.simple_tag
def static_url(relative_url):
return "/static/%s" % str(relative_url)
@register.filter
def pretty_unixtime(t):
return time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(t))
@register.simple_tag
def raw_html(path):
def get_content(path):
import os.path
basedir = os.path.join(settings.PROJECT_PATH, 'templates') #TODO this sucks.
f = file(os.path.join(basedir, path), 'r')
try:
content = f.read()
finally:
f.close()
return mark_safe(content)
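    # When MEMOIZE_RAW_HTML is set the read goes through django's memoize helper
    # (keyed on the single `path` argument); otherwise the template file is
    # re-read on every call.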
if settings.MEMOIZE_RAW_HTML:
return memoize(get_content, {}, 1)(path)
return get_content(path)
@register.simple_tag
def empty_gif():
return "/static/img/0.gif"
class CenterNode(template.Node):
start = """<table width="100%" height="100%" border="0" cellspacing="0" cellpadding="0" style="position: absolute;"><tr><td align="center" valign="middle" style="text-align: center;">"""
end = """</td></tr></table>"""
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
output = self.nodelist.render(context)
return self.start + output + self.end
@register.tag
def center(parser, token):
nodelist = parser.parse(('endcenter',))
parser.delete_first_token()
return CenterNode(nodelist)
| bsd-3-clause | -1,855,555,445,799,859,500 | 29.251799 | 190 | 0.66849 | false |
MERegistro/meregistro | meregistro/apps/registro/forms/ExtensionAulicaFormFilters.py | 1 | 3469 | # -*- coding: UTF-8 -*-
from django import forms
from apps.seguridad.models import TipoDocumento, Usuario
from apps.registro.models import Establecimiento, ExtensionAulica, Localidad, Departamento, Jurisdiccion, TipoGestion
from apps.registro.models.EstadoExtensionAulica import EstadoExtensionAulica
class ExtensionAulicaFormFilters(forms.Form):
nombre = forms.CharField(max_length=40, label='Nombre', required=False)
cue = forms.CharField(max_length=40, label='Cue', required=False)
establecimiento = forms.ModelChoiceField(queryset=Establecimiento.objects.order_by('nombre'), label ='Establecimiento', required=False)
jurisdiccion = forms.ModelChoiceField(queryset=Jurisdiccion.objects.order_by('nombre'), label='Jurisdiccion', required=False)
departamento = forms.ModelChoiceField(queryset=Departamento.objects.order_by('nombre'), label='Departamento', required=False)
localidad = forms.ModelChoiceField(queryset=Localidad.objects.order_by('nombre'), label='Localidad', required=False)
tipo_gestion = forms.ModelChoiceField(queryset=TipoGestion.objects.order_by('nombre'), label='Tipo de gestión', required=False)
estado = forms.ModelChoiceField(queryset=EstadoExtensionAulica.objects.order_by('nombre'), label='Estado', required=False)
def __init__(self, *args, **kwargs):
self.jurisdiccion_id = kwargs.pop('jurisdiccion_id')
self.departamento_id = kwargs.pop('departamento_id')
super(ExtensionAulicaFormFilters, self).__init__(*args, **kwargs)
"Para no cargar todas las localidades y departamentos"
if self.jurisdiccion_id is not None:
self.fields['departamento'].queryset = self.fields['departamento'].queryset.filter(jurisdiccion__id=self.jurisdiccion_id)
if self.departamento_id is not None:
self.fields['localidad'].queryset = self.fields['localidad'].queryset.filter(departamento__id=self.departamento_id)
else:
self.fields['localidad'].queryset = self.fields['localidad'].queryset.none()
def buildQuery(self, q=None):
"""
        Create or refine a search queryset.
"""
if q is None:
q = ExtensionAulica.objects.all()
if self.is_valid():
def filter_by(field):
return self.cleaned_data.has_key(field) and self.cleaned_data[field] != '' and self.cleaned_data[field] is not None
if filter_by('nombre'):
q = q.filter(nombre__icontains=self.cleaned_data['nombre'])
if filter_by('cue'):
q = q.filter(cue__contains=self.cleaned_data['cue'])
if filter_by('establecimiento'):
q = q.filter(establecimiento=self.cleaned_data['establecimiento'])
if filter_by('jurisdiccion'):
q = q.filter(establecimiento__dependencia_funcional__jurisdiccion=self.cleaned_data['jurisdiccion'])
if filter_by('departamento'):
q = q.filter(domicilio__localidad__departamento=self.cleaned_data['departamento'])
if filter_by('localidad'):
q = q.filter(domicilio__localidad=self.cleaned_data['localidad'])
if filter_by('tipo_gestion'):
q = q.filter(establecimiento__dependencia_funcional__tipo_gestion=self.cleaned_data['tipo_gestion'])
if filter_by('estado'):
q = q.filter(estado=self.cleaned_data['estado'])
return q.distinct()
| bsd-3-clause | 720,294,403,880,718,700 | 57.762712 | 139 | 0.672916 | false |
uclouvain/osis | education_group/ddd/validators/validators_by_business_action.py | 1 | 6776 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from base.ddd.utils import business_validator
from base.ddd.utils.business_validator import MultipleExceptionBusinessListValidator
from education_group.ddd.business_types import *
from education_group.ddd.validators._abbreviated_title_already_exist import AcronymAlreadyExistValidator
from education_group.ddd.validators._acronym_required import AcronymRequiredValidator
from education_group.ddd.validators._certificate_aim_type_2 import CertificateAimType2Validator
from education_group.ddd.validators._content_constraint import ContentConstraintValidator
from education_group.ddd.validators._copy_check_mini_training_end_date import CheckMiniTrainingEndDateValidator
from education_group.ddd.validators._copy_check_training_end_date import CheckTrainingEndDateValidator
from education_group.ddd.validators._credits import CreditsValidator
from education_group.ddd.validators._enrollments import TrainingEnrollmentsValidator, MiniTrainingEnrollmentsValidator
from education_group.ddd.validators._link_with_epc import TrainingLinkWithEPCValidator, MiniTrainingLinkWithEPCValidator
from education_group.ddd.validators._start_year_end_year import StartYearEndYearValidator
from education_group.ddd.validators._unique_code import UniqueCodeValidator
from education_group.ddd.validators.start_and_end_year_validator import StartAndEndYearValidator
from education_group.ddd.validators._hops_validator import HopsValuesValidator
from program_management.ddd.validators._code_pattern import CodePatternValidator
class CreateGroupValidatorList(MultipleExceptionBusinessListValidator):
def __init__(self, group: 'Group'):
self.validators = [
UniqueCodeValidator(group.code),
ContentConstraintValidator(group.content_constraint),
CreditsValidator(group.credits),
]
super().__init__()
class UpdateGroupValidatorList(MultipleExceptionBusinessListValidator):
def __init__(self, group: 'Group'):
self.validators = [
ContentConstraintValidator(group.content_constraint),
CreditsValidator(group.credits),
]
super().__init__()
class CreateMiniTrainingValidatorList(MultipleExceptionBusinessListValidator):
def __init__(self, mini_training_domain_obj: 'MiniTraining'):
self.validators = [
UniqueCodeValidator(mini_training_domain_obj.code),
AcronymRequiredValidator(mini_training_domain_obj.acronym),
AcronymAlreadyExistValidator(mini_training_domain_obj.acronym),
StartAndEndYearValidator(mini_training_domain_obj.start_year, mini_training_domain_obj.end_year)
]
super().__init__()
class UpdateMiniTrainingValidatorList(MultipleExceptionBusinessListValidator):
def __init__(self, mini_training: 'MiniTraining'):
self.validators = []
super().__init__()
class CreateTrainingValidatorList(MultipleExceptionBusinessListValidator):
def __init__(self, training: 'Training'):
self.validators = [
UniqueCodeValidator(training.code),
AcronymRequiredValidator(training.acronym),
AcronymAlreadyExistValidator(training.acronym),
StartYearEndYearValidator(training),
HopsValuesValidator(training)
]
super().__init__()
class UpdateTrainingValidatorList(MultipleExceptionBusinessListValidator):
def __init__(self, training: 'Training'):
self.validators = [
HopsValuesValidator(training)
]
super().__init__()
class UpdateCertificateAimsValidatorList(business_validator.BusinessListValidator):
def __init__(self, training: 'Training'):
self.validators = [
CertificateAimType2Validator(training)
]
super().__init__()
class CopyTrainingValidatorList(business_validator.BusinessListValidator):
def __init__(self, training_from: 'Training'):
self.validators = [
CheckTrainingEndDateValidator(training_from),
]
super().__init__()
class CopyGroupValidatorList(business_validator.BusinessListValidator):
def __init__(self, group_from: 'Group'):
self.validators = []
super().__init__()
class CopyMiniTrainingValidatorList(business_validator.BusinessListValidator):
def __init__(
self,
mini_training_from: 'MiniTraining',
):
self.validators = [
CheckMiniTrainingEndDateValidator(mini_training_from),
]
super().__init__()
class DeleteOrphanTrainingValidatorList(business_validator.BusinessListValidator):
def __init__(
self,
training: 'Training',
):
self.validators = [
TrainingEnrollmentsValidator(training.entity_id),
TrainingLinkWithEPCValidator(training.entity_id)
]
super().__init__()
class DeleteOrphanMiniTrainingValidatorList(business_validator.BusinessListValidator):
def __init__(
self,
mini_training: 'MiniTraining',
):
self.validators = [
MiniTrainingEnrollmentsValidator(mini_training.entity_id),
MiniTrainingLinkWithEPCValidator(mini_training.entity_id)
]
super().__init__()
class CreateOrphanGroupValidatorList(MultipleExceptionBusinessListValidator):
def __init__(self, code: str, group_type: str):
self.validators = [
CodePatternValidator(code, group_type)
]
super().__init__()
| agpl-3.0 | 5,581,070,803,011,847,000 | 37.494318 | 120 | 0.698155 | false |
tlevine/alot | alot/widgets/thread.py | 4 | 14038 | # Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
"""
Widgets specific to thread mode
"""
import urwid
import logging
from alot.settings import settings
from alot.db.utils import decode_header, X_SIGNATURE_MESSAGE_HEADER
from alot.helper import tag_cmp
from alot.widgets.globals import TagWidget
from alot.widgets.globals import AttachmentWidget
from urwidtrees import Tree, SimpleTree, CollapsibleTree
from alot.db.utils import extract_body
class MessageSummaryWidget(urwid.WidgetWrap):
"""
one line summary of a :class:`~alot.db.message.Message`.
"""
def __init__(self, message, even=True):
"""
:param message: a message
:type message: alot.db.Message
:param even: even entry in a pile of messages? Used for theming.
:type even: bool
"""
self.message = message
self.even = even
if even:
attr = settings.get_theming_attribute('thread', 'summary', 'even')
else:
attr = settings.get_theming_attribute('thread', 'summary', 'odd')
focus_att = settings.get_theming_attribute('thread', 'summary',
'focus')
cols = []
sumstr = self.__str__()
txt = urwid.Text(sumstr)
cols.append(txt)
thread_tags = message.get_thread().get_tags(intersection=True)
outstanding_tags = set(message.get_tags()).difference(thread_tags)
tag_widgets = [TagWidget(t, attr, focus_att) for t in outstanding_tags]
tag_widgets.sort(tag_cmp, lambda tag_widget: tag_widget.translated)
for tag_widget in tag_widgets:
if not tag_widget.hidden:
cols.append(('fixed', tag_widget.width(), tag_widget))
line = urwid.AttrMap(urwid.Columns(cols, dividechars=1), attr,
focus_att)
urwid.WidgetWrap.__init__(self, line)
def __str__(self):
author, address = self.message.get_author()
date = self.message.get_datestring()
rep = author if author != '' else address
if date is not None:
rep += " (%s)" % date
return rep
def selectable(self):
return True
def keypress(self, size, key):
return key
class FocusableText(urwid.WidgetWrap):
"""Selectable Text used for nodes in our example"""
def __init__(self, txt, att, att_focus):
t = urwid.Text(txt)
w = urwid.AttrMap(t, att, att_focus)
urwid.WidgetWrap.__init__(self, w)
def selectable(self):
return True
def keypress(self, size, key):
return key
class TextlinesList(SimpleTree):
def __init__(self, content, attr=None, attr_focus=None):
"""
        :class:`SimpleTree` that contains one level-0 FocusableText widget
        for each line in content.
"""
structure = []
for line in content.splitlines():
structure.append((FocusableText(line, attr, attr_focus), None))
SimpleTree.__init__(self, structure)
class DictList(SimpleTree):
"""
:class:`SimpleTree` that displays key-value pairs.
The structure will obey the Tree API but will not actually be a tree
but a flat list: It contains one top-level node (displaying the k/v pair in
Columns) per pair. That is, the root will be the first pair,
    its siblings will be the other pairs and first|last_child will always
be None.
"""
def __init__(self, content, key_attr, value_attr, gaps_attr=None):
"""
        :param content: list of key/value pairs to display
:type headerslist: list of (str, str)
:param key_attr: theming attribute to use for keys
:type key_attr: urwid.AttrSpec
:param value_attr: theming attribute to use for values
:type value_attr: urwid.AttrSpec
:param gaps_attr: theming attribute to wrap lines in
:type gaps_attr: urwid.AttrSpec
"""
max_key_len = 1
structure = []
# calc max length of key-string
for key, value in content:
if len(key) > max_key_len:
max_key_len = len(key)
for key, value in content:
# todo : even/odd
keyw = ('fixed', max_key_len + 1,
urwid.Text((key_attr, key)))
valuew = urwid.Text((value_attr, value))
line = urwid.Columns([keyw, valuew])
if gaps_attr is not None:
line = urwid.AttrMap(line, gaps_attr)
structure.append((line, None))
SimpleTree.__init__(self, structure)
class MessageTree(CollapsibleTree):
"""
:class:`Tree` that displays contents of a single :class:`alot.db.Message`.
Its root node is a :class:`MessageSummaryWidget`, and its child nodes
reflect the messages content (parts for headers/attachments etc).
Collapsing this message corresponds to showing the summary only.
"""
def __init__(self, message, odd=True):
"""
:param message: Message to display
:type message: alot.db.Message
:param odd: theme summary widget as if this is an odd line
(in the message-pile)
:type odd: bool
"""
self._message = message
self._odd = odd
self.display_source = False
self._summaryw = None
self._bodytree = None
self._sourcetree = None
self.display_all_headers = False
self._all_headers_tree = None
self._default_headers_tree = None
self.display_attachments = True
self._attachments = None
self._maintree = SimpleTree(self._assemble_structure())
CollapsibleTree.__init__(self, self._maintree)
def get_message(self):
return self._message
def reassemble(self):
self._maintree._treelist = self._assemble_structure()
def refresh(self):
self._summaryw = None
self.reassemble()
def debug(self):
logging.debug('collapsed %s' % self.is_collapsed(self.root))
logging.debug('display_source %s' % self.display_source)
logging.debug('display_all_headers %s' % self.display_all_headers)
        logging.debug('display_attachments %s' % self.display_attachments)
logging.debug('AHT %s' % str(self._all_headers_tree))
logging.debug('DHT %s' % str(self._default_headers_tree))
logging.debug('MAINTREE %s' % str(self._maintree._treelist))
def _assemble_structure(self):
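        # The summary line is the root; its children are the source or header
        # tree, an optional attachment list and (if present) the body tree.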
mainstruct = []
if self.display_source:
mainstruct.append((self._get_source(), None))
else:
mainstruct.append((self._get_headers(), None))
attachmenttree = self._get_attachments()
if attachmenttree is not None:
mainstruct.append((attachmenttree, None))
bodytree = self._get_body()
if bodytree is not None:
mainstruct.append((self._get_body(), None))
structure = [
(self._get_summary(), mainstruct)
]
return structure
def collapse_if_matches(self, querystring):
"""
collapse (and show summary only) if the :class:`alot.db.Message`
matches given `querystring`
"""
self.set_position_collapsed(
self.root, self._message.matches(querystring))
def _get_summary(self):
if self._summaryw is None:
self._summaryw = MessageSummaryWidget(
self._message, even=(not self._odd))
return self._summaryw
def _get_source(self):
if self._sourcetree is None:
sourcetxt = self._message.get_email().as_string()
att = settings.get_theming_attribute('thread', 'body')
att_focus = settings.get_theming_attribute('thread', 'body_focus')
self._sourcetree = TextlinesList(sourcetxt, att, att_focus)
return self._sourcetree
def _get_body(self):
if self._bodytree is None:
bodytxt = extract_body(self._message.get_email())
if bodytxt:
att = settings.get_theming_attribute('thread', 'body')
att_focus = settings.get_theming_attribute(
'thread', 'body_focus')
self._bodytree = TextlinesList(bodytxt, att, att_focus)
return self._bodytree
def replace_bodytext(self, txt):
"""display txt instead of current msg 'body'"""
if txt:
att = settings.get_theming_attribute('thread', 'body')
att_focus = settings.get_theming_attribute('thread', 'body_focus')
self._bodytree = TextlinesList(txt, att, att_focus)
def _get_headers(self):
if self.display_all_headers is True:
if self._all_headers_tree is None:
self._all_headers_tree = self.construct_header_pile()
ret = self._all_headers_tree
else:
if self._default_headers_tree is None:
headers = settings.get('displayed_headers')
self._default_headers_tree = self.construct_header_pile(
headers)
ret = self._default_headers_tree
return ret
def _get_attachments(self):
if self._attachments is None:
alist = []
for a in self._message.get_attachments():
alist.append((AttachmentWidget(a), None))
if alist:
self._attachments = SimpleTree(alist)
return self._attachments
def construct_header_pile(self, headers=None, normalize=True):
mail = self._message.get_email()
lines = []
if headers is None:
# collect all header/value pairs in the order they appear
headers = mail.keys()
for key, value in mail.items():
dvalue = decode_header(value, normalize=normalize)
lines.append((key, dvalue))
else:
# only a selection of headers should be displayed.
# use order of the `headers` parameter
for key in headers:
if key in mail:
if key.lower() in ['cc', 'bcc', 'to']:
values = mail.get_all(key)
values = [decode_header(
v, normalize=normalize) for v in values]
lines.append((key, ', '.join(values)))
else:
for value in mail.get_all(key):
dvalue = decode_header(value, normalize=normalize)
lines.append((key, dvalue))
elif key.lower() == 'tags':
logging.debug('want tags header')
values = []
for t in self._message.get_tags():
tagrep = settings.get_tagstring_representation(t)
if t is not tagrep['translated']:
t = '%s (%s)' % (tagrep['translated'], t)
values.append(t)
lines.append((key, ', '.join(values)))
# OpenPGP pseudo headers
if mail[X_SIGNATURE_MESSAGE_HEADER]:
lines.append(('PGP-Signature', mail[X_SIGNATURE_MESSAGE_HEADER]))
key_att = settings.get_theming_attribute('thread', 'header_key')
value_att = settings.get_theming_attribute('thread', 'header_value')
gaps_att = settings.get_theming_attribute('thread', 'header')
return DictList(lines, key_att, value_att, gaps_att)
class ThreadTree(Tree):
"""
:class:`Tree` that parses a given :class:`alot.db.Thread` into a tree of
    :class:`MessageTrees <MessageTree>` that display this thread's individual
    messages. As MessageTrees are *not* urwid widgets themselves, this is to be
    used in combination with :class:`NestedTree` only.
"""
def __init__(self, thread):
self._thread = thread
self.root = thread.get_toplevel_messages()[0].get_message_id()
self._parent_of = {}
self._first_child_of = {}
self._last_child_of = {}
self._next_sibling_of = {}
self._prev_sibling_of = {}
self._message = {}
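        # Build flat lookup tables (parent, first/last child, prev/next sibling)
        # in one depth-first pass so the Tree API below reduces to dict lookups.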
def accumulate(msg, odd=True):
"""recursively read msg and its replies"""
mid = msg.get_message_id()
self._message[mid] = MessageTree(msg, odd)
odd = not odd
last = None
self._first_child_of[mid] = None
for reply in thread.get_replies_to(msg):
rid = reply.get_message_id()
if self._first_child_of[mid] is None:
self._first_child_of[mid] = rid
self._parent_of[rid] = mid
self._prev_sibling_of[rid] = last
self._next_sibling_of[last] = rid
last = rid
odd = accumulate(reply, odd)
self._last_child_of[mid] = last
return odd
last = None
for msg in thread.get_toplevel_messages():
mid = msg.get_message_id()
self._prev_sibling_of[mid] = last
self._next_sibling_of[last] = mid
accumulate(msg)
last = mid
self._next_sibling_of[last] = None
# Tree API
def __getitem__(self, pos):
return self._message.get(pos, None)
def parent_position(self, pos):
return self._parent_of.get(pos, None)
def first_child_position(self, pos):
return self._first_child_of.get(pos, None)
def last_child_position(self, pos):
return self._last_child_of.get(pos, None)
def next_sibling_position(self, pos):
return self._next_sibling_of.get(pos, None)
def prev_sibling_position(self, pos):
return self._prev_sibling_of.get(pos, None)
def position_of_messagetree(self, mt):
return mt._message.get_message_id()
| gpl-3.0 | 6,526,684,069,569,996,000 | 36.335106 | 79 | 0.575794 | false |
HaebinShin/tensorflow | tensorflow/python/kernel_tests/sparse_to_dense_op_py_test.py | 8 | 6792 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.sparse_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def _SparseToDense(sparse_indices, output_size, sparse_values,
default_value, validate_indices=True):
return tf.sparse_to_dense(sparse_indices, output_size,
sparse_values,
default_value=default_value,
validate_indices=validate_indices)
class SparseToDenseTest(tf.test.TestCase):
def testInt(self):
with self.test_session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], 1, 0).eval()
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testFloat(self):
with self.test_session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], 1.0, 0.0).eval()
np_ans = np.array([0, 1, 0, 1, 0]).astype(np.float32)
self.assertAllClose(np_ans, tf_ans)
def testString(self):
with self.test_session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], "a", "b").eval()
np_ans = np.array(["b", "a", "b", "a", "b"]).astype(np.string_)
self.assertAllEqual(np_ans, tf_ans)
def testSetValue(self):
with self.test_session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], [1, 2], -1).eval()
np_ans = np.array([-1, 1, -1, 2, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testSetSingleValue(self):
with self.test_session(use_gpu=False):
tf_ans = _SparseToDense([1, 3], [5], 1, -1).eval()
np_ans = np.array([-1, 1, -1, 1, -1]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def test2d(self):
# pylint: disable=bad-whitespace
with self.test_session(use_gpu=False):
tf_ans = _SparseToDense([[1, 3], [2, 0]], [3, 4], 1, -1).eval()
np_ans = np.array([[-1, -1, -1, -1],
[-1, -1, -1, 1],
[ 1, -1, -1, -1]]).astype(np.int32)
self.assertAllClose(np_ans, tf_ans)
def testZeroDefault(self):
with self.test_session():
x = tf.sparse_to_dense(2, [4], 7).eval()
self.assertAllEqual(x, [0, 0, 7, 0])
def test3d(self):
with self.test_session(use_gpu=False):
tf_ans = _SparseToDense([[1, 3, 0], [2, 0, 1]], [3, 4, 2], 1, -1).eval()
np_ans = np.ones((3, 4, 2), dtype=np.int32) * -1
np_ans[1, 3, 0] = 1
np_ans[2, 0, 1] = 1
self.assertAllClose(np_ans, tf_ans)
def testBadShape(self):
with self.test_session():
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: ("Input shape should be a vector" == str(e))):
_SparseToDense([1, 3], [[5], [3]], 1, -1)
def testBadValue(self):
with self.test_session():
dense = _SparseToDense([1, 3], [5], [[5], [3]], -1)
with self.assertRaisesOpError(
r"sparse_values has incorrect shape \[2,1\], "
r"should be \[\] or \[2\]"):
dense.eval()
def testBadNumValues(self):
with self.test_session():
dense = _SparseToDense([1, 3], [5], [1, 2, 3], -1)
with self.assertRaisesOpError(
r"sparse_values has incorrect shape \[3\], should be \[\] or \[2\]"):
dense.eval()
def testBadDefault(self):
with self.test_session():
dense = _SparseToDense([1, 3], [5], [1, 2], [0])
with self.assertRaisesOpError("default_value should be a scalar"):
dense.eval()
def testOutOfBoundsIndicesWithWithoutValidation(self):
with self.test_session():
dense = _SparseToDense(
sparse_indices=[[1], [10]], output_size=[5],
sparse_values=[-1.0, 1.0], default_value=0.0)
with self.assertRaisesOpError(
r"indices\[1\] = \[10\] is out of bounds: need 0 <= index < \[5\]"):
dense.eval()
# Disable checks, the allocation should still fail.
with self.assertRaisesOpError("out of bounds"):
dense_without_validation = _SparseToDense(
sparse_indices=[[1], [10]], output_size=[5],
sparse_values=[-1.0, 1.0], default_value=0.0,
validate_indices=False)
dense_without_validation.eval()
def testRepeatingIndicesWithWithoutValidation(self):
with self.test_session():
dense = _SparseToDense(
sparse_indices=[[1], [1]], output_size=[5],
sparse_values=[-1.0, 1.0], default_value=0.0)
with self.assertRaisesOpError(r"indices\[1\] = \[1\] is repeated"):
dense.eval()
# Disable checks
dense_without_validation = _SparseToDense(
sparse_indices=[[1], [1]], output_size=[5],
sparse_values=[-1.0, 1.0], default_value=0.0, validate_indices=False)
dense_without_validation.eval()
def testUnsortedIndicesWithWithoutValidation(self):
with self.test_session():
dense = _SparseToDense(
sparse_indices=[[2], [1]], output_size=[5],
sparse_values=[-1.0, 1.0], default_value=0.0)
with self.assertRaisesOpError(r"indices\[1\] = \[1\] is out of order"):
dense.eval()
# Disable checks
dense_without_validation = _SparseToDense(
sparse_indices=[[2], [1]], output_size=[5],
sparse_values=[-1.0, 1.0], default_value=0.0, validate_indices=False)
dense_without_validation.eval()
def testShapeInferenceKnownShape(self):
with self.test_session(use_gpu=False):
indices = tf.placeholder(tf.int64)
shape = [4, 5, 6]
output = tf.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape(), [4, 5, 6])
shape = tf.placeholder(tf.int64, shape=(3,))
output = tf.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape().as_list(), [None, None, None])
def testShapeInferenceUnknownShape(self):
with self.test_session(use_gpu=False):
indices = tf.placeholder(tf.int64)
shape = tf.placeholder(tf.int64)
output = tf.sparse_to_dense(indices, shape, 1, 0)
self.assertEqual(output.get_shape().ndims, None)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -3,062,619,296,948,984,300 | 37.372881 | 80 | 0.602326 | false |
anault/arpdetect | tests.py | 4 | 1995 | #!/usr/bin/python
import sys
import os
import arpdetect
def test(result):
print ("Success" if result else "Fail")
# Not suspicious ARP table
def test1():
pHosts = [
arpdetect.Host(["192.168.1.1", "00-00-00-00-00-00", "dynamique"]),
arpdetect.Host(["192.168.1.101", "11-11-11-11-11-11", "dynamique"]),
arpdetect.Host(["192.168.1.102", "22-22-22-22-22-22", "dynamique"]),
]
hosts = pHosts
suspicious, suspect = arpdetect.inspect(hosts[0], hosts, pHosts)
return not suspicious
# Suspicious as Gateway changed MAC
def test2():
pHosts = [
arpdetect.Host(["192.168.1.1", "00-00-00-00-00-00", "dynamique"]),
arpdetect.Host(["192.168.1.101", "11-11-11-11-11-11", "dynamique"]),
arpdetect.Host(["192.168.1.102", "22-22-22-22-22-22", "dynamique"]),
]
hosts = [
arpdetect.Host(["192.168.1.1", "55-55-55-55-55-55", "dynamique"]),
arpdetect.Host(["192.168.1.101", "11-11-11-11-11-11", "dynamique"]),
arpdetect.Host(["192.168.1.102", "22-22-22-22-22-22", "dynamique"]),
]
suspicious, suspect = arpdetect.inspect(hosts[0], hosts, pHosts)
return suspicious and suspect.mac == "55-55-55-55-55-55" and suspect.ipv4 == "Unknown"
# Suspicious as Gateway changed MAC, also suspecr ip is known
def test3():
pHosts = [
arpdetect.Host(["192.168.1.1", "00-00-00-00-00-00", "dynamique"]),
arpdetect.Host(["192.168.1.101", "11-11-11-11-11-11", "dynamique"]),
arpdetect.Host(["192.168.1.102", "22-22-22-22-22-22", "dynamique"]),
]
hosts = [
arpdetect.Host(["192.168.1.1", "22-22-22-22-22-22", "dynamique"]),
arpdetect.Host(["192.168.1.101", "11-11-11-11-11-11", "dynamique"]),
arpdetect.Host(["192.168.1.102", "22-22-22-22-22-22", "dynamique"]),
]
suspicious, suspect = arpdetect.inspect(hosts[0], hosts, pHosts)
return suspicious and suspect.mac == "22-22-22-22-22-22" and suspect.ipv4 == "192.168.1.102"
if __name__ == "__main__":
# Script only runs on windows
if os.name != "nt":
sys.exit();
	# Run all tests
test(test1())
test(test2())
test(test3()) | mit | -1,291,289,397,608,141,000 | 33.413793 | 93 | 0.639098 | false |
Vignesh2208/Awlsim | awlsim/core/systemblocks/system_sfb_2.py | 2 | 3286 | # -*- coding: utf-8 -*-
#
# AWL simulator - SFBs
#
# Copyright 2014-2015 Michael Buesch <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.systemblocks.systemblocks import *
from awlsim.core.util import *
class SFB2(SFB):
name = (2, "CTUD", "IEC 1131-3 up/down counter")
interfaceFields = {
BlockInterfaceField.FTYPE_IN : (
BlockInterfaceField(name = "CU",
dataType = AwlDataType.makeByName("BOOL")),
BlockInterfaceField(name = "CD",
dataType = AwlDataType.makeByName("BOOL")),
BlockInterfaceField(name = "R",
dataType = AwlDataType.makeByName("BOOL")),
BlockInterfaceField(name = "LOAD",
dataType = AwlDataType.makeByName("BOOL")),
BlockInterfaceField(name = "PV",
dataType = AwlDataType.makeByName("INT")),
),
BlockInterfaceField.FTYPE_OUT : (
BlockInterfaceField(name = "QU",
dataType = AwlDataType.makeByName("BOOL")),
BlockInterfaceField(name = "QD",
dataType = AwlDataType.makeByName("BOOL")),
BlockInterfaceField(name = "CV",
dataType = AwlDataType.makeByName("INT")),
),
BlockInterfaceField.FTYPE_STAT : (
BlockInterfaceField(name = "CUO",
dataType = AwlDataType.makeByName("BOOL")),
BlockInterfaceField(name = "CDO",
dataType = AwlDataType.makeByName("BOOL")),
),
}
def run(self):
s = self.cpu.statusWord
# CU pos-edge detection
CU = self.fetchInterfaceFieldByName("CU")
CU_pos_edge = CU & ~self.fetchInterfaceFieldByName("CUO") & 1
self.storeInterfaceFieldByName("CUO", CU)
# CD pos-edge detection
CD = self.fetchInterfaceFieldByName("CD")
CD_pos_edge = CD & ~self.fetchInterfaceFieldByName("CDO") & 1
self.storeInterfaceFieldByName("CDO", CD)
# Count
PV = wordToSignedPyInt(self.fetchInterfaceFieldByName("PV"))
CV = wordToSignedPyInt(self.fetchInterfaceFieldByName("CV"))
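		# R has priority over LOAD; counting saturates at the signed 16-bit
		# limits (-32768 / 32767), as specified for the IEC up/down counter.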
if self.fetchInterfaceFieldByName("R"): # Counter reset
CV = 0
self.storeInterfaceFieldByName("CV", CV)
elif self.fetchInterfaceFieldByName("LOAD"): # Counter load
CV = PV
self.storeInterfaceFieldByName("CV", CV)
elif CD_pos_edge and not CU_pos_edge and CV > -32768: # Count down
CV -= 1
self.storeInterfaceFieldByName("CV", CV)
elif CU_pos_edge and not CD_pos_edge and CV < 32767: # Count up
CV += 1
self.storeInterfaceFieldByName("CV", CV)
# Update Q-status
self.storeInterfaceFieldByName("QU", 1 if CV >= PV else 0)
self.storeInterfaceFieldByName("QD", 1 if CV <= 0 else 0)
s.BIE = 1
| gpl-2.0 | 1,617,853,630,234,739,700 | 33.957447 | 82 | 0.702982 | false |
pdellaert/ansible | test/units/modules/network/ftd/test_ftd_install.py | 27 | 10168 | # Copyright (c) 2019 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import pytest
from units.compat.mock import PropertyMock
from ansible.module_utils import basic
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleFailJson, AnsibleExitJson
from ansible.modules.network.ftd import ftd_install
from ansible.module_utils.network.ftd.device import FtdModel
DEFAULT_MODULE_PARAMS = dict(
device_hostname="firepower",
device_username="admin",
device_password="pass",
device_new_password="newpass",
device_sudo_password="sudopass",
device_ip="192.168.0.1",
device_netmask="255.255.255.0",
device_gateway="192.168.0.254",
device_model=FtdModel.FTD_ASA5516_X,
dns_server="8.8.8.8",
console_ip="10.89.0.0",
console_port="2004",
console_username="console_user",
console_password="console_pass",
rommon_file_location="tftp://10.0.0.1/boot/ftd-boot-1.9.2.0.lfbff",
image_file_location="http://10.0.0.1/Release/ftd-6.2.3-83.pkg",
image_version="6.2.3-83",
search_domains="cisco.com",
force_install=False
)
class TestFtdInstall(object):
module = ftd_install
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
mocker.patch.object(basic.AnsibleModule, '_socket_path', new_callable=PropertyMock, create=True,
return_value=mocker.MagicMock())
@pytest.fixture(autouse=True)
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.modules.network.ftd.ftd_install.Connection')
return connection_class_mock.return_value
@pytest.fixture
def config_resource_mock(self, mocker):
resource_class_mock = mocker.patch('ansible.modules.network.ftd.ftd_install.BaseConfigurationResource')
return resource_class_mock.return_value
@pytest.fixture(autouse=True)
def ftd_factory_mock(self, mocker):
return mocker.patch('ansible.modules.network.ftd.ftd_install.FtdPlatformFactory')
@pytest.fixture(autouse=True)
def has_kick_mock(self, mocker):
return mocker.patch('ansible.module_utils.network.ftd.device.HAS_KICK', True)
def test_module_should_fail_when_kick_is_not_installed(self, mocker):
mocker.patch('ansible.module_utils.network.ftd.device.HAS_KICK', False)
set_module_args(dict(DEFAULT_MODULE_PARAMS))
with pytest.raises(AnsibleFailJson) as ex:
self.module.main()
result = ex.value.args[0]
assert result['failed']
assert "Firepower-kickstart library is required to run this module" in result['msg']
def test_module_should_fail_when_platform_is_not_supported(self, config_resource_mock):
config_resource_mock.execute_operation.return_value = {'platformModel': 'nonSupportedModel'}
module_params = dict(DEFAULT_MODULE_PARAMS)
del module_params['device_model']
set_module_args(module_params)
with pytest.raises(AnsibleFailJson) as ex:
self.module.main()
result = ex.value.args[0]
assert result['failed']
assert result['msg'] == "Platform model 'nonSupportedModel' is not supported by this module."
def test_module_should_fail_when_device_model_is_missing_with_local_connection(self, mocker):
mocker.patch.object(basic.AnsibleModule, '_socket_path', create=True, return_value=None)
module_params = dict(DEFAULT_MODULE_PARAMS)
del module_params['device_model']
set_module_args(module_params)
with pytest.raises(AnsibleFailJson) as ex:
self.module.main()
result = ex.value.args[0]
assert result['failed']
expected_msg = \
"The following parameters are mandatory when the module is used with 'local' connection: device_model."
assert expected_msg == result['msg']
def test_module_should_fail_when_management_ip_values_are_missing_with_local_connection(self, mocker):
mocker.patch.object(basic.AnsibleModule, '_socket_path', create=True, return_value=None)
module_params = dict(DEFAULT_MODULE_PARAMS)
del module_params['device_ip']
del module_params['device_netmask']
del module_params['device_gateway']
set_module_args(module_params)
with pytest.raises(AnsibleFailJson) as ex:
self.module.main()
result = ex.value.args[0]
assert result['failed']
expected_msg = "The following parameters are mandatory when the module is used with 'local' connection: " \
"device_gateway, device_ip, device_netmask."
assert expected_msg == result['msg']
def test_module_should_return_when_software_is_already_installed(self, config_resource_mock):
config_resource_mock.execute_operation.return_value = {
'softwareVersion': '6.3.0-11',
'platformModel': 'Cisco ASA5516-X Threat Defense'
}
module_params = dict(DEFAULT_MODULE_PARAMS)
module_params['image_version'] = '6.3.0-11'
set_module_args(module_params)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
result = ex.value.args[0]
assert not result['changed']
assert result['msg'] == 'FTD already has 6.3.0-11 version of software installed.'
def test_module_should_proceed_if_software_is_already_installed_and_force_param_given(self, config_resource_mock):
config_resource_mock.execute_operation.return_value = {
'softwareVersion': '6.3.0-11',
'platformModel': 'Cisco ASA5516-X Threat Defense'
}
module_params = dict(DEFAULT_MODULE_PARAMS)
module_params['image_version'] = '6.3.0-11'
module_params['force_install'] = True
set_module_args(module_params)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
result = ex.value.args[0]
assert result['changed']
assert result['msg'] == 'Successfully installed FTD image 6.3.0-11 on the firewall device.'
def test_module_should_install_ftd_image(self, config_resource_mock, ftd_factory_mock):
config_resource_mock.execute_operation.side_effect = [
{
'softwareVersion': '6.2.3-11',
'platformModel': 'Cisco ASA5516-X Threat Defense'
}
]
module_params = dict(DEFAULT_MODULE_PARAMS)
set_module_args(module_params)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
result = ex.value.args[0]
assert result['changed']
assert result['msg'] == 'Successfully installed FTD image 6.2.3-83 on the firewall device.'
ftd_factory_mock.create.assert_called_once_with('Cisco ASA5516-X Threat Defense', DEFAULT_MODULE_PARAMS)
ftd_factory_mock.create.return_value.install_ftd_image.assert_called_once_with(DEFAULT_MODULE_PARAMS)
def test_module_should_fill_management_ip_values_when_missing(self, config_resource_mock, ftd_factory_mock):
config_resource_mock.execute_operation.side_effect = [
{
'softwareVersion': '6.3.0-11',
'platformModel': 'Cisco ASA5516-X Threat Defense'
},
{
'items': [{
'ipv4Address': '192.168.1.1',
'ipv4NetMask': '255.255.255.0',
'ipv4Gateway': '192.168.0.1'
}]
}
]
module_params = dict(DEFAULT_MODULE_PARAMS)
expected_module_params = dict(module_params)
del module_params['device_ip']
del module_params['device_netmask']
del module_params['device_gateway']
expected_module_params.update(
device_ip='192.168.1.1',
device_netmask='255.255.255.0',
device_gateway='192.168.0.1'
)
set_module_args(module_params)
with pytest.raises(AnsibleExitJson):
self.module.main()
ftd_factory_mock.create.assert_called_once_with('Cisco ASA5516-X Threat Defense', expected_module_params)
ftd_factory_mock.create.return_value.install_ftd_image.assert_called_once_with(expected_module_params)
def test_module_should_fill_dns_server_when_missing(self, config_resource_mock, ftd_factory_mock):
config_resource_mock.execute_operation.side_effect = [
{
'softwareVersion': '6.3.0-11',
'platformModel': 'Cisco ASA5516-X Threat Defense'
},
{
'items': [{
'dnsServerGroup': {
'id': '123'
}
}]
},
{
'dnsServers': [{
'ipAddress': '8.8.9.9'
}]
}
]
module_params = dict(DEFAULT_MODULE_PARAMS)
expected_module_params = dict(module_params)
del module_params['dns_server']
expected_module_params['dns_server'] = '8.8.9.9'
set_module_args(module_params)
with pytest.raises(AnsibleExitJson):
self.module.main()
ftd_factory_mock.create.assert_called_once_with('Cisco ASA5516-X Threat Defense', expected_module_params)
ftd_factory_mock.create.return_value.install_ftd_image.assert_called_once_with(expected_module_params)
| gpl-3.0 | -5,666,699,047,314,154,000 | 40 | 118 | 0.642506 | false |
ehashman/oh-mainline | vendor/packages/docutils/test/test_parsers/test_rst/test_block_quotes.py | 19 | 6665 | #! /usr/bin/env python
# $Id: test_block_quotes.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for states.py.
"""
from __init__ import DocutilsTestSupport
def suite():
s = DocutilsTestSupport.ParserTestSuite()
s.generateTests(totest)
return s
totest = {}
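# Each test case pairs an input reStructuredText snippet with the pseudo-XML
# document tree the parser is expected to produce.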
totest['block_quotes'] = [
["""\
Line 1.
Line 2.
Indented.
""",
"""\
<document source="test data">
<paragraph>
Line 1.
Line 2.
<block_quote>
<paragraph>
Indented.
"""],
["""\
Line 1.
Line 2.
Indented 1.
Indented 2.
""",
"""\
<document source="test data">
<paragraph>
Line 1.
Line 2.
<block_quote>
<paragraph>
Indented 1.
<block_quote>
<paragraph>
Indented 2.
"""],
["""\
Line 1.
Line 2.
Unexpectedly indented.
""",
"""\
<document source="test data">
<paragraph>
Line 1.
Line 2.
<system_message level="3" line="3" source="test data" type="ERROR">
<paragraph>
Unexpected indentation.
<block_quote>
<paragraph>
Unexpectedly indented.
"""],
["""\
Line 1.
Line 2.
Indented.
no blank line
""",
"""\
<document source="test data">
<paragraph>
Line 1.
Line 2.
<block_quote>
<paragraph>
Indented.
<system_message level="2" line="5" source="test data" type="WARNING">
<paragraph>
Block quote ends without a blank line; unexpected unindent.
<paragraph>
no blank line
"""],
["""\
Here is a paragraph.
Indent 8 spaces.
Indent 4 spaces.
Is this correct? Should it generate a warning?
Yes, it is correct, no warning necessary.
""",
"""\
<document source="test data">
<paragraph>
Here is a paragraph.
<block_quote>
<block_quote>
<paragraph>
Indent 8 spaces.
<paragraph>
Indent 4 spaces.
<paragraph>
Is this correct? Should it generate a warning?
Yes, it is correct, no warning necessary.
"""],
["""\
Paragraph.
Block quote.
-- Attribution
Paragraph.
Block quote.
--Attribution
""",
"""\
<document source="test data">
<paragraph>
Paragraph.
<block_quote>
<paragraph>
Block quote.
<attribution>
Attribution
<paragraph>
Paragraph.
<block_quote>
<paragraph>
Block quote.
<attribution>
Attribution
"""],
[u"""\
Alternative: true em-dash.
Block quote.
\u2014 Attribution
Alternative: three hyphens.
Block quote.
--- Attribution
""",
"""\
<document source="test data">
<paragraph>
Alternative: true em-dash.
<block_quote>
<paragraph>
Block quote.
<attribution>
Attribution
<paragraph>
Alternative: three hyphens.
<block_quote>
<paragraph>
Block quote.
<attribution>
Attribution
"""],
["""\
Paragraph.
Block quote.
-- Attribution line one
and line two
Paragraph.
Block quote.
-- Attribution line one
and line two
Paragraph.
""",
"""\
<document source="test data">
<paragraph>
Paragraph.
<block_quote>
<paragraph>
Block quote.
<attribution>
Attribution line one
and line two
<paragraph>
Paragraph.
<block_quote>
<paragraph>
Block quote.
<attribution>
Attribution line one
and line two
<paragraph>
Paragraph.
"""],
["""\
Paragraph.
Block quote 1.
-- Attribution 1
Block quote 2.
--Attribution 2
""",
"""\
<document source="test data">
<paragraph>
Paragraph.
<block_quote>
<paragraph>
Block quote 1.
<attribution>
Attribution 1
<block_quote>
<paragraph>
Block quote 2.
<attribution>
Attribution 2
"""],
["""\
Paragraph.
Block quote 1.
-- Attribution 1
Block quote 2.
""",
"""\
<document source="test data">
<paragraph>
Paragraph.
<block_quote>
<paragraph>
Block quote 1.
<attribution>
Attribution 1
<block_quote>
<paragraph>
Block quote 2.
"""],
["""\
Unindented paragraph.
Block quote 1.
-- Attribution 1
Block quote 2.
..
Block quote 3.
""",
"""\
<document source="test data">
<paragraph>
Unindented paragraph.
<block_quote>
<paragraph>
Block quote 1.
<attribution>
Attribution 1
<block_quote>
<paragraph>
Block quote 2.
<comment xml:space="preserve">
<block_quote>
<paragraph>
Block quote 3.
"""],
["""\
Paragraph.
-- Not an attribution
Paragraph.
Block quote.
\-- Not an attribution
Paragraph.
Block quote.
-- Not an attribution line one
and line two
and line three
""",
"""\
<document source="test data">
<paragraph>
Paragraph.
<block_quote>
<paragraph>
-- Not an attribution
<paragraph>
Paragraph.
<block_quote>
<paragraph>
Block quote.
<paragraph>
-- Not an attribution
<paragraph>
Paragraph.
<block_quote>
<paragraph>
Block quote.
<definition_list>
<definition_list_item>
<term>
-- Not an attribution line one
<definition>
<definition_list>
<definition_list_item>
<term>
and line two
<definition>
<paragraph>
and line three
"""],
["""\
Paragraph.
-- Not a valid attribution
Block quote 1.
--Attribution 1
--Invalid attribution
Block quote 2.
--Attribution 2
""",
"""\
<document source="test data">
<paragraph>
Paragraph.
<block_quote>
<paragraph>
-- Not a valid attribution
<paragraph>
Block quote 1.
<attribution>
Attribution 1
<block_quote>
<paragraph>
--Invalid attribution
<paragraph>
Block quote 2.
<attribution>
Attribution 2
"""],
]
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 | -7,985,094,825,578,062,000 | 16.356771 | 73 | 0.517329 | false |
google/TaglessCRM | src/plugins/pipeline_plugins/hooks/gcs_hook.py | 1 | 10109 | # python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom GCS Hook for generating blobs from GCS."""
import enum
import io
import json
from typing import Any, Dict, Generator, List, Optional, Tuple
from airflow.contrib.hooks import gcs_hook
from google.api_core.exceptions import NotFound
from googleapiclient import errors as googleapiclient_errors
from plugins.pipeline_plugins.hooks import input_hook_interface
from plugins.pipeline_plugins.utils import blob
from plugins.pipeline_plugins.utils import errors
_PLATFORM = 'GCS'
_START_POSITION_IN_BLOB = 0
# The default size in bytes (100MB) of each download chunk.
# The value is from googleapiclient http package.
_DEFAULT_CHUNK_SIZE = 100 * 1024 * 1024
class BlobContentTypes(enum.Enum):
JSON = enum.auto()
CSV = enum.auto()
class GoogleCloudStorageHook(gcs_hook.GoogleCloudStorageHook,
input_hook_interface.InputHookInterface):
"""Extends the Google Cloud Storage hook.
Used for chunked download of blobs, and blob generation.
The Blobs must satisfy the following conditions:
- Content is formatted as newline-delimited events.
- Content is formatted as UTF-8.
- Content is validly formatted as one of the types in BlobContentTypes.
  - The first line in a CSV blob contains the field labels
Attributes:
bucket: Unique name of the bucket holding the target blob.
prefix: The path to a location within the bucket.
content_type: Blob's content type described by BlobContentTypes.
"""
def __init__(self, gcs_bucket: str,
gcs_content_type: str,
gcs_prefix: str,
**kwargs) -> None:
"""Initiates GoogleCloudStorageHook.
Args:
gcs_bucket: Unique name of the bucket holding the target blob.
gcs_content_type: Blob's content type described by BlobContentTypes.
gcs_prefix: The path to a location within the bucket.
**kwargs: Other optional arguments.
"""
self._verify_content_type(gcs_content_type)
self.bucket = gcs_bucket
self.content_type = gcs_content_type
self.prefix = gcs_prefix
super().__init__()
def get_location(self):
"""Retrieves the full url of the bucket from the GCS data source.
Returns:
The full url of the bucket
"""
return f'gs://{self.bucket}/{self.prefix}'
def _verify_content_type(self, content_type: str) -> None:
"""Validates content_type matches one of the supported formats.
The content type must be one of the formats listed in BlobContentTypes.
Args:
content_type: GCS content type to verify.
Raises:
DataInConnectorValueError: If the content type format is invalid.
"""
if content_type not in BlobContentTypes.__members__:
raise errors.DataInConnectorValueError(
'Invalid GCS blob content type. The supported types are: %s.' %
', '.join([name for name, item in BlobContentTypes.__members__.items(
)]),
errors.ErrorNameIDMap.GCS_HOOK_ERROR_INVALID_BLOB_CONTENT_TYPE)
def _gcs_blob_chunk_generator(self, blob_name: str
) -> Generator[bytes, None, None]:
"""Downloads and generates chunks from given blob.
The base GoogleCloudStorageHook only allows downloading an entire file.
To enable handling large files this class provides a chunk-wise download of
bytes within the blob.
Args:
blob_name: Unique location within the bucket for the target blob.
Yields:
Chunks of the given blob, formatted as bytes.
Raises:
DataInConnectorError: When download failed.
"""
outio = io.BytesIO()
try:
bucket = self.get_conn().bucket(self.bucket)
file_blob = bucket.get_blob(blob_name)
except NotFound as error:
raise errors.DataInConnectorError(
error=error, msg='Failed to download the blob.',
error_num=errors.ErrorNameIDMap.GCS_HOOK_ERROR_MISSING_BLOB)
if file_blob is None:
raise errors.DataInConnectorError(
msg='Failed to download the blob.',
error_num=errors.ErrorNameIDMap.GCS_HOOK_ERROR_MISSING_BLOB)
chunks = int(file_blob.size / _DEFAULT_CHUNK_SIZE) + 1
for i in range(0, chunks):
outio.truncate(0)
outio.seek(0)
start = i * (_DEFAULT_CHUNK_SIZE + 1)
end = i * (_DEFAULT_CHUNK_SIZE + 1) + _DEFAULT_CHUNK_SIZE
if end > file_blob.size:
end = file_blob.size
try:
file_blob.download_to_file(outio, start=start, end=end)
except NotFound as error:
raise errors.DataInConnectorError(
error=error, msg='Failed to download the blob.',
error_num=errors.ErrorNameIDMap.GCS_HOOK_ERROR_MISSING_BLOB)
self.log.debug('Blob loading: {}%'.format(int(i / chunks * 100)))
yield outio.getvalue()
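  # Worked example of the ranges requested above (illustrative, not from the
  # original source): with chunk size C = _DEFAULT_CHUNK_SIZE the inclusive
  # byte ranges are
  #
  #   chunk 0: start = 0,      end = C
  #   chunk 1: start = C + 1,  end = 2 * C + 1
  #
  # so consecutive chunks neither overlap nor leave gaps, and the final chunk
  # is clipped to file_blob.size before download_to_file is called.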
def _parse_events_as_json(self, parsable_events: List[bytes]
) -> List[Dict[Any, Any]]:
"""Parses a list of events as JSON.
Args:
parsable_events: Bytes events to parse.
Returns:
A list of events formatted as JSON.
Raises:
DataInConnectorBlobParseError: When parsing the blob was unsuccessful.
"""
try:
return [json.loads(event.decode('utf-8')) for event in parsable_events]
except (json.JSONDecodeError, UnicodeDecodeError) as error:
raise errors.DataInConnectorBlobParseError(
error=error, msg='Failed to parse the blob as JSON.',
error_num=errors.ErrorNameIDMap.GCS_HOOK_ERROR_BAD_JSON_FORMAT_BLOB)
def _parse_events_as_csv(self, parsable_events: List[bytes]
) -> List[Dict[Any, Any]]:
"""Parses a list of events as CSV.
Args:
parsable_events: Bytes events to parse.
Returns:
A list of events formatted as CSV.
Raises:
DataInConnectorBlobParseError: When parsing the blob was unsuccessful.
"""
try:
fields = parsable_events[0].decode('utf-8').split(',')
events = [dict(zip(fields, event.decode('utf-8').split(',')))
for event in parsable_events[1:]]
except (ValueError, UnicodeDecodeError) as error:
raise errors.DataInConnectorBlobParseError(
error=error, msg='Failed to parse the blob as CSV',
error_num=errors.ErrorNameIDMap.GCS_HOOK_ERROR_BAD_CSV_FORMAT_BLOB)
if not all(len(event) == len(fields) for event in events):
raise errors.DataInConnectorBlobParseError(
msg='Failed to parse CSV, not all lines have same length.',
error_num=errors.ErrorNameIDMap
.GCS_HOOK_ERROR_DIFFERENT_ROW_LENGTH_IN_CSV_BLOB)
return events
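  # Illustrative example of the parsing above (hypothetical data): the first
  # line supplies the field labels and every following line is zipped against
  # them.
  #
  #   _parse_events_as_csv([b'id,name', b'1,alice', b'2,bob'])
  #   # -> [{'id': '1', 'name': 'alice'}, {'id': '2', 'name': 'bob'}]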
def _parse_events_by_content_type(self, parsable_events: List[bytes]
) -> List[Dict[Any, Any]]:
"""Parses a list of events as content_type.
Args:
parsable_events: Bytes events to parse.
Returns:
A list of events formatted as content_type.
"""
if not parsable_events:
return []
if self.content_type == BlobContentTypes.CSV.name:
return self._parse_events_as_csv(parsable_events)
else:
return self._parse_events_as_json(parsable_events)
def get_blob_events(self, blob_name: str) -> List[Dict[Any, Any]]:
"""Gets blob's contents.
Args:
blob_name: The location and file name of the blob in the bucket.
Returns:
A list of events formatted as content_type.
"""
events: List[bytes] = []
buffer: bytes = b''
blob_chunks_generator = self._gcs_blob_chunk_generator(blob_name=blob_name)
for chunk in blob_chunks_generator:
buffer += chunk
if buffer.startswith(b'\n'):
buffer = buffer[1:]
events.extend(buffer.splitlines())
# Last event might be incomplete. In this case we save the last line back
# into the buffer
buffer = events.pop() if not buffer.endswith(b'\n') and events else b''
if buffer:
events.append(buffer)
return self._parse_events_by_content_type(events)
def events_blobs_generator(
self,
processed_blobs_generator: Optional[Generator[Tuple[str, str], None,
None]] = None
) -> Generator[blob.Blob, None, None]:
"""Generates all blobs from the bucket's prefix location.
Args:
processed_blobs_generator: A generator that provides the processed blob
information that helps skip read ranges.
Yields:
A generator that generates Blob objects from blob contents within a
prefix location in the bucket.
Raises:
DataInConnectorError: When listing blob in bucket returns a HttpError.
"""
try:
blob_names = self.list(bucket=self.bucket, prefix=self.prefix)
except googleapiclient_errors.HttpError as error:
raise errors.DataInConnectorError(
error=error, msg='Failed to get list of blobs from bucket.',
error_num=errors.ErrorNameIDMap.RETRIABLE_GCS_HOOK_ERROR_HTTP_ERROR)
if processed_blobs_generator is not None:
for processed_file, _ in processed_blobs_generator:
if processed_file in blob_names:
blob_names.remove(processed_file)
for blob_name in blob_names:
if not blob_name.endswith('/'):
try:
events = self.get_blob_events(blob_name)
yield blob.Blob(events=events, location=self.get_location(),
position=blob_name)
except (errors.DataInConnectorBlobParseError,
errors.DataInConnectorError) as error:
continue
| apache-2.0 | 4,901,230,522,230,867,000 | 33.738832 | 79 | 0.664556 | false |
Yellowen/Owrang | setup/doctype/authorization_rule/authorization_rule.py | 2 | 4160 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, cstr, flt, has_common
from webnotes.model import db_exists
from webnotes.model.bean import copy_doclist
from webnotes import msgprint
sql = webnotes.conn.sql
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def check_duplicate_entry(self):
exists = sql("""select name, docstatus from `tabAuthorization Rule`
where transaction = %s and based_on = %s and system_user = %s
and system_role = %s and approving_user = %s and approving_role = %s
and to_emp =%s and to_designation=%s and name != %s""",
(self.doc.transaction, self.doc.based_on, cstr(self.doc.system_user),
cstr(self.doc.system_role), cstr(self.doc.approving_user),
cstr(self.doc.approving_role), cstr(self.doc.to_emp),
cstr(self.doc.to_designation), self.doc.name))
auth_exists = exists and exists[0][0] or ''
if auth_exists:
if cint(exists[0][1]) == 2:
msgprint("""Duplicate Entry. Please untrash Authorization Rule : %s \
from Recycle Bin""" % (auth_exists), raise_exception=1)
else:
msgprint("Duplicate Entry. Please check Authorization Rule : %s" %
(auth_exists), raise_exception=1)
def validate_master_name(self):
if self.doc.based_on == 'Customerwise Discount' and \
not sql("select name from tabCustomer where name = '%s' and docstatus != 2" % \
(self.doc.master_name)):
msgprint("Please select valid Customer Name for Customerwise Discount",
raise_exception=1)
elif self.doc.based_on == 'Itemwise Discount' and \
not sql("select name from tabItem where name = '%s' and docstatus != 2" % \
(self.doc.master_name)):
msgprint("Please select valid Item Name for Itemwise Discount", raise_exception=1)
elif (self.doc.based_on == 'Grand Total' or \
self.doc.based_on == 'Average Discount') and self.doc.master_name:
msgprint("Please remove Customer/Item Name for %s." %
self.doc.based_on, raise_exception=1)
def validate_rule(self):
if self.doc.transaction != 'Appraisal':
if not self.doc.approving_role and not self.doc.approving_user:
msgprint("Please enter Approving Role or Approving User", raise_exception=1)
elif self.doc.system_user and self.doc.system_user == self.doc.approving_user:
msgprint("Approving User cannot be same as user the rule is Applicable To (User)",
raise_exception=1)
elif self.doc.system_role and self.doc.system_role == self.doc.approving_role:
msgprint("Approving Role cannot be same as user the rule is \
Applicable To (Role).", raise_exception=1)
elif self.doc.system_user and self.doc.approving_role and \
has_common([self.doc.approving_role], [x[0] for x in \
sql("select role from `tabUserRole` where parent = '%s'" % \
(self.doc.system_user))]):
msgprint("System User : %s is assigned role : %s. So rule does not make sense" %
(self.doc.system_user,self.doc.approving_role), raise_exception=1)
elif self.doc.transaction in ['Purchase Order', 'Purchase Receipt', \
'Purchase Invoice', 'Stock Entry'] and self.doc.based_on \
in ['Average Discount', 'Customerwise Discount', 'Itemwise Discount']:
msgprint("You cannot set authorization on basis of Discount for %s" %
self.doc.transaction, raise_exception=1)
elif self.doc.based_on == 'Average Discount' and flt(self.doc.value) > 100.00:
msgprint("Discount cannot given for more than 100%", raise_exception=1)
elif self.doc.based_on == 'Customerwise Discount' and not self.doc.master_name:
msgprint("Please enter Customer Name for 'Customerwise Discount'",
raise_exception=1)
else:
if self.doc.transaction == 'Appraisal' and self.doc.based_on != 'Not Applicable':
msgprint("Based on should be 'Not Applicable' while setting authorization rule\
for 'Appraisal'", raise_exception=1)
def validate(self):
self.check_duplicate_entry()
self.validate_rule()
self.validate_master_name()
		if not self.doc.value: self.doc.value = 0.0
| agpl-3.0 | 5,653,616,322,733,408,000 | 43.741935 | 87 | 0.695433 | false |
harshilasu/GraphicMelon | y/google-cloud-sdk/platform/gsutil/third_party/boto/tests/test.py | 22 | 2484 | #!/usr/bin/env python
# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import logging
import sys
import unittest
from nose.core import run
import argparse
def main():
description = ("Runs boto unit and/or integration tests. "
"Arguments will be passed on to nosetests. "
"See nosetests --help for more information.")
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-t', '--service-tests', action="append", default=[],
help="Run tests for a given service. This will "
"run any test tagged with the specified value, "
"e.g -t s3 -t ec2")
known_args, remaining_args = parser.parse_known_args()
attribute_args = []
for service_attribute in known_args.service_tests:
attribute_args.extend(['-a', '!notdefault,' +service_attribute])
if not attribute_args:
# If the user did not specify any filtering criteria, we at least
# will filter out any test tagged 'notdefault'.
attribute_args = ['-a', '!notdefault']
all_args = [__file__] + attribute_args + remaining_args
print "nose command:", ' '.join(all_args)
if run(argv=all_args):
# run will return True is all the tests pass. We want
# this to equal a 0 rc
return 0
else:
return 1
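# Example invocations (hypothetical paths and flags, shown for illustration):
#
#   python tests/test.py tests/unit                      # default tests only
#   python tests/test.py -t s3 -t ec2 tests/integration
#
# Each -t flag becomes a nose attribute filter such as '-a !notdefault,s3',
# so only tests tagged for the requested services are selected.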
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | -6,531,218,534,587,653,000 | 41.101695 | 77 | 0.677536 | false |
outlierbio/ob-pipelines | ob_pipelines/entities/sample.py | 1 | 1438 | from datetime import datetime
from luigi import Parameter
from ob_pipelines.config import cfg, settings
class Sample(object):
sample_id = Parameter()
def __init__(self, *args, **kwargs):
if 'id' in kwargs:
self.key = kwargs['id']
if 'fields' in kwargs:
self._sample = kwargs['fields']
if 'createdTime' in kwargs:
self._created_at = kwargs['createdTime']
else:
self._created_at = datetime.utcnow()
@property
def sample(self):
# TODO: remove fallback init
if not hasattr(self, '_sample'):
from ob_pipelines.entities.persistence import get_sample_by_name
self._sample = get_sample_by_name(self.sample_id)
return self._sample
@property
def sample_folder(self) -> str:
return '{expt}/{sample}'.format(
bucket=settings.get_target_bucket(),
expt=self.experiment.name,
sample=self.sample_id)
@property
def experiment(self):
# TODO: remove fallback init
if not hasattr(self, '_experiment'):
expt_key = self.sample.sample['Experiment'][0]
from ob_pipelines.entities.persistence import get_experiment_by_key
self._experiment = get_experiment_by_key(expt_key)
return self._experiment
@experiment.setter
def experiment(self, value):
self._experiment = value
| apache-2.0 | 1,228,496,761,335,606,000 | 27.196078 | 79 | 0.60153 | false |
Sorsly/subtle | google-cloud-sdk/platform/gsutil/third_party/oauth2client/oauth2client/contrib/appengine.py | 21 | 34440 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use OAuth 2.0 on Google App Engine.
"""
import cgi
import json
import logging
import os
import pickle
import threading
import httplib2
import webapp2 as webapp
from google.appengine.api import app_identity
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext.webapp.util import login_required
from oauth2client import GOOGLE_AUTH_URI
from oauth2client import GOOGLE_REVOKE_URI
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client import clientsecrets
from oauth2client import util
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import AssertionCredentials
from oauth2client.client import Credentials
from oauth2client.client import Flow
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.client import Storage
from oauth2client.contrib import xsrfutil
# This is a temporary fix for a Google internal issue.
try:
from oauth2client.contrib import _appengine_ndb
except ImportError: # pragma: NO COVER
_appengine_ndb = None
__author__ = '[email protected] (Joe Gregorio)'
logger = logging.getLogger(__name__)
OAUTH2CLIENT_NAMESPACE = 'oauth2client#ns'
XSRF_MEMCACHE_ID = 'xsrf_secret_key'
if _appengine_ndb is None: # pragma: NO COVER
CredentialsNDBModel = None
CredentialsNDBProperty = None
FlowNDBProperty = None
_NDB_KEY = None
_NDB_MODEL = None
SiteXsrfSecretKeyNDB = None
else:
CredentialsNDBModel = _appengine_ndb.CredentialsNDBModel
CredentialsNDBProperty = _appengine_ndb.CredentialsNDBProperty
FlowNDBProperty = _appengine_ndb.FlowNDBProperty
_NDB_KEY = _appengine_ndb.NDB_KEY
_NDB_MODEL = _appengine_ndb.NDB_MODEL
SiteXsrfSecretKeyNDB = _appengine_ndb.SiteXsrfSecretKeyNDB
def _safe_html(s):
"""Escape text to make it safe to display.
Args:
s: string, The text to escape.
Returns:
The escaped text as a string.
"""
    return cgi.escape(s, quote=1).replace("'", '&#39;')
class InvalidClientSecretsError(Exception):
"""The client_secrets.json file is malformed or missing required fields."""
class InvalidXsrfTokenError(Exception):
"""The XSRF token is invalid or expired."""
class SiteXsrfSecretKey(db.Model):
"""Storage for the sites XSRF secret key.
There will only be one instance stored of this model, the one used for the
site.
"""
secret = db.StringProperty()
def _generate_new_xsrf_secret_key():
"""Returns a random XSRF secret key."""
return os.urandom(16).encode("hex")
def xsrf_secret_key():
"""Return the secret key for use for XSRF protection.
If the Site entity does not have a secret key, this method will also create
one and persist it.
Returns:
The secret key.
"""
secret = memcache.get(XSRF_MEMCACHE_ID, namespace=OAUTH2CLIENT_NAMESPACE)
if not secret:
# Load the one and only instance of SiteXsrfSecretKey.
model = SiteXsrfSecretKey.get_or_insert(key_name='site')
if not model.secret:
model.secret = _generate_new_xsrf_secret_key()
model.put()
secret = model.secret
memcache.add(XSRF_MEMCACHE_ID, secret,
namespace=OAUTH2CLIENT_NAMESPACE)
return str(secret)
class AppAssertionCredentials(AssertionCredentials):
"""Credentials object for App Engine Assertion Grants
This object will allow an App Engine application to identify itself to
Google and other OAuth 2.0 servers that can verify assertions. It can be
used for the purpose of accessing data stored under an account assigned to
the App Engine application itself.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
"""
@util.positional(2)
def __init__(self, scope, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
scope: string or iterable of strings, scope(s) of the credentials
being requested.
**kwargs: optional keyword args, including:
service_account_id: service account id of the application. If None
or unspecified, the default service account for
the app is used.
"""
self.scope = util.scopes_to_string(scope)
self._kwargs = kwargs
self.service_account_id = kwargs.get('service_account_id', None)
self._service_account_email = None
# Assertion type is no longer used, but still in the
# parent class signature.
super(AppAssertionCredentials, self).__init__(None)
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
return AppAssertionCredentials(data['scope'])
def _refresh(self, http_request):
"""Refreshes the access_token.
Since the underlying App Engine app_identity implementation does its
own caching we can skip all the storage hoops and just to a refresh
using the API.
Args:
http_request: callable, a callable that matches the method
signature of httplib2.Http.request, used to make the
refresh request.
Raises:
AccessTokenRefreshError: When the refresh fails.
"""
try:
scopes = self.scope.split()
(token, _) = app_identity.get_access_token(
scopes, service_account_id=self.service_account_id)
except app_identity.Error as e:
raise AccessTokenRefreshError(str(e))
self.access_token = token
@property
def serialization_data(self):
raise NotImplementedError('Cannot serialize credentials '
'for Google App Engine.')
def create_scoped_required(self):
return not self.scope
def create_scoped(self, scopes):
return AppAssertionCredentials(scopes, **self._kwargs)
def sign_blob(self, blob):
"""Cryptographically sign a blob (of bytes).
Implements abstract method
:meth:`oauth2client.client.AssertionCredentials.sign_blob`.
Args:
blob: bytes, Message to be signed.
Returns:
tuple, A pair of the private key ID used to sign the blob and
the signed contents.
"""
return app_identity.sign_blob(blob)
@property
def service_account_email(self):
"""Get the email for the current service account.
Returns:
string, The email associated with the Google App Engine
service account.
"""
if self._service_account_email is None:
self._service_account_email = (
app_identity.get_service_account_name())
return self._service_account_email
class FlowProperty(db.Property):
"""App Engine datastore Property for Flow.
Utility property that allows easy storage and retrieval of an
oauth2client.Flow
"""
# Tell what the user type is.
data_type = Flow
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
flow = super(FlowProperty, self).get_value_for_datastore(
model_instance)
return db.Blob(pickle.dumps(flow))
# For reading from datastore.
def make_value_from_datastore(self, value):
if value is None:
return None
return pickle.loads(value)
def validate(self, value):
if value is not None and not isinstance(value, Flow):
raise db.BadValueError('Property %s must be convertible '
'to a FlowThreeLegged instance (%s)' %
(self.name, value))
return super(FlowProperty, self).validate(value)
def empty(self, value):
return not value
class CredentialsProperty(db.Property):
"""App Engine datastore Property for Credentials.
Utility property that allows easy storage and retrieval of
oauth2client.Credentials
"""
# Tell what the user type is.
data_type = Credentials
# For writing to datastore.
def get_value_for_datastore(self, model_instance):
logger.info("get: Got type " + str(type(model_instance)))
cred = super(CredentialsProperty, self).get_value_for_datastore(
model_instance)
if cred is None:
cred = ''
else:
cred = cred.to_json()
return db.Blob(cred)
# For reading from datastore.
def make_value_from_datastore(self, value):
logger.info("make: Got type " + str(type(value)))
if value is None:
return None
if len(value) == 0:
return None
try:
credentials = Credentials.new_from_json(value)
except ValueError:
credentials = None
return credentials
def validate(self, value):
value = super(CredentialsProperty, self).validate(value)
logger.info("validate: Got type " + str(type(value)))
if value is not None and not isinstance(value, Credentials):
raise db.BadValueError('Property %s must be convertible '
'to a Credentials instance (%s)' %
(self.name, value))
return value
class StorageByKeyName(Storage):
"""Store and retrieve a credential to and from the App Engine datastore.
This Storage helper presumes the Credentials have been stored as a
CredentialsProperty or CredentialsNDBProperty on a datastore model class,
and that entities are stored by key_name.
"""
@util.positional(4)
def __init__(self, model, key_name, property_name, cache=None, user=None):
"""Constructor for Storage.
Args:
model: db.Model or ndb.Model, model class
key_name: string, key name for the entity that has the credentials
property_name: string, name of the property that is a
CredentialsProperty or CredentialsNDBProperty.
cache: memcache, a write-through cache to put in front of the
datastore. If the model you are using is an NDB model, using
a cache will be redundant since the model uses an instance
cache and memcache for you.
user: users.User object, optional. Can be used to grab user ID as a
key_name if no key name is specified.
"""
super(StorageByKeyName, self).__init__()
if key_name is None:
if user is None:
raise ValueError('StorageByKeyName called with no '
'key name or user.')
key_name = user.user_id()
self._model = model
self._key_name = key_name
self._property_name = property_name
self._cache = cache
def _is_ndb(self):
"""Determine whether the model of the instance is an NDB model.
Returns:
Boolean indicating whether or not the model is an NDB or DB model.
"""
# issubclass will fail if one of the arguments is not a class, only
# need worry about new-style classes since ndb and db models are
# new-style
if isinstance(self._model, type):
if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):
return True
elif issubclass(self._model, db.Model):
return False
raise TypeError('Model class not an NDB or DB model: %s.' %
(self._model,))
def _get_entity(self):
"""Retrieve entity from datastore.
Uses a different model method for db or ndb models.
Returns:
Instance of the model corresponding to the current storage object
and stored using the key name of the storage object.
"""
if self._is_ndb():
return self._model.get_by_id(self._key_name)
else:
return self._model.get_by_key_name(self._key_name)
def _delete_entity(self):
"""Delete entity from datastore.
Attempts to delete using the key_name stored on the object, whether or
not the given key is in the datastore.
"""
if self._is_ndb():
_NDB_KEY(self._model, self._key_name).delete()
else:
entity_key = db.Key.from_path(self._model.kind(), self._key_name)
db.delete(entity_key)
@db.non_transactional(allow_existing=True)
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credentials = None
if self._cache:
json = self._cache.get(self._key_name)
if json:
credentials = Credentials.new_from_json(json)
if credentials is None:
entity = self._get_entity()
if entity is not None:
credentials = getattr(entity, self._property_name)
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
if credentials and hasattr(credentials, 'set_store'):
credentials.set_store(self)
return credentials
@db.non_transactional(allow_existing=True)
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
entity = self._model.get_or_insert(self._key_name)
setattr(entity, self._property_name, credentials)
entity.put()
if self._cache:
self._cache.set(self._key_name, credentials.to_json())
@db.non_transactional(allow_existing=True)
def locked_delete(self):
"""Delete Credential from datastore."""
if self._cache:
self._cache.delete(self._key_name)
self._delete_entity()
class CredentialsModel(db.Model):
"""Storage for OAuth 2.0 Credentials
Storage of the model is keyed by the user.user_id().
"""
credentials = CredentialsProperty()
def _build_state_value(request_handler, user):
"""Composes the value for the 'state' parameter.
Packs the current request URI and an XSRF token into an opaque string that
can be passed to the authentication server via the 'state' parameter.
Args:
request_handler: webapp.RequestHandler, The request.
user: google.appengine.api.users.User, The current user.
Returns:
The state value as a string.
"""
uri = request_handler.request.url
token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),
action_id=str(uri))
return uri + ':' + token
def _parse_state_value(state, user):
"""Parse the value of the 'state' parameter.
Parses the value and validates the XSRF token in the state parameter.
Args:
state: string, The value of the state parameter.
user: google.appengine.api.users.User, The current user.
Raises:
InvalidXsrfTokenError: if the XSRF token is invalid.
Returns:
The redirect URI.
"""
uri, token = state.rsplit(':', 1)
if not xsrfutil.validate_token(xsrf_secret_key(), token, user.user_id(),
action_id=uri):
raise InvalidXsrfTokenError()
return uri
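# Round-trip sketch for the two helpers above (illustrative only): the state
# value packs the request URI together with an XSRF token bound to the current
# user, and parsing validates that token before handing the URI back.
#
#   state = _build_state_value(request_handler, user)    # '<uri>:<token>'
#   uri = _parse_state_value(state, user)                 # -> '<uri>'
#   # A tampered or expired token raises InvalidXsrfTokenError instead.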
class OAuth2Decorator(object):
"""Utility for making OAuth 2.0 easier.
Instantiate and then use with oauth_required or oauth_aware
as decorators on webapp.RequestHandler methods.
::
decorator = OAuth2Decorator(
client_id='837...ent.com',
client_secret='Qh...wwI',
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be
# used in API calls
"""
def set_credentials(self, credentials):
self._tls.credentials = credentials
def get_credentials(self):
"""A thread local Credentials object.
Returns:
A client.Credentials object, or None if credentials hasn't been set
in this thread yet, which may happen when calling has_credentials
inside oauth_aware.
"""
return getattr(self._tls, 'credentials', None)
credentials = property(get_credentials, set_credentials)
def set_flow(self, flow):
self._tls.flow = flow
def get_flow(self):
"""A thread local Flow object.
Returns:
A credentials.Flow object, or None if the flow hasn't been set in
this thread yet, which happens in _create_flow() since Flows are
created lazily.
"""
return getattr(self._tls, 'flow', None)
flow = property(get_flow, set_flow)
@util.positional(4)
def __init__(self, client_id, client_secret, scope,
auth_uri=GOOGLE_AUTH_URI,
token_uri=GOOGLE_TOKEN_URI,
revoke_uri=GOOGLE_REVOKE_URI,
user_agent=None,
message=None,
callback_path='/oauth2callback',
token_response_param=None,
_storage_class=StorageByKeyName,
_credentials_class=CredentialsModel,
_credentials_property_name='credentials',
**kwargs):
"""Constructor for OAuth2Decorator
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string or iterable of strings, scope(s) of the credentials
being requested.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider
can be used.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
user_agent: string, User agent of your application, default to
None.
message: Message to display if there are problems with the
OAuth 2.0 configuration. The message may contain HTML and
will be presented on the web interface for any method that
uses the decorator.
callback_path: string, The absolute path to use as the callback
URI. Note that this must match up with the URI given
when registering the application in the APIs
Console.
token_response_param: string. If provided, the full JSON response
to the access token request will be encoded
and included in this query parameter in the
callback URI. This is useful with providers
(e.g. wordpress.com) that include extra
fields that the client may want.
_storage_class: "Protected" keyword argument not typically provided
to this constructor. A storage class to aid in
storing a Credentials object for a user in the
datastore. Defaults to StorageByKeyName.
_credentials_class: "Protected" keyword argument not typically
provided to this constructor. A db or ndb Model
class to hold credentials. Defaults to
CredentialsModel.
_credentials_property_name: "Protected" keyword argument not
typically provided to this constructor.
A string indicating the name of the
field on the _credentials_class where a
Credentials object will be stored.
Defaults to 'credentials'.
**kwargs: dict, Keyword arguments are passed along as kwargs to
the OAuth2WebServerFlow constructor.
"""
self._tls = threading.local()
self.flow = None
self.credentials = None
self._client_id = client_id
self._client_secret = client_secret
self._scope = util.scopes_to_string(scope)
self._auth_uri = auth_uri
self._token_uri = token_uri
self._revoke_uri = revoke_uri
self._user_agent = user_agent
self._kwargs = kwargs
self._message = message
self._in_error = False
self._callback_path = callback_path
self._token_response_param = token_response_param
self._storage_class = _storage_class
self._credentials_class = _credentials_class
self._credentials_property_name = _credentials_property_name
def _display_error_message(self, request_handler):
request_handler.response.out.write('<html><body>')
request_handler.response.out.write(_safe_html(self._message))
request_handler.response.out.write('</body></html>')
def oauth_required(self, method):
"""Decorator that starts the OAuth 2.0 dance.
Starts the OAuth dance for the logged in user if they haven't already
granted access for this application.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def check_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a
# POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
# Store the request URI in 'state' so we can use it later
self.flow.params['state'] = _build_state_value(
request_handler, user)
self.credentials = self._storage_class(
self._credentials_class, None,
self._credentials_property_name, user=user).get()
if not self.has_credentials():
return request_handler.redirect(self.authorize_url())
try:
resp = method(request_handler, *args, **kwargs)
except AccessTokenRefreshError:
return request_handler.redirect(self.authorize_url())
finally:
self.credentials = None
return resp
return check_oauth
def _create_flow(self, request_handler):
"""Create the Flow object.
The Flow is calculated lazily since we don't know where this app is
running until it receives a request, at which point redirect_uri can be
calculated and then the Flow object can be constructed.
Args:
request_handler: webapp.RequestHandler, the request handler.
"""
if self.flow is None:
redirect_uri = request_handler.request.relative_url(
self._callback_path) # Usually /oauth2callback
self.flow = OAuth2WebServerFlow(
self._client_id, self._client_secret, self._scope,
redirect_uri=redirect_uri, user_agent=self._user_agent,
auth_uri=self._auth_uri, token_uri=self._token_uri,
revoke_uri=self._revoke_uri, **self._kwargs)
def oauth_aware(self, method):
"""Decorator that sets up for OAuth 2.0 dance, but doesn't do it.
Does all the setup for the OAuth dance, but doesn't initiate it.
This decorator is useful if you want to create a page that knows
whether or not the user has granted access to this application.
From within a method decorated with @oauth_aware the has_credentials()
and authorize_url() methods can be called.
Args:
method: callable, to be decorated method of a webapp.RequestHandler
instance.
"""
def setup_oauth(request_handler, *args, **kwargs):
if self._in_error:
self._display_error_message(request_handler)
return
user = users.get_current_user()
# Don't use @login_decorator as this could be used in a
# POST request.
if not user:
request_handler.redirect(users.create_login_url(
request_handler.request.uri))
return
self._create_flow(request_handler)
self.flow.params['state'] = _build_state_value(request_handler,
user)
self.credentials = self._storage_class(
self._credentials_class, None,
self._credentials_property_name, user=user).get()
try:
resp = method(request_handler, *args, **kwargs)
finally:
self.credentials = None
return resp
return setup_oauth
def has_credentials(self):
"""True if for the logged in user there are valid access Credentials.
Must only be called from with a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
return self.credentials is not None and not self.credentials.invalid
def authorize_url(self):
"""Returns the URL to start the OAuth dance.
Must only be called from with a webapp.RequestHandler subclassed method
that had been decorated with either @oauth_required or @oauth_aware.
"""
url = self.flow.step1_get_authorize_url()
return str(url)
def http(self, *args, **kwargs):
"""Returns an authorized http instance.
Must only be called from within an @oauth_required decorated method, or
from within an @oauth_aware decorated method where has_credentials()
returns True.
Args:
*args: Positional arguments passed to httplib2.Http constructor.
**kwargs: Positional arguments passed to httplib2.Http constructor.
"""
return self.credentials.authorize(httplib2.Http(*args, **kwargs))
@property
def callback_path(self):
"""The absolute path where the callback will occur.
Note this is the absolute path, not the absolute URI, that will be
calculated by the decorator at runtime. See callback_handler() for how
this should be used.
Returns:
The callback path as a string.
"""
return self._callback_path
def callback_handler(self):
"""RequestHandler for the OAuth 2.0 redirect callback.
Usage::
app = webapp.WSGIApplication([
('/index', MyIndexHandler),
...,
(decorator.callback_path, decorator.callback_handler())
])
Returns:
A webapp.RequestHandler that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
decorator = self
class OAuth2Handler(webapp.RequestHandler):
"""Handler for the redirect_uri of the OAuth 2.0 dance."""
@login_required
def get(self):
error = self.request.get('error')
if error:
errormsg = self.request.get('error_description', error)
self.response.out.write(
'The authorization request failed: %s' %
_safe_html(errormsg))
else:
user = users.get_current_user()
decorator._create_flow(self)
credentials = decorator.flow.step2_exchange(
self.request.params)
decorator._storage_class(
decorator._credentials_class, None,
decorator._credentials_property_name,
user=user).put(credentials)
redirect_uri = _parse_state_value(
str(self.request.get('state')), user)
if (decorator._token_response_param and
credentials.token_response):
resp_json = json.dumps(credentials.token_response)
redirect_uri = util._add_query_parameter(
redirect_uri, decorator._token_response_param,
resp_json)
self.redirect(redirect_uri)
return OAuth2Handler
def callback_application(self):
"""WSGI application for handling the OAuth 2.0 redirect callback.
If you need finer grained control use `callback_handler` which returns
just the webapp.RequestHandler.
Returns:
A webapp.WSGIApplication that handles the redirect back from the
server during the OAuth 2.0 dance.
"""
return webapp.WSGIApplication([
(self.callback_path, self.callback_handler())
])
class OAuth2DecoratorFromClientSecrets(OAuth2Decorator):
"""An OAuth2Decorator that builds from a clientsecrets file.
Uses a clientsecrets file as the source for all the information when
constructing an OAuth2Decorator.
::
decorator = OAuth2DecoratorFromClientSecrets(
os.path.join(os.path.dirname(__file__), 'client_secrets.json')
scope='https://www.googleapis.com/auth/plus')
class MainHandler(webapp.RequestHandler):
@decorator.oauth_required
def get(self):
http = decorator.http()
# http is authorized with the user's Credentials and can be
# used in API calls
"""
@util.positional(3)
def __init__(self, filename, scope, message=None, cache=None, **kwargs):
"""Constructor
Args:
filename: string, File name of client secrets.
scope: string or iterable of strings, scope(s) of the credentials
being requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may
contain HTML and will be presented on the web interface
for any method that uses the decorator.
cache: An optional cache service client that implements get() and
set()
methods. See clientsecrets.loadfile() for details.
**kwargs: dict, Keyword arguments are passed along as kwargs to
the OAuth2WebServerFlow constructor.
"""
client_type, client_info = clientsecrets.loadfile(filename,
cache=cache)
if client_type not in (clientsecrets.TYPE_WEB,
clientsecrets.TYPE_INSTALLED):
raise InvalidClientSecretsError(
"OAuth2Decorator doesn't support this OAuth 2.0 flow.")
constructor_kwargs = dict(kwargs)
constructor_kwargs.update({
'auth_uri': client_info['auth_uri'],
'token_uri': client_info['token_uri'],
'message': message,
})
revoke_uri = client_info.get('revoke_uri')
if revoke_uri is not None:
constructor_kwargs['revoke_uri'] = revoke_uri
super(OAuth2DecoratorFromClientSecrets, self).__init__(
client_info['client_id'], client_info['client_secret'],
scope, **constructor_kwargs)
if message is not None:
self._message = message
else:
self._message = 'Please configure your application for OAuth 2.0.'
@util.positional(2)
def oauth2decorator_from_clientsecrets(filename, scope,
message=None, cache=None):
"""Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may
contain HTML and will be presented on the web interface for
any method that uses the decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns: An OAuth2Decorator
"""
return OAuth2DecoratorFromClientSecrets(filename, scope,
message=message, cache=cache)
| mit | 8,795,563,558,984,678,000 | 36.112069 | 79 | 0.601539 | false |
facaiy/spark | python/pyspark/sql/dataframe.py | 4 | 90501 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
if sys.version >= '3':
basestring = unicode = str
long = int
from functools import reduce
else:
from itertools import imap as map
import warnings
from pyspark import copy_func, since, _NoValue
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.serializers import ArrowCollectSerializer, BatchedSerializer, PickleSerializer, \
UTF8Deserializer
from pyspark.storagelevel import StorageLevel
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.sql.types import _parse_datatype_json_string
from pyspark.sql.column import Column, _to_seq, _to_list, _to_java_column
from pyspark.sql.readwriter import DataFrameWriter
from pyspark.sql.streaming import DataStreamWriter
from pyspark.sql.types import IntegralType
from pyspark.sql.types import *
from pyspark.util import _exception_message
__all__ = ["DataFrame", "DataFrameNaFunctions", "DataFrameStatFunctions"]
class DataFrame(object):
"""A distributed collection of data grouped into named columns.
A :class:`DataFrame` is equivalent to a relational table in Spark SQL,
and can be created using various functions in :class:`SparkSession`::
people = spark.read.parquet("...")
Once created, it can be manipulated using the various domain-specific-language
(DSL) functions defined in: :class:`DataFrame`, :class:`Column`.
To select a column from the data frame, use the apply method::
ageCol = people.age
A more concrete example::
# To create DataFrame using SparkSession
people = spark.read.parquet("...")
department = spark.read.parquet("...")
people.filter(people.age > 30).join(department, people.deptId == department.id) \\
.groupBy(department.name, "gender").agg({"salary": "avg", "age": "max"})
.. versionadded:: 1.3
"""
def __init__(self, jdf, sql_ctx):
self._jdf = jdf
self.sql_ctx = sql_ctx
self._sc = sql_ctx and sql_ctx._sc
self.is_cached = False
self._schema = None # initialized lazily
self._lazy_rdd = None
# Check whether _repr_html is supported or not, we use it to avoid calling _jdf twice
# by __repr__ and _repr_html_ while eager evaluation opened.
self._support_repr_html = False
@property
@since(1.3)
def rdd(self):
"""Returns the content as an :class:`pyspark.RDD` of :class:`Row`.
"""
if self._lazy_rdd is None:
jrdd = self._jdf.javaToPython()
self._lazy_rdd = RDD(jrdd, self.sql_ctx._sc, BatchedSerializer(PickleSerializer()))
return self._lazy_rdd
@property
@since("1.3.1")
def na(self):
"""Returns a :class:`DataFrameNaFunctions` for handling missing values.
"""
return DataFrameNaFunctions(self)
@property
@since(1.4)
def stat(self):
"""Returns a :class:`DataFrameStatFunctions` for statistic functions.
"""
return DataFrameStatFunctions(self)
@ignore_unicode_prefix
@since(1.3)
def toJSON(self, use_unicode=True):
"""Converts a :class:`DataFrame` into a :class:`RDD` of string.
Each row is turned into a JSON document as one element in the returned RDD.
>>> df.toJSON().first()
u'{"age":2,"name":"Alice"}'
"""
rdd = self._jdf.toJSON()
return RDD(rdd.toJavaRDD(), self._sc, UTF8Deserializer(use_unicode))
@since(2.0)
def createTempView(self, name):
"""Creates a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createTempView("people")
>>> df2 = spark.sql("select * from people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createTempView(name)
@since(2.0)
def createOrReplaceTempView(self, name):
"""Creates or replaces a local temporary view with this DataFrame.
The lifetime of this temporary table is tied to the :class:`SparkSession`
that was used to create this :class:`DataFrame`.
>>> df.createOrReplaceTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceTempView("people")
>>> df3 = spark.sql("select * from people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropTempView("people")
"""
self._jdf.createOrReplaceTempView(name)
@since(2.1)
def createGlobalTempView(self, name):
"""Creates a global temporary view with this DataFrame.
The lifetime of this temporary view is tied to this Spark application.
throws :class:`TempTableAlreadyExistsException`, if the view name already exists in the
catalog.
>>> df.createGlobalTempView("people")
>>> df2 = spark.sql("select * from global_temp.people")
>>> sorted(df.collect()) == sorted(df2.collect())
True
>>> df.createGlobalTempView("people") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AnalysisException: u"Temporary table 'people' already exists;"
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createGlobalTempView(name)
@since(2.2)
def createOrReplaceGlobalTempView(self, name):
"""Creates or replaces a global temporary view using the given name.
The lifetime of this temporary view is tied to this Spark application.
>>> df.createOrReplaceGlobalTempView("people")
>>> df2 = df.filter(df.age > 3)
>>> df2.createOrReplaceGlobalTempView("people")
>>> df3 = spark.sql("select * from global_temp.people")
>>> sorted(df3.collect()) == sorted(df2.collect())
True
>>> spark.catalog.dropGlobalTempView("people")
"""
self._jdf.createOrReplaceGlobalTempView(name)
@property
@since(1.4)
def write(self):
"""
Interface for saving the content of the non-streaming :class:`DataFrame` out into external
storage.
:return: :class:`DataFrameWriter`
"""
return DataFrameWriter(self)
@property
@since(2.0)
def writeStream(self):
"""
Interface for saving the content of the streaming :class:`DataFrame` out into external
storage.
.. note:: Evolving.
:return: :class:`DataStreamWriter`
"""
return DataStreamWriter(self)
@property
@since(1.3)
def schema(self):
"""Returns the schema of this :class:`DataFrame` as a :class:`pyspark.sql.types.StructType`.
>>> df.schema
StructType(List(StructField(age,IntegerType,true),StructField(name,StringType,true)))
"""
if self._schema is None:
try:
self._schema = _parse_datatype_json_string(self._jdf.schema().json())
except AttributeError as e:
raise Exception(
"Unable to parse datatype from schema. %s" % e)
return self._schema
@since(1.3)
def printSchema(self):
"""Prints out the schema in the tree format.
>>> df.printSchema()
root
|-- age: integer (nullable = true)
|-- name: string (nullable = true)
<BLANKLINE>
"""
print(self._jdf.schema().treeString())
@since(1.3)
def explain(self, extended=False):
"""Prints the (logical and physical) plans to the console for debugging purpose.
:param extended: boolean, default ``False``. If ``False``, prints only the physical plan.
>>> df.explain()
== Physical Plan ==
*(1) Scan ExistingRDD[age#0,name#1]
>>> df.explain(True)
== Parsed Logical Plan ==
...
== Analyzed Logical Plan ==
...
== Optimized Logical Plan ==
...
== Physical Plan ==
...
"""
if extended:
print(self._jdf.queryExecution().toString())
else:
print(self._jdf.queryExecution().simpleString())
@since(2.4)
def exceptAll(self, other):
"""Return a new :class:`DataFrame` containing rows in this :class:`DataFrame` but
not in another :class:`DataFrame` while preserving duplicates.
This is equivalent to `EXCEPT ALL` in SQL.
>>> df1 = spark.createDataFrame(
... [("a", 1), ("a", 1), ("a", 1), ("a", 2), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.exceptAll(df2).show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| a| 2|
| c| 4|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.exceptAll(other._jdf), self.sql_ctx)
@since(1.3)
def isLocal(self):
"""Returns ``True`` if the :func:`collect` and :func:`take` methods can be run locally
(without any Spark executors).
"""
return self._jdf.isLocal()
@property
@since(2.0)
def isStreaming(self):
"""Returns true if this :class:`Dataset` contains one or more sources that continuously
return data as it arrives. A :class:`Dataset` that reads data from a streaming source
must be executed as a :class:`StreamingQuery` using the :func:`start` method in
:class:`DataStreamWriter`. Methods that return a single answer, (e.g., :func:`count` or
:func:`collect`) will throw an :class:`AnalysisException` when there is a streaming
source present.
.. note:: Evolving
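For example, a static DataFrame such as ``df`` is not streaming:
>>> df.isStreaming
False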
"""
return self._jdf.isStreaming()
@since(1.3)
def show(self, n=20, truncate=True, vertical=False):
"""Prints the first ``n`` rows to the console.
:param n: Number of rows to show.
:param truncate: If set to ``True``, truncates strings longer than 20 characters by default.
If set to a number greater than one, truncates long strings to length ``truncate``
and aligns cells right.
:param vertical: If set to True, print output rows vertically (one line
per column value).
>>> df
DataFrame[age: int, name: string]
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.show(truncate=3)
+---+----+
|age|name|
+---+----+
| 2| Ali|
| 5| Bob|
+---+----+
>>> df.show(vertical=True)
-RECORD 0-----
age | 2
name | Alice
-RECORD 1-----
age | 5
name | Bob
"""
if isinstance(truncate, bool) and truncate:
print(self._jdf.showString(n, 20, vertical))
else:
print(self._jdf.showString(n, int(truncate), vertical))
def __repr__(self):
if not self._support_repr_html and self.sql_ctx._conf.isReplEagerEvalEnabled():
vertical = False
return self._jdf.showString(
self.sql_ctx._conf.replEagerEvalMaxNumRows(),
self.sql_ctx._conf.replEagerEvalTruncate(), vertical)
else:
return "DataFrame[%s]" % (", ".join("%s: %s" % c for c in self.dtypes))
def _repr_html_(self):
"""Returns a dataframe with html code when you enabled eager evaluation
by 'spark.sql.repl.eagerEval.enabled', this only called by REPL you are
using support eager evaluation with HTML.
"""
import cgi
if not self._support_repr_html:
self._support_repr_html = True
if self.sql_ctx._conf.isReplEagerEvalEnabled():
max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0)
sock_info = self._jdf.getRowsToPython(
max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate())
rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
head = rows[0]
row_data = rows[1:]
has_more_data = len(row_data) > max_num_rows
row_data = row_data[:max_num_rows]
html = "<table border='1'>\n"
# generate table head
html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: cgi.escape(x), head))
# generate table rows
for row in row_data:
html += "<tr><td>%s</td></tr>\n" % "</td><td>".join(
map(lambda x: cgi.escape(x), row))
html += "</table>\n"
if has_more_data:
html += "only showing top %d %s\n" % (
max_num_rows, "row" if max_num_rows == 1 else "rows")
return html
else:
return None
@since(2.1)
def checkpoint(self, eager=True):
"""Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
logical plan of this DataFrame, which is especially useful in iterative algorithms where the
plan may grow exponentially. It will be saved to files inside the checkpoint
directory set with :meth:`SparkContext.setCheckpointDir`.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
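For example (a checkpoint directory must be configured first; the path below is
illustrative only):
>>> sc.setCheckpointDir("/tmp/spark-checkpoints")  # doctest: +SKIP
>>> df.checkpoint().count()  # doctest: +SKIP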
"""
jdf = self._jdf.checkpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.3)
def localCheckpoint(self, eager=True):
"""Returns a locally checkpointed version of this Dataset. Checkpointing can be used to
truncate the logical plan of this DataFrame, which is especially useful in iterative
algorithms where the plan may grow exponentially. Local checkpoints are stored in the
executors using the caching subsystem and therefore they are not reliable.
:param eager: Whether to checkpoint this DataFrame immediately
.. note:: Experimental
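For example (unlike :func:`checkpoint`, no checkpoint directory is needed):
>>> df.localCheckpoint().count()  # doctest: +SKIP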
"""
jdf = self._jdf.localCheckpoint(eager)
return DataFrame(jdf, self.sql_ctx)
@since(2.1)
def withWatermark(self, eventTime, delayThreshold):
"""Defines an event time watermark for this :class:`DataFrame`. A watermark tracks a point
in time before which we assume no more late data is going to arrive.
Spark will use this watermark for several purposes:
- To know when a given time window aggregation can be finalized and thus can be emitted
when using output modes that do not allow updates.
- To minimize the amount of state that we need to keep for on-going aggregations.
The current watermark is computed by looking at the `MAX(eventTime)` seen across
all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
of coordinating this value across partitions, the actual watermark used is only guaranteed
to be at least `delayThreshold` behind the actual event time. In some cases we may still
process records that arrive more than `delayThreshold` late.
:param eventTime: the name of the column that contains the event time of the row.
:param delayThreshold: the minimum delay to wait for data to arrive late, relative to the
latest record that has been processed in the form of an interval
(e.g. "1 minute" or "5 hours").
.. note:: Evolving
>>> sdf.select('name', sdf.time.cast('timestamp')).withWatermark('time', '10 minutes')
DataFrame[name: string, time: timestamp]
"""
if not eventTime or type(eventTime) is not str:
raise TypeError("eventTime should be provided as a string")
if not delayThreshold or type(delayThreshold) is not str:
raise TypeError("delayThreshold should be provided as a string interval")
jdf = self._jdf.withWatermark(eventTime, delayThreshold)
return DataFrame(jdf, self.sql_ctx)
@since(2.2)
def hint(self, name, *parameters):
"""Specifies some hint on the current DataFrame.
:param name: A name of the hint.
:param parameters: Optional parameters.
:return: :class:`DataFrame`
>>> df.join(df2.hint("broadcast"), "name").show()
+----+---+------+
|name|age|height|
+----+---+------+
| Bob| 5| 85|
+----+---+------+
"""
if len(parameters) == 1 and isinstance(parameters[0], list):
parameters = parameters[0]
if not isinstance(name, str):
raise TypeError("name should be provided as str, got {0}".format(type(name)))
allowed_types = (basestring, list, float, int)
for p in parameters:
if not isinstance(p, allowed_types):
raise TypeError(
"all parameters should be in {0}, got {1} of type {2}".format(
allowed_types, p, type(p)))
jdf = self._jdf.hint(name, self._jseq(parameters))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def count(self):
"""Returns the number of rows in this :class:`DataFrame`.
>>> df.count()
2
"""
return int(self._jdf.count())
@ignore_unicode_prefix
@since(1.3)
def collect(self):
"""Returns all the records as a list of :class:`Row`.
>>> df.collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectToPython()
return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
@ignore_unicode_prefix
@since(2.0)
def toLocalIterator(self):
"""
Returns an iterator that contains all of the rows in this :class:`DataFrame`.
The iterator will consume as much memory as the largest partition in this DataFrame.
>>> list(df.toLocalIterator())
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.toPythonIterator()
return _load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))
@ignore_unicode_prefix
@since(1.3)
def limit(self, num):
"""Limits the result count to the number specified.
>>> df.limit(1).collect()
[Row(age=2, name=u'Alice')]
>>> df.limit(0).collect()
[]
"""
jdf = self._jdf.limit(num)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def take(self, num):
"""Returns the first ``num`` rows as a :class:`list` of :class:`Row`.
>>> df.take(2)
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
"""
return self.limit(num).collect()
@since(1.3)
def foreach(self, f):
"""Applies the ``f`` function to all :class:`Row` of this :class:`DataFrame`.
This is a shorthand for ``df.rdd.foreach()``.
>>> def f(person):
... print(person.name)
>>> df.foreach(f)
"""
self.rdd.foreach(f)
@since(1.3)
def foreachPartition(self, f):
"""Applies the ``f`` function to each partition of this :class:`DataFrame`.
This a shorthand for ``df.rdd.foreachPartition()``.
>>> def f(people):
... for person in people:
... print(person.name)
>>> df.foreachPartition(f)
"""
self.rdd.foreachPartition(f)
@since(1.3)
def cache(self):
"""Persists the :class:`DataFrame` with the default storage level (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
self._jdf.cache()
return self
@since(1.3)
def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
"""Sets the storage level to persist the contents of the :class:`DataFrame` across
operations after the first time it is computed. This can only be used to assign
a new storage level if the :class:`DataFrame` does not have a storage level set yet.
If no storage level is specified, it defaults to (C{MEMORY_AND_DISK}).
.. note:: The default storage level has changed to C{MEMORY_AND_DISK} to match Scala in 2.0.
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdf.persist(javaStorageLevel)
return self
@property
@since(2.1)
def storageLevel(self):
"""Get the :class:`DataFrame`'s current storage level.
>>> df.storageLevel
StorageLevel(False, False, False, False, 1)
>>> df.cache().storageLevel
StorageLevel(True, True, False, True, 1)
>>> df2.persist(StorageLevel.DISK_ONLY_2).storageLevel
StorageLevel(True, False, False, False, 2)
"""
java_storage_level = self._jdf.storageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
@since(1.3)
def unpersist(self, blocking=False):
"""Marks the :class:`DataFrame` as non-persistent, and remove all blocks for it from
memory and disk.
.. note:: `blocking` default has changed to False to match Scala in 2.0.
"""
self.is_cached = False
self._jdf.unpersist(blocking)
return self
@since(1.4)
def coalesce(self, numPartitions):
"""
Returns a new :class:`DataFrame` that has exactly `numPartitions` partitions.
:param numPartitions: int, to specify the target number of partitions
Similar to coalesce defined on an :class:`RDD`, this operation results in a
narrow dependency, e.g. if you go from 1000 partitions to 100 partitions,
there will not be a shuffle, instead each of the 100 new partitions will
claim 10 of the current partitions. If a larger number of partitions is requested,
it will stay at the current number of partitions.
However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
this may result in your computation taking place on fewer nodes than
you like (e.g. one node in the case of numPartitions = 1). To avoid this,
you can call repartition(). This will add a shuffle step, but means the
current upstream partitions will be executed in parallel (per whatever
the current partitioning is).
>>> df.coalesce(1).rdd.getNumPartitions()
1
"""
return DataFrame(self._jdf.coalesce(numPartitions), self.sql_ctx)
@since(1.3)
def repartition(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is hash partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
.. versionchanged:: 1.6
Added optional arguments to specify the partitioning columns. Also made numPartitions
optional if partitioning columns are specified.
>>> df.repartition(10).rdd.getNumPartitions()
10
>>> data = df.union(df).repartition("age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
>>> data = data.repartition(7, "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
| 2|Alice|
| 5| Bob|
+---+-----+
>>> data.rdd.getNumPartitions()
7
>>> data = data.repartition("name", "age")
>>> data.show()
+---+-----+
|age| name|
+---+-----+
| 5| Bob|
| 5| Bob|
| 2|Alice|
| 2|Alice|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
return DataFrame(self._jdf.repartition(numPartitions), self.sql_ctx)
else:
return DataFrame(
self._jdf.repartition(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions, ) + cols
return DataFrame(self._jdf.repartition(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int or Column")
@since("2.4.0")
def repartitionByRange(self, numPartitions, *cols):
"""
Returns a new :class:`DataFrame` partitioned by the given partitioning expressions. The
resulting DataFrame is range partitioned.
:param numPartitions:
can be an int to specify the target number of partitions or a Column.
If it is a Column, it will be used as the first partitioning column. If not specified,
the default number of partitions is used.
At least one partition-by expression must be specified.
When no explicit sort order is specified, "ascending nulls first" is assumed.
Note that due to performance reasons this method uses sampling to estimate the ranges.
Hence, the output may not be consistent, since sampling can return different values.
The sample size can be controlled by the config
`spark.sql.execution.rangeExchange.sampleSizePerPartition`.
>>> df.repartitionByRange(2, "age").rdd.getNumPartitions()
2
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
>>> df.repartitionByRange(1, "age").rdd.getNumPartitions()
1
>>> data = df.repartitionByRange("age")
>>> df.show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
if isinstance(numPartitions, int):
if len(cols) == 0:
raise ValueError("At least one partition-by expression must be specified.")
else:
return DataFrame(
self._jdf.repartitionByRange(numPartitions, self._jcols(*cols)), self.sql_ctx)
elif isinstance(numPartitions, (basestring, Column)):
cols = (numPartitions,) + cols
return DataFrame(self._jdf.repartitionByRange(self._jcols(*cols)), self.sql_ctx)
else:
raise TypeError("numPartitions should be an int, string or Column")
@since(1.3)
def distinct(self):
"""Returns a new :class:`DataFrame` containing the distinct rows in this :class:`DataFrame`.
>>> df.distinct().count()
2
"""
return DataFrame(self._jdf.distinct(), self.sql_ctx)
@since(1.3)
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required, while `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
4
>>> df.sample(fraction=0.5, seed=3).count()
4
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = long(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx)
@since(1.5)
def sampleBy(self, col, fractions, seed=None):
"""
Returns a stratified sample without replacement based on the
fraction given on each stratum.
:param col: column that defines strata
:param fractions:
sampling fraction for each stratum. If a stratum is not
specified, we treat its fraction as zero.
:param seed: random seed
:return: a new DataFrame that represents the stratified sample
>>> from pyspark.sql.functions import col
>>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key"))
>>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0)
>>> sampled.groupBy("key").count().orderBy("key").show()
+---+-----+
|key|count|
+---+-----+
| 0| 5|
| 1| 9|
+---+-----+
>>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count()
33
.. versionchanged:: 3.0
Added sampling by a column of :class:`Column`
"""
if isinstance(col, basestring):
col = Column(col)
elif not isinstance(col, Column):
raise ValueError("col must be a string or a column, but got %r" % type(col))
if not isinstance(fractions, dict):
raise ValueError("fractions must be a dict but got %r" % type(fractions))
for k, v in fractions.items():
if not isinstance(k, (float, int, long, basestring)):
raise ValueError("key must be float, int, long, or string, but got %r" % type(k))
fractions[k] = float(v)
col = col._jc
seed = seed if seed is not None else random.randint(0, sys.maxsize)
return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
@since(1.4)
def randomSplit(self, weights, seed=None):
"""Randomly splits this :class:`DataFrame` with the provided weights.
:param weights: list of doubles as weights with which to split the DataFrame. Weights will
be normalized if they don't sum up to 1.0.
:param seed: The seed for sampling.
>>> splits = df4.randomSplit([1.0, 2.0], 24)
>>> splits[0].count()
1
>>> splits[1].count()
3
"""
for w in weights:
if w < 0.0:
raise ValueError("Weights must be positive. Found weight value: %s" % w)
seed = seed if seed is not None else random.randint(0, sys.maxsize)
rdd_array = self._jdf.randomSplit(_to_list(self.sql_ctx._sc, weights), long(seed))
return [DataFrame(rdd, self.sql_ctx) for rdd in rdd_array]
@property
@since(1.3)
def dtypes(self):
"""Returns all column names and their data types as a list.
>>> df.dtypes
[('age', 'int'), ('name', 'string')]
"""
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
@property
@since(1.3)
def columns(self):
"""Returns all column names as a list.
>>> df.columns
['age', 'name']
"""
return [f.name for f in self.schema.fields]
@since(2.3)
def colRegex(self, colName):
"""
Selects column based on the column name specified as a regex and returns it
as :class:`Column`.
:param colName: string, column name specified as a regex.
>>> df = spark.createDataFrame([("a", 1), ("b", 2), ("c", 3)], ["Col1", "Col2"])
>>> df.select(df.colRegex("`(Col1)?+.+`")).show()
+----+
|Col2|
+----+
| 1|
| 2|
| 3|
+----+
"""
if not isinstance(colName, basestring):
raise ValueError("colName should be provided as string")
jc = self._jdf.colRegex(colName)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def alias(self, alias):
"""Returns a new :class:`DataFrame` with an alias set.
:param alias: string, an alias name to be set for the DataFrame.
>>> from pyspark.sql.functions import *
>>> df_as1 = df.alias("df_as1")
>>> df_as2 = df.alias("df_as2")
>>> joined_df = df_as1.join(df_as2, col("df_as1.name") == col("df_as2.name"), 'inner')
>>> joined_df.select("df_as1.name", "df_as2.name", "df_as2.age").collect()
[Row(name=u'Bob', name=u'Bob', age=5), Row(name=u'Alice', name=u'Alice', age=2)]
"""
assert isinstance(alias, basestring), "alias should be a string"
return DataFrame(getattr(self._jdf, "as")(alias), self.sql_ctx)
@ignore_unicode_prefix
@since(2.1)
def crossJoin(self, other):
"""Returns the cartesian product with another :class:`DataFrame`.
:param other: Right side of the cartesian product.
>>> df.select("age", "name").collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df2.select("name", "height").collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85)]
>>> df.crossJoin(df2.select("height")).select("age", "name", "height").collect()
[Row(age=2, name=u'Alice', height=80), Row(age=2, name=u'Alice', height=85),
Row(age=5, name=u'Bob', height=80), Row(age=5, name=u'Bob', height=85)]
"""
jdf = self._jdf.crossJoin(other._jdf)
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def join(self, other, on=None, how=None):
"""Joins with another :class:`DataFrame`, using the given join expression.
:param other: Right side of the join
:param on: a string for the join column name, a list of column names,
a join expression (Column), or a list of Columns.
If `on` is a string or a list of strings indicating the name of the join column(s),
the column(s) must exist on both sides, and this performs an equi-join.
:param how: str, default ``inner``. Must be one of: ``inner``, ``cross``, ``outer``,
``full``, ``full_outer``, ``left``, ``left_outer``, ``right``, ``right_outer``,
``left_semi``, and ``left_anti``.
The following performs a full outer join between ``df1`` and ``df2``.
>>> df.join(df2, df.name == df2.name, 'outer').select(df.name, df2.height).collect()
[Row(name=None, height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> df.join(df2, 'name', 'outer').select('name', 'height').collect()
[Row(name=u'Tom', height=80), Row(name=u'Bob', height=85), Row(name=u'Alice', height=None)]
>>> cond = [df.name == df3.name, df.age == df3.age]
>>> df.join(df3, cond, 'outer').select(df.name, df3.age).collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.join(df2, 'name').select(df.name, df2.height).collect()
[Row(name=u'Bob', height=85)]
>>> df.join(df4, ['name', 'age']).select(df.name, df.age).collect()
[Row(name=u'Bob', age=5)]
"""
if on is not None and not isinstance(on, list):
on = [on]
if on is not None:
if isinstance(on[0], basestring):
on = self._jseq(on)
else:
assert isinstance(on[0], Column), "on should be Column or list of Column"
on = reduce(lambda x, y: x.__and__(y), on)
on = on._jc
if on is None and how is None:
jdf = self._jdf.join(other._jdf)
else:
if how is None:
how = "inner"
if on is None:
on = self._jseq([])
assert isinstance(how, basestring), "how should be basestring"
jdf = self._jdf.join(other._jdf, on, how)
return DataFrame(jdf, self.sql_ctx)
@since(1.6)
def sortWithinPartitions(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` with each partition sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sortWithinPartitions("age", ascending=False).show()
+---+-----+
|age| name|
+---+-----+
| 2|Alice|
| 5| Bob|
+---+-----+
"""
jdf = self._jdf.sortWithinPartitions(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def sort(self, *cols, **kwargs):
"""Returns a new :class:`DataFrame` sorted by the specified column(s).
:param cols: list of :class:`Column` or column names to sort by.
:param ascending: boolean or list of boolean (default True).
Sort ascending vs. descending. Specify list for multiple sort orders.
If a list is specified, length of the list must equal length of the `cols`.
>>> df.sort(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.sort("age", ascending=False).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(df.age.desc()).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> from pyspark.sql.functions import *
>>> df.sort(asc("age")).collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.orderBy(desc("age"), "name").collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
>>> df.orderBy(["age", "name"], ascending=[0, 1]).collect()
[Row(age=5, name=u'Bob'), Row(age=2, name=u'Alice')]
"""
jdf = self._jdf.sort(self._sort_cols(cols, kwargs))
return DataFrame(jdf, self.sql_ctx)
orderBy = sort
def _jseq(self, cols, converter=None):
"""Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)
def _jmap(self, jm):
"""Return a JVM Scala Map from a dict"""
return _to_scala_map(self.sql_ctx._sc, jm)
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column)
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
"""
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols)
@since("1.3.1")
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
Use summary for expanded statistics and control over which statistics to compute.
"""
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since("2.3.0")
def summary(self, *statistics):
"""Computes specified statistics for numeric and string columns. Available statistics are:
- count
- mean
- stddev
- min
- max
- arbitrary approximate percentiles specified as a percentage (eg, 75%)
If no statistics are given, this function computes count, mean, stddev, min,
approximate quartiles (percentiles at 25%, 50%, and 75%), and max.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.summary().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| 25%| 2| null|
| 50%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+------------------+-----+
>>> df.summary("count", "min", "25%", "75%", "max").show()
+-------+---+-----+
|summary|age| name|
+-------+---+-----+
| count| 2| 2|
| min| 2|Alice|
| 25%| 2| null|
| 75%| 5| null|
| max| 5| Bob|
+-------+---+-----+
To do a summary for specific columns first select them:
>>> df.select("age", "name").summary("count").show()
+-------+---+----+
|summary|age|name|
+-------+---+----+
| count| 2| 2|
+-------+---+----+
See also describe for basic statistics.
"""
if len(statistics) == 1 and isinstance(statistics[0], list):
statistics = statistics[0]
jdf = self._jdf.summary(self._jseq(statistics))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default 1. Number of rows to return.
:return: If n is greater than 1, return a list of :class:`Row`.
If n is 1, return a single Row.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
"""
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n)
@ignore_unicode_prefix
@since(1.3)
def first(self):
"""Returns the first row as a :class:`Row`.
>>> df.first()
Row(age=2, name=u'Alice')
"""
return self.head()
@ignore_unicode_prefix
@since(1.3)
def __getitem__(self, item):
"""Returns the column as a :class:`Column`.
>>> df.select(df['age']).collect()
[Row(age=2), Row(age=5)]
>>> df[ ["name", "age"]].collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df[ df.age > 3 ].collect()
[Row(age=5, name=u'Bob')]
>>> df[df[0] > 3].collect()
[Row(age=5, name=u'Bob')]
"""
if isinstance(item, basestring):
jc = self._jdf.apply(item)
return Column(jc)
elif isinstance(item, Column):
return self.filter(item)
elif isinstance(item, (list, tuple)):
return self.select(*item)
elif isinstance(item, int):
jc = self._jdf.apply(self.columns[item])
return Column(jc)
else:
raise TypeError("unexpected item type: %s" % type(item))
@since(1.3)
def __getattr__(self, name):
"""Returns the :class:`Column` denoted by ``name``.
>>> df.select(df.age).collect()
[Row(age=2), Row(age=5)]
"""
if name not in self.columns:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
jc = self._jdf.apply(name)
return Column(jc)
@ignore_unicode_prefix
@since(1.3)
def select(self, *cols):
"""Projects a set of expressions and returns a new :class:`DataFrame`.
:param cols: list of column names (string) or expressions (:class:`Column`).
If one of the column names is '*', that column is expanded to include all columns
in the current DataFrame.
>>> df.select('*').collect()
[Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
>>> df.select('name', 'age').collect()
[Row(name=u'Alice', age=2), Row(name=u'Bob', age=5)]
>>> df.select(df.name, (df.age + 10).alias('age')).collect()
[Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
"""
jdf = self._jdf.select(self._jcols(*cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def selectExpr(self, *expr):
"""Projects a set of SQL expressions and returns a new :class:`DataFrame`.
This is a variant of :func:`select` that accepts SQL expressions.
>>> df.selectExpr("age * 2", "abs(age)").collect()
[Row((age * 2)=4, abs(age)=2), Row((age * 2)=10, abs(age)=5)]
"""
if len(expr) == 1 and isinstance(expr[0], list):
expr = expr[0]
jdf = self._jdf.selectExpr(self._jseq(expr))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
"""
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def groupBy(self, *cols):
"""Groups the :class:`DataFrame` using the specified columns,
so we can run aggregation on them. See :class:`GroupedData`
for all the available aggregate functions.
:func:`groupby` is an alias for :func:`groupBy`.
:param cols: list of columns to group by.
Each element should be a column name (string) or an expression (:class:`Column`).
>>> df.groupBy().avg().collect()
[Row(avg(age)=3.5)]
>>> sorted(df.groupBy('name').agg({'age': 'mean'}).collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(df.name).avg().collect())
[Row(name=u'Alice', avg(age)=2.0), Row(name=u'Bob', avg(age)=5.0)]
>>> sorted(df.groupBy(['name', df.age]).count().collect())
[Row(name=u'Alice', age=2, count=1), Row(name=u'Bob', age=5, count=1)]
"""
jgd = self._jdf.groupBy(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def rollup(self, *cols):
"""
Create a multi-dimensional rollup for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.rollup("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.rollup(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.4)
def cube(self, *cols):
"""
Create a multi-dimensional cube for the current :class:`DataFrame` using
the specified columns, so we can run aggregation on them.
>>> df.cube("name", df.age).count().orderBy("name", "age").show()
+-----+----+-----+
| name| age|count|
+-----+----+-----+
| null|null| 2|
| null| 2| 1|
| null| 5| 1|
|Alice|null| 1|
|Alice| 2| 1|
| Bob|null| 1|
| Bob| 5| 1|
+-----+----+-----+
"""
jgd = self._jdf.cube(self._jcols(*cols))
from pyspark.sql.group import GroupedData
return GroupedData(jgd, self)
@since(1.3)
def agg(self, *exprs):
""" Aggregate on the entire :class:`DataFrame` without groups
(shorthand for ``df.groupBy.agg()``).
>>> df.agg({"age": "max"}).collect()
[Row(max(age)=5)]
>>> from pyspark.sql import functions as F
>>> df.agg(F.min(df.age)).collect()
[Row(min(age)=2)]
"""
return self.groupBy().agg(*exprs)
@since(2.0)
def union(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
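For example, unioning a DataFrame with itself keeps duplicate rows:
>>> df.union(df).count()
4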
"""
return DataFrame(self._jdf.union(other._jdf), self.sql_ctx)
@since(1.3)
def unionAll(self, other):
""" Return a new :class:`DataFrame` containing union of rows in this and another frame.
This is equivalent to `UNION ALL` in SQL. To do a SQL-style set union
(that does deduplication of elements), use this function followed by :func:`distinct`.
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return self.union(other)
@since(2.3)
def unionByName(self, other):
""" Returns a new :class:`DataFrame` containing union of rows in this and another frame.
This is different from both `UNION ALL` and `UNION DISTINCT` in SQL. To do a SQL-style set
union (that does deduplication of elements), use this function followed by :func:`distinct`.
The difference between this function and :func:`union` is that this function
resolves columns by name (not by position):
>>> df1 = spark.createDataFrame([[1, 2, 3]], ["col0", "col1", "col2"])
>>> df2 = spark.createDataFrame([[4, 5, 6]], ["col1", "col2", "col0"])
>>> df1.unionByName(df2).show()
+----+----+----+
|col0|col1|col2|
+----+----+----+
| 1| 2| 3|
| 6| 4| 5|
+----+----+----+
"""
return DataFrame(self._jdf.unionByName(other._jdf), self.sql_ctx)
@since(1.3)
def intersect(self, other):
""" Return a new :class:`DataFrame` containing rows only in
both this frame and another frame.
This is equivalent to `INTERSECT` in SQL.
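For example (the result is deduplicated, like SQL ``INTERSECT``):
>>> df.intersect(df).count()
2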
"""
return DataFrame(self._jdf.intersect(other._jdf), self.sql_ctx)
@since(2.4)
def intersectAll(self, other):
""" Return a new :class:`DataFrame` containing rows in both this dataframe and other
dataframe while preserving duplicates.
This is equivalent to `INTERSECT ALL` in SQL.
>>> df1 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3), ("c", 4)], ["C1", "C2"])
>>> df2 = spark.createDataFrame([("a", 1), ("a", 1), ("b", 3)], ["C1", "C2"])
>>> df1.intersectAll(df2).sort("C1", "C2").show()
+---+---+
| C1| C2|
+---+---+
| a| 1|
| a| 1|
| b| 3|
+---+---+
Also as standard in SQL, this function resolves columns by position (not by name).
"""
return DataFrame(self._jdf.intersectAll(other._jdf), self.sql_ctx)
@since(1.3)
def subtract(self, other):
""" Return a new :class:`DataFrame` containing rows in this frame
but not in another frame.
This is equivalent to `EXCEPT DISTINCT` in SQL.
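For example, subtracting a DataFrame from itself leaves no rows:
>>> df.subtract(df).count()
0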
"""
return DataFrame(getattr(self._jdf, "except")(other._jdf), self.sql_ctx)
@since(1.4)
def dropDuplicates(self, subset=None):
"""Return a new :class:`DataFrame` with duplicate rows removed,
optionally only considering certain columns.
For a static batch :class:`DataFrame`, it just drops duplicate rows. For a streaming
:class:`DataFrame`, it will keep all data across triggers as intermediate state to drop
duplicate rows. You can use :func:`withWatermark` to limit how late the duplicate data can
be, and the system will limit the state accordingly. In addition, data older than the
watermark will be dropped to avoid any possibility of duplicates.
:func:`drop_duplicates` is an alias for :func:`dropDuplicates`.
>>> from pyspark.sql import Row
>>> df = sc.parallelize([ \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=5, height=80), \\
... Row(name='Alice', age=10, height=80)]).toDF()
>>> df.dropDuplicates().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
| 10| 80|Alice|
+---+------+-----+
>>> df.dropDuplicates(['name', 'height']).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 5| 80|Alice|
+---+------+-----+
"""
if subset is None:
jdf = self._jdf.dropDuplicates()
else:
jdf = self._jdf.dropDuplicates(self._jseq(subset))
return DataFrame(jdf, self.sql_ctx)
@since("1.3.1")
def dropna(self, how='any', thresh=None, subset=None):
"""Returns a new :class:`DataFrame` omitting rows with null values.
:func:`DataFrame.dropna` and :func:`DataFrameNaFunctions.drop` are aliases of each other.
:param how: 'any' or 'all'.
If 'any', drop a row if it contains any nulls.
If 'all', drop a row only if all its values are null.
:param thresh: int, default None
If specified, drop rows that have fewer than `thresh` non-null values.
This overrides the `how` parameter.
:param subset: optional list of column names to consider.
>>> df4.na.drop().show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
+---+------+-----+
"""
if how is not None and how not in ['any', 'all']:
raise ValueError("how ('" + how + "') should be 'any' or 'all'")
if subset is None:
subset = self.columns
elif isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
if thresh is None:
thresh = len(subset) if how == 'any' else 1
return DataFrame(self._jdf.na().drop(thresh, self._jseq(subset)), self.sql_ctx)
@since("1.3.1")
def fillna(self, value, subset=None):
"""Replace null values, alias for ``na.fill()``.
:func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other.
:param value: int, long, float, string, bool or dict.
Value to replace null values with.
If the value is a dict, then `subset` is ignored and `value` must be a mapping
from column name (string) to replacement value. The replacement value must be
an int, long, float, boolean, or string.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.fill(50).show()
+---+------+-----+
|age|height| name|
+---+------+-----+
| 10| 80|Alice|
| 5| 50| Bob|
| 50| 50| Tom|
| 50| 50| null|
+---+------+-----+
>>> df5.na.fill(False).show()
+----+-------+-----+
| age| name| spy|
+----+-------+-----+
| 10| Alice|false|
| 5| Bob|false|
|null|Mallory| true|
+----+-------+-----+
>>> df4.na.fill({'age': 50, 'name': 'unknown'}).show()
+---+------+-------+
|age|height| name|
+---+------+-------+
| 10| 80| Alice|
| 5| null| Bob|
| 50| null| Tom|
| 50| null|unknown|
+---+------+-------+
"""
if not isinstance(value, (float, int, long, basestring, bool, dict)):
raise ValueError("value should be a float, int, long, string, bool or dict")
# Note that bool validates isinstance(int), but we don't want to
# convert bools to floats
if not isinstance(value, bool) and isinstance(value, (int, long)):
value = float(value)
if isinstance(value, dict):
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
elif subset is None:
return DataFrame(self._jdf.na().fill(value), self.sql_ctx)
else:
if isinstance(subset, basestring):
subset = [subset]
elif not isinstance(subset, (list, tuple)):
raise ValueError("subset should be a list or tuple of column names")
return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
@since(1.4)
def replace(self, to_replace, value=_NoValue, subset=None):
"""Returns a new :class:`DataFrame` replacing a value with another value.
:func:`DataFrame.replace` and :func:`DataFrameNaFunctions.replace` are
aliases of each other.
Values to_replace and value must have the same type and can only be numerics, booleans,
or strings. Value can be None. When replacing, the new value will be cast
to the type of the existing column.
For numeric replacements all values to be replaced should have unique
floating point representation. In case of conflicts (for example with `{42: -1, 42.0: 1}`)
an arbitrary replacement will be used.
:param to_replace: bool, int, long, float, string, list or dict.
Value to be replaced.
If the value is a dict, then `value` is ignored or can be omitted, and `to_replace`
must be a mapping between a value and a replacement.
:param value: bool, int, long, float, string, list or None.
The replacement value must be a bool, int, long, float, string or None. If `value` is a
list, `value` should be of the same length and type as `to_replace`.
If `value` is a scalar and `to_replace` is a sequence, then `value` is
used as a replacement for each item in `to_replace`.
:param subset: optional list of column names to consider.
Columns specified in subset that do not have matching data type are ignored.
For example, if `value` is a string, and subset contains a non-string column,
then the non-string column is simply ignored.
>>> df4.na.replace(10, 20).show()
+----+------+-----+
| age|height| name|
+----+------+-----+
| 20| 80|Alice|
| 5| null| Bob|
|null| null| Tom|
|null| null| null|
+----+------+-----+
>>> df4.na.replace('Alice', None).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace({'Alice': None}).show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80|null|
| 5| null| Bob|
|null| null| Tom|
|null| null|null|
+----+------+----+
>>> df4.na.replace(['Alice', 'Bob'], ['A', 'B'], 'name').show()
+----+------+----+
| age|height|name|
+----+------+----+
| 10| 80| A|
| 5| null| B|
|null| null| Tom|
|null| null|null|
+----+------+----+
"""
if value is _NoValue:
if isinstance(to_replace, dict):
value = None
else:
raise TypeError("value argument is required when to_replace is not a dictionary.")
# Helper functions
def all_of(types):
"""Given a type or tuple of types and a sequence of xs
check if each x is instance of type(s)
>>> all_of(bool)([True, False])
True
>>> all_of(basestring)(["a", 1])
False
"""
def all_of_(xs):
return all(isinstance(x, types) for x in xs)
return all_of_
all_of_bool = all_of(bool)
all_of_str = all_of(basestring)
all_of_numeric = all_of((float, int, long))
# Validate input types
valid_types = (bool, float, int, long, basestring, list, tuple)
if not isinstance(to_replace, valid_types + (dict, )):
raise ValueError(
"to_replace should be a bool, float, int, long, string, list, tuple, or dict. "
"Got {0}".format(type(to_replace)))
if not isinstance(value, valid_types) and value is not None \
and not isinstance(to_replace, dict):
raise ValueError("If to_replace is not a dict, value should be "
"a bool, float, int, long, string, list, tuple or None. "
"Got {0}".format(type(value)))
if isinstance(to_replace, (list, tuple)) and isinstance(value, (list, tuple)):
if len(to_replace) != len(value):
raise ValueError("to_replace and value lists should be of the same length. "
"Got {0} and {1}".format(len(to_replace), len(value)))
if not (subset is None or isinstance(subset, (list, tuple, basestring))):
raise ValueError("subset should be a list or tuple of column names, "
"column name or None. Got {0}".format(type(subset)))
# Reshape input arguments if necessary
if isinstance(to_replace, (float, int, long, basestring)):
to_replace = [to_replace]
if isinstance(to_replace, dict):
rep_dict = to_replace
if value is not None:
warnings.warn("to_replace is a dict and value is not None. value will be ignored.")
else:
if isinstance(value, (float, int, long, basestring)) or value is None:
value = [value for _ in range(len(to_replace))]
rep_dict = dict(zip(to_replace, value))
if isinstance(subset, basestring):
subset = [subset]
# Verify we were not passed in mixed type generics.
if not any(all_of_type(rep_dict.keys())
and all_of_type(x for x in rep_dict.values() if x is not None)
for all_of_type in [all_of_bool, all_of_str, all_of_numeric]):
raise ValueError("Mixed type replacements are not supported")
if subset is None:
return DataFrame(self._jdf.na().replace('*', rep_dict), self.sql_ctx)
else:
return DataFrame(
self._jdf.na().replace(self._jseq(subset), self._jmap(rep_dict)), self.sql_ctx)
@since(2.0)
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
.. versionchanged:: 2.2
Added support for multiple columns.
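For example, an approximate median of the ``age`` column (skipped because the exact
result depends on ``relativeError``):
>>> df.approxQuantile("age", [0.5], 0.25)  # doctest: +SKIP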
"""
if not isinstance(col, (basestring, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, basestring)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, basestring):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list
@since(1.4)
def corr(self, col1, col2, method=None):
"""
Calculates the correlation of two columns of a DataFrame as a double value.
Currently only supports the Pearson Correlation Coefficient.
:func:`DataFrame.corr` and :func:`DataFrameStatFunctions.corr` are aliases of each other.
:param col1: The name of the first column
:param col2: The name of the second column
:param method: The correlation method. Currently only supports "pearson"
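For example, the Pearson correlation of a column with itself is 1.0:
>>> df.corr("age", "age")
1.0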
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
if not method:
method = "pearson"
if not method == "pearson":
raise ValueError("Currently only the calculation of the Pearson Correlation " +
"coefficient is supported.")
return self._jdf.stat().corr(col1, col2, method)
@since(1.4)
def cov(self, col1, col2):
"""
Calculate the sample covariance for the given columns, specified by their names, as a
double value. :func:`DataFrame.cov` and :func:`DataFrameStatFunctions.cov` are aliases.
:param col1: The name of the first column
:param col2: The name of the second column
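For example, the covariance of a column with itself equals its sample variance:
>>> df.cov("age", "age")
4.5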
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return self._jdf.stat().cov(col1, col2)
@since(1.4)
def crosstab(self, col1, col2):
"""
Computes a pair-wise frequency table of the given columns. Also known as a contingency
table. The number of distinct values for each column should be less than 1e4. At most 1e6
non-zero pair frequencies will be returned.
The first column of each row will be the distinct values of `col1` and the column names
will be the distinct values of `col2`. The name of the first column will be `$col1_$col2`.
Pairs that have no occurrences will have zero as their counts.
:func:`DataFrame.crosstab` and :func:`DataFrameStatFunctions.crosstab` are aliases.
:param col1: The name of the first column. Distinct items will make the first item of
each row.
:param col2: The name of the second column. Distinct items will make the column names
of the DataFrame.
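For example (skipped because the ordering of the contingency-table columns is not
guaranteed):
>>> df.crosstab("age", "name").show()  # doctest: +SKIP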
"""
if not isinstance(col1, basestring):
raise ValueError("col1 should be a string.")
if not isinstance(col2, basestring):
raise ValueError("col2 should be a string.")
return DataFrame(self._jdf.stat().crosstab(col1, col2), self.sql_ctx)
@since(1.4)
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
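For example (skipped because the result may contain false positives):
>>> df.freqItems(["age"], support=0.5).collect()  # doctest: +SKIP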
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumn(self, colName, col):
"""
Returns a new :class:`DataFrame` by adding a column or replacing the
existing column that has the same name.
The column expression must be an expression over this DataFrame; attempting to add
a column from some other dataframe will raise an error.
:param colName: string, name of the new column.
:param col: a :class:`Column` expression for the new column.
>>> df.withColumn('age2', df.age + 2).collect()
[Row(age=2, name=u'Alice', age2=4), Row(age=5, name=u'Bob', age2=7)]
"""
assert isinstance(col, Column), "col should be Column"
return DataFrame(self._jdf.withColumn(colName, col._jc), self.sql_ctx)
@ignore_unicode_prefix
@since(1.3)
def withColumnRenamed(self, existing, new):
"""Returns a new :class:`DataFrame` by renaming an existing column.
This is a no-op if schema doesn't contain the given column name.
:param existing: string, name of the existing column to rename.
:param new: string, new name of the column.
>>> df.withColumnRenamed('age', 'age2').collect()
[Row(age2=2, name=u'Alice'), Row(age2=5, name=u'Bob')]
"""
return DataFrame(self._jdf.withColumnRenamed(existing, new), self.sql_ctx)
@since(1.4)
@ignore_unicode_prefix
def drop(self, *cols):
"""Returns a new :class:`DataFrame` that drops the specified column.
This is a no-op if schema doesn't contain the given column name(s).
:param cols: a string name of the column to drop, or a
:class:`Column` to drop, or a list of string name of the columns to drop.
>>> df.drop('age').collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.drop(df.age).collect()
[Row(name=u'Alice'), Row(name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df.name).collect()
[Row(age=5, height=85, name=u'Bob')]
>>> df.join(df2, df.name == df2.name, 'inner').drop(df2.name).collect()
[Row(age=5, name=u'Bob', height=85)]
>>> df.join(df2, 'name', 'inner').drop('age', 'height').collect()
[Row(name=u'Bob')]
"""
if len(cols) == 1:
col = cols[0]
if isinstance(col, basestring):
jdf = self._jdf.drop(col)
elif isinstance(col, Column):
jdf = self._jdf.drop(col._jc)
else:
raise TypeError("col should be a string or a Column")
else:
for col in cols:
if not isinstance(col, basestring):
raise TypeError("each col in the param list should be a string")
jdf = self._jdf.drop(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@ignore_unicode_prefix
def toDF(self, *cols):
"""Returns a new class:`DataFrame` that with new specified column names
:param cols: list of new column names (string)
>>> df.toDF('f1', 'f2').collect()
[Row(f1=2, f2=u'Alice'), Row(f1=5, f2=u'Bob')]
"""
jdf = self._jdf.toDF(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx)
@since(1.3)
def toPandas(self):
"""
Returns the contents of this :class:`DataFrame` as a Pandas ``pandas.DataFrame``.
This is only available if Pandas is installed and available.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: Usage with spark.sql.execution.arrow.enabled=True is experimental.
>>> df.toPandas() # doctest: +SKIP
age name
0 2 Alice
1 5 Bob
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
if self.sql_ctx._conf.pandasRespectSessionTimeZone():
timezone = self.sql_ctx._conf.sessionLocalTimeZone()
else:
timezone = None
if self.sql_ctx._conf.arrowEnabled():
use_arrow = True
try:
from pyspark.sql.types import to_arrow_schema
from pyspark.sql.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
to_arrow_schema(self.schema)
except Exception as e:
if self.sql_ctx._conf.arrowFallbackEnabled():
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true; however, "
"failed by the reason below:\n %s\n"
"Attempting non-optimization as "
"'spark.sql.execution.arrow.fallback.enabled' is set to "
"true." % _exception_message(e))
warnings.warn(msg)
use_arrow = False
else:
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and will not continue because automatic fallback "
"with 'spark.sql.execution.arrow.fallback.enabled' has been set to "
"false.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
# Try to use Arrow optimization when the schema is supported and the required version
# of PyArrow is found, if 'spark.sql.execution.arrow.enabled' is enabled.
if use_arrow:
try:
from pyspark.sql.types import _check_dataframe_convert_date, \
_check_dataframe_localize_timestamps
import pyarrow
batches = self._collectAsArrow()
if len(batches) > 0:
table = pyarrow.Table.from_batches(batches)
pdf = table.to_pandas()
pdf = _check_dataframe_convert_date(pdf, self.schema)
return _check_dataframe_localize_timestamps(pdf, timezone)
else:
return pd.DataFrame.from_records([], columns=self.columns)
except Exception as e:
# We might have to allow fallback here as well but multiple Spark jobs can
# be executed. So, simply fail in this case for now.
msg = (
"toPandas attempted Arrow optimization because "
"'spark.sql.execution.arrow.enabled' is set to true, but has reached "
"the error below and can not continue. Note that "
"'spark.sql.execution.arrow.fallback.enabled' does not have an effect "
"on failures in the middle of computation.\n %s" % _exception_message(e))
warnings.warn(msg)
raise
# Below is toPandas without Arrow optimization.
pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)
dtype = {}
for field in self.schema:
pandas_type = _to_corrected_pandas_type(field.dataType)
# SPARK-21766: if an integer field is nullable and has null values, it can be
# inferred by pandas as float column. Once we convert the column with NaN back
# to integer type e.g., np.int16, we will hit exception. So we use the inferred
# float type, not the corrected type from the schema in this case.
if pandas_type is not None and \
not(isinstance(field.dataType, IntegralType) and field.nullable and
pdf[field.name].isnull().any()):
dtype[field.name] = pandas_type
for f, t in dtype.items():
pdf[f] = pdf[f].astype(t, copy=False)
if timezone is None:
return pdf
else:
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
for field in self.schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
pdf[field.name] = \
_check_series_convert_timestamps_local_tz(pdf[field.name], timezone)
return pdf
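    # Illustrative usage sketch (comment only, not part of the original API):
    # the Arrow code path above is driven purely by SQL configuration, e.g.
    #
    #   spark.conf.set("spark.sql.execution.arrow.enabled", "true")
    #   pdf = df.toPandas()
    #
    # With 'spark.sql.execution.arrow.fallback.enabled' set to true, schema or
    # pyarrow problems detected up front fall back to the plain collect() path
    # warned about above; errors raised in the middle of the Arrow collection
    # are re-raised instead of falling back.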
def _collectAsArrow(self):
"""
        Returns all records as a list of ArrowRecordBatches; pyarrow must be installed
        and available on both the driver and worker Python environments.
.. note:: Experimental.
"""
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectAsArrowToPython()
# Collect list of un-ordered batches where last element is a list of correct order indices
results = list(_load_from_socket(sock_info, ArrowCollectSerializer()))
batches = results[:-1]
batch_order = results[-1]
# Re-order the batch list using the correct order
return [batches[i] for i in batch_order]
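    # Illustrative example of the re-ordering done in _collectAsArrow() above
    # (values are made up): if the socket yields
    #   results == [batch_a, batch_b, batch_c, [2, 0, 1]]
    # then batch_order == [2, 0, 1] and the method returns
    #   [batch_c, batch_a, batch_b]
    # i.e. the record batches rearranged into their correct partition order.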
##########################################################################################
# Pandas compatibility
##########################################################################################
groupby = copy_func(
groupBy,
sinceversion=1.4,
doc=":func:`groupby` is an alias for :func:`groupBy`.")
drop_duplicates = copy_func(
dropDuplicates,
sinceversion=1.4,
doc=":func:`drop_duplicates` is an alias for :func:`dropDuplicates`.")
where = copy_func(
filter,
sinceversion=1.3,
doc=":func:`where` is an alias for :func:`filter`.")
def _to_scala_map(sc, jm):
"""
Convert a dict into a JVM Map.
"""
return sc._jvm.PythonUtils.toScalaMap(jm)
def _to_corrected_pandas_type(dt):
"""
When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong.
    This method gets the corrected data type for Pandas if that type may be inferred incorrectly.
"""
import numpy as np
if type(dt) == ByteType:
return np.int8
elif type(dt) == ShortType:
return np.int16
elif type(dt) == IntegerType:
return np.int32
elif type(dt) == FloatType:
return np.float32
else:
return None
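# Illustrative behaviour of _to_corrected_pandas_type() above (comment sketch,
# not part of the original module):
#   _to_corrected_pandas_type(ByteType())     -> numpy.int8
#   _to_corrected_pandas_type(IntegerType())  -> numpy.int32
#   _to_corrected_pandas_type(LongType())     -> None  (left to pandas, which
#                                                       typically infers int64)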
class DataFrameNaFunctions(object):
"""Functionality for working with missing data in :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def drop(self, how='any', thresh=None, subset=None):
return self.df.dropna(how=how, thresh=thresh, subset=subset)
drop.__doc__ = DataFrame.dropna.__doc__
def fill(self, value, subset=None):
return self.df.fillna(value=value, subset=subset)
fill.__doc__ = DataFrame.fillna.__doc__
def replace(self, to_replace, value=_NoValue, subset=None):
return self.df.replace(to_replace, value, subset)
replace.__doc__ = DataFrame.replace.__doc__
class DataFrameStatFunctions(object):
"""Functionality for statistic functions with :class:`DataFrame`.
.. versionadded:: 1.4
"""
def __init__(self, df):
self.df = df
def approxQuantile(self, col, probabilities, relativeError):
return self.df.approxQuantile(col, probabilities, relativeError)
approxQuantile.__doc__ = DataFrame.approxQuantile.__doc__
def corr(self, col1, col2, method=None):
return self.df.corr(col1, col2, method)
corr.__doc__ = DataFrame.corr.__doc__
def cov(self, col1, col2):
return self.df.cov(col1, col2)
cov.__doc__ = DataFrame.cov.__doc__
def crosstab(self, col1, col2):
return self.df.crosstab(col1, col2)
crosstab.__doc__ = DataFrame.crosstab.__doc__
def freqItems(self, cols, support=None):
return self.df.freqItems(cols, support)
freqItems.__doc__ = DataFrame.freqItems.__doc__
def sampleBy(self, col, fractions, seed=None):
return self.df.sampleBy(col, fractions, seed)
sampleBy.__doc__ = DataFrame.sampleBy.__doc__
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext, SparkSession
import pyspark.sql.dataframe
from pyspark.sql.functions import from_unixtime
globs = pyspark.sql.dataframe.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['spark'] = SparkSession(sc)
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
globs['df2'] = sc.parallelize([Row(name='Tom', height=80), Row(name='Bob', height=85)]).toDF()
globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
Row(name='Bob', age=5)]).toDF()
globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
Row(name='Bob', age=5, height=None),
Row(name='Tom', age=None, height=None),
Row(name=None, age=None, height=None)]).toDF()
globs['df5'] = sc.parallelize([Row(name='Alice', spy=False, age=10),
Row(name='Bob', spy=None, age=5),
Row(name='Mallory', spy=True, age=None)]).toDF()
globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
Row(name='Bob', time=1479442946)]).toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.dataframe, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 7,354,053,555,749,424,000 | 37.908426 | 100 | 0.565298 | false |
lahosken/pants | contrib/android/src/python/pants/contrib/android/tasks/zipalign.py | 12 | 3407 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import subprocess
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.util.dirutil import safe_mkdir
from pants.contrib.android.targets.android_binary import AndroidBinary
from pants.contrib.android.tasks.android_task import AndroidTask
logger = logging.getLogger(__name__)
class Zipalign(AndroidTask):
"""Task to run zipalign, an archive alignment tool."""
@classmethod
def prepare(cls, options, round_manager):
super(Zipalign, cls).prepare(options, round_manager)
round_manager.require_data('release_apk')
@staticmethod
def is_zipaligntarget(target):
"""Determine whether the target is a candidate for the zipalign task."""
return isinstance(target, AndroidBinary)
def __init__(self, *args, **kwargs):
super(Zipalign, self).__init__(*args, **kwargs)
self._distdir = self.get_options().pants_distdir
def _render_args(self, package, target):
"""Create arg list for the zipalign process.
:param string package: Location of a signed apk product from the SignApk task.
:param AndroidBinary target: Target to be zipaligned.
"""
# Glossary of used zipalign flags:
# : '-f' is to force overwrite of existing outfile.
# : '4' is the mandated byte-alignment boundaries. If not 4, zipalign doesn't do anything.
# : Final two args are infile, outfile.
output_name = '{0}.signed.apk'.format(target.manifest.package_name)
outfile = os.path.join(self.zipalign_out(target), output_name)
args = [self.zipalign_binary(target), '-f', '4', package, outfile]
logger.debug('Executing: {0}'.format(' '.join(args)))
return args
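    # Illustrative shape of the rendered command (comment sketch; the paths are
    # placeholders, not values produced by this task):
    #   <sdk>/build-tools/<version>/zipalign -f 4 \
    #       <path/to/input.signed.apk> <distdir>/<target.name>/<package_name>.signed.apk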
def execute(self):
targets = self.context.targets(self.is_zipaligntarget)
for target in targets:
def get_products_path(target):
"""Get path of target's apks that are signed with release keystores by SignApk task."""
apks = self.context.products.get('release_apk')
packages = apks.get(target)
if packages:
for tgts, products in packages.items():
for prod in products:
yield os.path.join(tgts, prod)
packages = list(get_products_path(target))
for package in packages:
safe_mkdir(self.zipalign_out(target))
args = self._render_args(package, target)
with self.context.new_workunit(name='zipalign', labels=[WorkUnitLabel.MULTITOOL]) as workunit:
returncode = subprocess.call(args, stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if returncode:
raise TaskError('The zipalign process exited non-zero: {0}'.format(returncode))
def zipalign_binary(self, target):
"""Return the appropriate zipalign binary."""
zipalign_binary = os.path.join('build-tools', target.build_tools_version, 'zipalign')
return self.android_sdk.register_android_tool(zipalign_binary)
def zipalign_out(self, target):
"""Compute the outdir for the zipalign task."""
return os.path.join(self._distdir, target.name)
| apache-2.0 | 7,741,950,886,075,162,000 | 38.616279 | 102 | 0.687702 | false |
someboredkiddo/garcon | tests/fixtures/flows/example.py | 3 | 1046 | from __future__ import print_function
from garcon import activity
from garcon import runner
domain = 'dev'
name = 'workflow_name'
create = activity.create(domain, name)
activity_1 = create(
name='activity_1',
tasks=runner.Sync(
lambda activity, context:
print('activity_1')))
activity_2 = create(
name='activity_2',
requires=[activity_1],
tasks=runner.Async(
lambda activity, context:
print('activity_2_task_1'),
lambda activity, context:
print('activity_2_task_2')))
activity_3 = create(
name='activity_3',
requires=[activity_1],
tasks=runner.Sync(
lambda activity, context:
print('activity_3')))
activity_4 = create(
name='activity_4',
requires=[activity_3, activity_2],
tasks=runner.Sync(
lambda activity, context:
print('activity_4')))
def on_exception(actor, exception):
"""Handler for exceptions.
Useful if you use sentry or other similar systems.
"""
print(exception)
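# Execution-order sketch derived from the ``requires`` arguments above
# (illustrative comment, not part of the original example flow):
#
#   activity_1 -> activity_2 (async, two tasks) --+
#   activity_1 -> activity_3 (sync) --------------+--> activity_4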
| mit | 861,737,106,363,308,300 | 21.255319 | 54 | 0.622371 | false |
neilLasrado/erpnext | erpnext/templates/pages/integrations/gocardless_checkout.py | 5 | 2805 | # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt
import json
from erpnext.erpnext_integrations.doctype.gocardless_settings.gocardless_settings import gocardless_initialization, get_gateway_controller
from frappe.utils import get_url
no_cache = 1
expected_keys = ('amount', 'title', 'description', 'reference_doctype', 'reference_docname',
'payer_name', 'payer_email', 'order_id', 'currency')
def get_context(context):
context.no_cache = 1
# all these keys exist in form_dict
if not (set(expected_keys) - set(frappe.form_dict.keys())):
for key in expected_keys:
context[key] = frappe.form_dict[key]
context['amount'] = flt(context['amount'])
gateway_controller = get_gateway_controller(context.reference_docname)
context['header_img'] = frappe.db.get_value("GoCardless Settings", gateway_controller, "header_img")
else:
frappe.redirect_to_message(_('Some information is missing'),
_('Looks like someone sent you to an incomplete URL. Please ask them to look into it.'))
frappe.local.flags.redirect_location = frappe.local.response.location
raise frappe.Redirect
@frappe.whitelist(allow_guest=True)
def check_mandate(data, reference_doctype, reference_docname):
data = json.loads(data)
client = gocardless_initialization(reference_docname)
payer = frappe.get_doc("Customer", data["payer_name"])
if payer.customer_type == "Individual" and payer.customer_primary_contact is not None:
primary_contact = frappe.get_doc("Contact", payer.customer_primary_contact)
prefilled_customer = {
"company_name": payer.name,
"given_name": primary_contact.first_name,
}
if primary_contact.last_name is not None:
prefilled_customer.update({"family_name": primary_contact.last_name})
if primary_contact.email_id is not None:
prefilled_customer.update({"email": primary_contact.email_id})
else:
prefilled_customer.update({"email": frappe.session.user})
else:
prefilled_customer = {
"company_name": payer.name,
"email": frappe.session.user
}
success_url = get_url("./integrations/gocardless_confirmation?reference_doctype=" + reference_doctype + "&reference_docname=" + reference_docname)
try:
redirect_flow = client.redirect_flows.create(params={
"description": _("Pay {0} {1}".format(data['amount'], data['currency'])),
"session_token": frappe.session.user,
"success_redirect_url": success_url,
"prefilled_customer": prefilled_customer
})
return {"redirect_to": redirect_flow.redirect_url}
except Exception as e:
frappe.log_error(e, "GoCardless Payment Error")
return {"redirect_to": '/integrations/payment-failed'} | gpl-3.0 | 4,937,311,264,333,105,000 | 35.441558 | 147 | 0.729768 | false |
popazerty/obh-sh4 | lib/python/Tools/NumericalTextInput.py | 17 | 3773 | # -*- coding: UTF-8 -*-
from enigma import eTimer
from Components.Language import language
# Dict languageCode -> array of strings
MAP_SEARCH = (
u"%_0",
u" 1",
u"abc2",
u"def3",
u"ghi4",
u"jkl5",
u"mno6",
u"pqrs7",
u"tuv8",
u"wxyz9",
)
MAP_SEARCH_UPCASE = (
U"0%_",
U"1 ",
U"ABC2",
U"DEF3",
U"GHI4",
U"JKL5",
U"MNO6",
U"PQRS7",
U"TUV8",
U"WXYZ9",
)
MAP_DEFAULT = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"abc2ABC",
u"def3DEF",
u"ghi4GHI",
u"jkl5JKL",
u"mno6MNO",
u"pqrs7PQRS",
u"tuv8TUV",
u"wxyz9WXYZ",
)
MAP_DE = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"abcä2ABCÄ",
u"def3DEF",
u"ghi4GHI",
u"jkl5JKL",
u"mnoö6MNOÖ",
u"pqrsß7PQRSß",
u"tuvü8TUVÜ",
u"wxyz9WXYZ",
)
MAP_ES = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"abcáà2ABCÁÀ",
u"deéèf3DEFÉÈ",
u"ghiíì4GHIÍÌ",
u"jkl5JKL",
u"mnñoóò6MNÑOÓÒ",
u"pqrs7PQRS",
u"tuvúù8TUVÚÙ",
u"wxyz9WXYZ",
)
MAP_SE = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"abcåä2ABCÅÄ",
u"defé3DEFÉ",
u"ghi4GHI",
u"jkl5JKL",
u"mnoö6MNOÖ",
u"pqrs7PQRS",
u"tuv8TUV",
u"wxyz9WXYZ",
)
MAP_CZ = (
u"0,?'+\"()@$!=&*%",
u" 1.:;/-_",
u"abc2áčABCÁČ",
u"def3ďéěDEFĎÉĚ",
u"ghi4íGHIÍ",
u"jkl5JKL",
u"mno6ňóMNOŇÓ",
u"pqrs7řšPQRSŘŠ",
u"tuv8ťúůTUVŤÚŮ",
u"wxyz9ýžWXYZÝŽ",
)
MAP_SK = (
u"0,?'+\"()@$!=&*%",
u" 1.:;/-_",
u"abc2áäčABCÁÄČ",
u"def3ďéěDEFĎÉĚ",
u"ghi4íGHIÍ",
u"jkl5ľĺJKLĽĹ",
u"mno6ňóöôMNOŇÓÖÔ",
u"pqrs7řŕšPQRSŘŔŠ",
u"tuv8ťúůüTUVŤÚŮÜ",
u"wxyz9ýžWXYZÝŽ",
)
MAP_PL = (
u"0,?'+\"()@$!=&*%",
u" 1.:;/-_",
u"abcąć2ABCĄĆ",
u"defę3DEFĘ",
u"ghi4GHI",
u"jklł5JKLŁ",
u"mnońó6MNOŃÓ",
u"pqrsś7PQRSŚ",
u"tuv8TUV",
u"wxyzźż9WXYZŹŻ",
)
MAP_RU = (
u"0,?'+\"()@$!=&*%",
u" 1.:;/-_",
u"abcабвг2ABCАБВГ",
u"defдежз3DEFДЕЖЗ",
u"ghiийкл4GHIИЙКЛ",
u"jklмноп5JKLМНОП",
u"mnoрсту6MNOРСТУ",
u"pqrsфхцч7PQRSФХЦЧ",
u"tuvшщьы8TUVШЩЬЫ",
u"wxyzъэюя9WXYZЪЭЮЯ",
)
MAP_LV = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"aābcč2AĀBCČ",
u"deēf3DEĒF",
u"gģhiī4GĢHIĪ",
u"jkķlļ5JKĶLĻ",
u"mnņo6MNŅO",
u"pqrsš7PQRSŠ",
u"tuūv8TUŪV",
u"wxyzž9WXYZŽ",
)
MAPPINGS = {
'de_DE': MAP_DE,
'es_ES': MAP_ES,
'sv_SE': MAP_SE,
'fi_FI': MAP_SE,
'cs_CZ': MAP_CZ,
'sk_SK': MAP_SK,
'pl_PL': MAP_PL,
'ru_RU': MAP_RU,
'lv_LV': MAP_LV,
}
class NumericalTextInput:
def __init__(self, nextFunc=None, handleTimeout = True, search = False, mapping = None):
self.useableChars=None
self.nextFunction=nextFunc
if handleTimeout:
self.timer = eTimer()
self.timer.callback.append(self.timeout)
else:
self.timer = None
self.lastKey = -1
self.pos = -1
if mapping is not None:
self.mapping = mapping
elif search:
self.mapping = MAP_SEARCH
else:
self.mapping = MAPPINGS.get(language.getLanguage(), MAP_DEFAULT)
def setUseableChars(self, useable):
self.useableChars = unicode(useable)
def getKey(self, num):
cnt=0
if self.lastKey != num:
if self.lastKey != -1:
self.nextChar()
self.lastKey = num
self.pos = -1
if self.timer is not None:
self.timer.start(1000, True)
while True:
self.pos += 1
if len(self.mapping[num]) <= self.pos:
self.pos = 0
if self.useableChars:
pos = self.useableChars.find(self.mapping[num][self.pos])
if pos == -1:
cnt += 1
if cnt < len(self.mapping[num]):
continue
else:
return None
break
return self.mapping[num][self.pos]
def nextKey(self):
if self.timer is not None:
self.timer.stop()
self.lastKey = -1
def nextChar(self):
self.nextKey()
if self.nextFunction:
self.nextFunction()
def timeout(self):
if self.lastKey != -1:
self.nextChar()
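# Illustrative behaviour (comment sketch, not part of the original module):
# with the default MAP_DEFAULT table, repeated getKey(2) calls cycle through
# 'a' -> 'b' -> 'c' -> '2' -> 'A' -> 'B' -> 'C' and then wrap around; one
# second after the last key press the eTimer fires timeout(), which commits
# the pending character via nextChar()/nextFunction.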
| gpl-2.0 | 4,386,939,903,795,665,000 | 16.183575 | 89 | 0.593478 | false |
orderup/open-data-science | postgres2redshift/p2r_add_sortkeys.py | 4 | 3814 | #!/usr/bin/python
__author__ = 'Vlad Dubovskiy, DonorsChoose.org'
# Takes raw or cleaned up schema, infers sortkeys from all constraints, assigns manual sortkeys and inferred sortkeys to tables, exports as a new schema file.
# Usage: python p2r_add_sortkeys.py -i db_schema_clean.sql -o db_schema_final.sql
import re
import argparse
########################################
# Setup Globals
########################################
# They are passed through command line arguments. Keeping here for reference
#input_filename = 'db_schema_clean.sql'
#output_filename = 'db_schema_final.sql'
#raw_schema_flag = False # we cleaned up the file with some regex in p2r_main.sh before sending it to python.
# You might have to do the same, as this flag doesn't handle all cases (ex: CHECK constraints will stay in the final file, which you don't want)
########################################
# Define custom sortkeys
########################################
# These are sortkeys added on top of the existing constraints in the database. It's good practice to add sortkeys to columns you filter on a lot, like dates
# By default, no manualkeys are set up. Format: manualkeys = {'table1': 'col1', 'table1': 'col2', 'table2': 'col1'}
manualkeys = {}
def add_sortkeys(input_filename, output_filename, raw_schema_flag=False):
with open(input_filename) as input_file:
lines = input_file.readlines()
text = ' '.join(lines)
# Building automatic table:sortkey dict from existent constraints
alter_pattern = r'ALTER TABLE[^;]*(?=PRIMARY KEY|FOREIGN KEY|UNIQUE).*'
alter_statements = re.findall(alter_pattern, text)
autokeys={}
for con in alter_statements:
line = con.split('\n')
if raw_schema_flag is True:
t = re.findall(r'ALTER TABLE ONLY(.*?)$', line[0])
else: t = re.findall(r'ALTER TABLE(.*?)$', line[0])
c = re.findall(r'FOREIGN KEY \((.*?)\)|PRIMARY KEY \((.*?)\)|UNIQUE \((.*?)\)', line[1])
    # findall with alternatives returns tuples; flatten them and drop the empty matches
vals = []
for x in list(c[0]):
if len(x) > 0:
vals.append(x)
# check if a table was already added to the dict
table = t[0].strip()
if table not in autokeys:
autokeys[table] = []
autokeys[table].append(','.join(vals))
else:
autokeys[table].append(','.join(vals))
# remove duplicate and merged values from autokeys for each table
for key, value in autokeys.iteritems():
autokeys[key] = list(set([x.strip() for x in ','.join(value).split(',')]))
# append manual keys to autokeys before populating tables with sortkeys:
for key, value in manualkeys.iteritems():
autokeys[key].append(value)
  # Keep the tables that don't need sortkeys as well
# Build a list of all tables in the schema and then ensure they are being printed out in the for loop
table_name = r'CREATE TABLE(.*?)\('
# remove any whitespaces
tables = [t.strip() for t in re.findall(table_name, text)]
output_file = open(output_filename, 'w')
# loop through all tables
for table in tables:
pattern = '(CREATE TABLE '+table+'\s[^;]*)'
# if it needs a sortkey
if table in autokeys.keys():
columns = autokeys[table]
match = re.findall(pattern, text)
block = re.sub(pattern, match[0]+' sortkey(%s)'+';', match[0]) %(', '.join(columns))
output_file.write('\n\n %s' % block)
else:
output_file.write('\n\n %s' % re.findall(pattern, text)[0]+';')
output_file.write('\n\n')
input_file.close()
output_file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This script adds sortkeys to postgres schema dump')
parser.add_argument('-i','--input', help='Input file name', required=True)
parser.add_argument('-o','--output',help='Output file name', required=True)
args = parser.parse_args()
add_sortkeys(args.input, args.output)
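  # Illustrative output (comment only): for a table "foo" whose primary key is
  # "id" and which also has a manual sortkey "created_at", the emitted DDL ends
  # up roughly as
  #   CREATE TABLE foo ( ... ) sortkey(id, created_at);
  # (the column order inside sortkey() is not guaranteed, since the keys are
  # deduplicated through a set above).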
| gpl-2.0 | -6,626,431,401,281,253,000 | 39.147368 | 160 | 0.659675 | false |
rhyolight/nupic.son | app/soc/mapreduce/cache_list_items.py | 1 | 1641 | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce to cache datastore entities for lists."""
import json
import pickle
from google.appengine.ext import ndb
from mapreduce import context
def mapProcess(entity):
# TODO: (Aruna) Fix this import
from melange.utils import lists
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
list_id = params['list_id']
col_funcs = [(c.col_id, c.getValue) for c in lists.getList(list_id).columns]
query_pickle = params['query_pickle']
query = pickle.loads(query_pickle)
data_id = lists.getDataId(query)
if(query.filter('__key__', entity.key()).get()):
item = json.dumps(lists.toListItemDict(entity, col_funcs))
yield (data_id, item)
def reduceProcess(data_id, entities):
  # TODO: (Aruna) Fix these imports
from melange.logic import cached_list
from melange.utils import lists
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
list_id = params['list_id']
ndb.transaction(lambda: cached_list.setCacheItems(
data_id, map(json.loads, entities), lists.getList(list_id).valid_period))
| apache-2.0 | 4,808,358,405,876,599,000 | 28.303571 | 79 | 0.729433 | false |
Abrackadabra/CloudBot | plugins/dramatica.py | 33 | 1332 | import re
from urllib import parse
from lxml import html
import requests
from cloudbot import hook
from cloudbot.util import formatting
api_url = "http://encyclopediadramatica.se/api.php"
ed_url = "http://encyclopediadramatica.se/"
@hook.command()
def drama(text):
"""<phrase> - gets the first paragraph of the Encyclopedia Dramatica article on <phrase>"""
search_response = requests.get(api_url, params={"action": "opensearch", "search": text})
if search_response.status_code != requests.codes.ok:
return "Error searching: {}".format(search_response.status_code)
data = search_response.json()
if not data[1]:
return "No results found."
article_name = data[1][0].replace(' ', '_')
url = ed_url + parse.quote(article_name, '')
page_response = requests.get(url)
if page_response.status_code != requests.codes.ok:
return "Error getting page: {}".format(page_response.status_code)
page = html.fromstring(page_response.text)
for p in page.xpath('//div[@id="bodyContent"]/p'):
if p.text_content():
summary = " ".join(p.text_content().splitlines())
summary = re.sub("\[\d+\]", "", summary)
summary = formatting.truncate(summary, 220)
return "{} - {}".format(summary, url)
return "Unknown Error."
| gpl-3.0 | 4,564,413,650,858,513,400 | 28.6 | 95 | 0.644895 | false |
sysbot/CouchPotatoServer | couchpotato/core/providers/torrent/ilovetorrents/__init__.py | 5 | 1983 | from .main import ILoveTorrents
def start():
return ILoveTorrents()
config = [{
'name': 'ilovetorrents',
'groups': [
{
'tab': 'searcher',
'list': 'torrent_providers',
'name': 'ILoveTorrents',
'description': 'Where the Love of Torrents is Born',
'wizard': True,
'options': [
{
'name': 'enabled',
'type': 'enabler',
'default': False
},
{
'name': 'username',
'label': 'Username',
'type': 'string',
'default': '',
'description': 'The user name for your ILT account',
},
{
'name': 'password',
'label': 'Password',
'type': 'password',
'default': '',
'description': 'The password for your ILT account.',
},
{
'name': 'seed_ratio',
'label': 'Seed ratio',
'type': 'float',
'default': 1,
'description': 'Will not be (re)moved until this seed ratio is met.',
},
{
'name': 'seed_time',
'label': 'Seed time',
'type': 'int',
'default': 40,
'description': 'Will not be (re)moved until this seed time (in hours) is met.',
},
{
'name': 'extra_score',
'advanced': True,
'label': 'Extra Score',
'type': 'int',
'default': 0,
'description': 'Starting score for each release found via this provider.',
}
],
}
]
}]
| gpl-3.0 | 605,895,064,743,644,000 | 31.508197 | 99 | 0.347958 | false |
Fusion-Rom/android_external_chromium_org | tools/memory_inspector/memory_inspector/frontends/www_server.py | 35 | 28613 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module implements a simple WSGI server for the memory_inspector Web UI.
The WSGI server essentially handles two kinds of requests:
- /ajax/foo/bar: The AJAX endpoints which exchange JSON data with the JS.
Requests routing is achieved using a simple @uri decorator which simply
performs regex matching on the request path.
- /static/content: Anything not matching the /ajax/ prefix is treated as a
static content request (for serving the index.html and JS/CSS resources).
The following HTTP status codes are returned by the server:
- 200 - OK: The request was handled correctly.
- 404 - Not found: None of the defined handlers matched the /request/path.
- 410 - Gone: The path was matched but the handler returned an empty response.
This typically happens when the target device is disconnected.
"""
import cgi
import collections
import datetime
import dateutil.parser
import glob
import json
import memory_inspector
import mimetypes
import os
import posixpath
import re
import urlparse
import uuid
import wsgiref.simple_server
from memory_inspector import constants
from memory_inspector.core import backends
from memory_inspector.core import memory_map
from memory_inspector.classification import mmap_classifier
from memory_inspector.classification import native_heap_classifier
from memory_inspector.data import serialization
from memory_inspector.data import file_storage
from memory_inspector.frontends import background_tasks
_HTTP_OK = '200 - OK'
_HTTP_GONE = '410 - Gone'
_HTTP_NOT_FOUND = '404 - Not Found'
_PERSISTENT_STORAGE_PATH = os.path.join(
os.path.expanduser('~'), '.config', 'memory_inspector')
_CONTENT_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'www_content'))
_APP_PROCESS_RE = r'^[\w.:]+$' # Regex for matching app processes.
_STATS_HIST_SIZE = 120 # Keep at most 120 samples of stats per process.
_CACHE_LEN = 10 # Max length of |_cached_objs|.
# |_cached_objs| keeps the state of short-lived objects that the client needs to
# look up in subsequent AJAX calls.
_cached_objs = collections.OrderedDict()
_persistent_storage = file_storage.Storage(_PERSISTENT_STORAGE_PATH)
_proc_stats_history = {} # /Android/device/PID -> deque([stats@T=0, stats@T=1])
class UriHandler(object):
"""Base decorator used to automatically route /requests/by/path.
Each handler is called with the following args:
args: a tuple of the matching regex groups.
req_vars: a dictionary of request args (querystring for GET, body for POST).
Each handler must return a tuple with the following elements:
http_code: a string with the HTTP status code (e.g., '200 - OK')
headers: a list of HTTP headers (e.g., [('Content-Type': 'foo/bar')])
body: the HTTP response body.
"""
_handlers = []
def __init__(self, path_regex, verb='GET', output_filter=None):
self._path_regex = path_regex
self._verb = verb
default_output_filter = lambda *x: x # Just return the same args unchanged.
self._output_filter = output_filter or default_output_filter
def __call__(self, handler):
UriHandler._handlers += [(
self._verb, self._path_regex, self._output_filter, handler)]
@staticmethod
def Handle(method, path, req_vars):
"""Finds a matching handler and calls it (or returns a 404 - Not Found)."""
for (match_method, path_regex, output_filter, fn) in UriHandler._handlers:
if method != match_method:
continue
m = re.match(path_regex, path)
if not m:
continue
(http_code, headers, body) = fn(m.groups(), req_vars)
return output_filter(http_code, headers, body)
return (_HTTP_NOT_FOUND, [], 'No AJAX handlers found')
class AjaxHandler(UriHandler):
"""Decorator for routing AJAX requests.
This decorator essentially groups the JSON serialization and the cache headers
which is shared by most of the handlers defined below.
"""
def __init__(self, path_regex, verb='GET'):
super(AjaxHandler, self).__init__(
path_regex, verb, AjaxHandler.AjaxOutputFilter)
@staticmethod
def AjaxOutputFilter(http_code, headers, body):
serialized_content = json.dumps(body, cls=serialization.Encoder)
extra_headers = [('Cache-Control', 'no-cache'),
('Expires', 'Fri, 19 Sep 1986 05:00:00 GMT')]
return http_code, headers + extra_headers, serialized_content
@AjaxHandler('/ajax/backends')
def _ListBackends(args, req_vars): # pylint: disable=W0613
return _HTTP_OK, [], [backend.name for backend in backends.ListBackends()]
@AjaxHandler('/ajax/devices')
def _ListDevices(args, req_vars): # pylint: disable=W0613
resp = []
for device in backends.ListDevices():
# The device settings must loaded at discovery time (i.e. here), not during
# startup, because it might have been plugged later.
for k, v in _persistent_storage.LoadSettings(device.id).iteritems():
device.settings[k] = v
resp += [{'backend': device.backend.name,
'id': device.id,
'name': device.name}]
return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/dump/mmap/(\w+)/(\w+)/(\d+)')
def _DumpMmapsForProcess(args, req_vars): # pylint: disable=W0613
"""Dumps memory maps for a process.
The response is formatted according to the Google Charts DataTable format.
"""
process = _GetProcess(args)
if not process:
return _HTTP_GONE, [], 'Device not found or process died'
mmap = process.DumpMemoryMaps()
table = _ConvertMmapToGTable(mmap)
# Store the dump in the cache. The client might need it later for profiling.
cache_id = _CacheObject(mmap)
return _HTTP_OK, [], {'table': table, 'id': cache_id}
@AjaxHandler('/ajax/initialize/(\w+)/(\w+)$', 'POST')
def _InitializeDevice(args, req_vars): # pylint: disable=W0613
device = _GetDevice(args)
if not device:
return _HTTP_GONE, [], 'Device not found'
device.Initialize()
if req_vars['enableNativeTracing']:
device.EnableNativeTracing(True)
return _HTTP_OK, [], {
'isNativeTracingEnabled': device.IsNativeTracingEnabled()}
@AjaxHandler(r'/ajax/profile/create', 'POST')
def _CreateProfile(args, req_vars): # pylint: disable=W0613
"""Creates (and caches) a profile from a set of dumps.
The profiling data can be retrieved afterwards using the /profile/{PROFILE_ID}
endpoints (below).
"""
classifier = None # A classifier module (/classification/*_classifier.py).
dumps = {} # dump-time -> obj. to classify (e.g., |memory_map.Map|).
for arg in 'type', 'source', 'ruleset':
assert(arg in req_vars), 'Expecting %s argument in POST data' % arg
# Step 1: collect the memory dumps, according to what the client specified in
# the 'type' and 'source' POST arguments.
# Case 1a: The client requests to load data from an archive.
if req_vars['source'] == 'archive':
archive = _persistent_storage.OpenArchive(req_vars['archive'])
if not archive:
return _HTTP_GONE, [], 'Cannot open archive %s' % req_vars['archive']
first_timestamp = None
for timestamp_str in req_vars['snapshots']:
timestamp = dateutil.parser.parse(timestamp_str)
first_timestamp = first_timestamp or timestamp
time_delta = int((timestamp - first_timestamp).total_seconds())
if req_vars['type'] == 'mmap':
dumps[time_delta] = archive.LoadMemMaps(timestamp)
elif req_vars['type'] == 'nheap':
dumps[time_delta] = archive.LoadNativeHeap(timestamp)
# Case 1b: Use a dump recently cached (only mmap, via _DumpMmapsForProcess).
elif req_vars['source'] == 'cache':
assert(req_vars['type'] == 'mmap'), 'Only cached mmap dumps are supported.'
dumps[0] = _GetCacheObject(req_vars['id'])
if not dumps:
return _HTTP_GONE, [], 'No memory dumps could be retrieved'
# Initialize the classifier (mmap or nheap) and prepare symbols for nheap.
if req_vars['type'] == 'mmap':
classifier = mmap_classifier
elif req_vars['type'] == 'nheap':
classifier = native_heap_classifier
if not archive.HasSymbols():
return _HTTP_GONE, [], 'No symbols in archive %s' % req_vars['archive']
symbols = archive.LoadSymbols()
for nheap in dumps.itervalues():
nheap.SymbolizeUsingSymbolDB(symbols)
if not classifier:
return _HTTP_GONE, [], 'Classifier %s not supported.' % req_vars['type']
# Step 2: Load the rule-set specified by the client in the 'ruleset' POST arg.
if req_vars['ruleset'] == 'heuristic':
assert(req_vars['type'] == 'nheap'), (
'heuristic rules are supported only for nheap')
rules = native_heap_classifier.InferHeuristicRulesFromHeap(dumps[0])
else:
rules_path = os.path.join(constants.CLASSIFICATION_RULES_PATH,
req_vars['ruleset'])
if not os.path.isfile(rules_path):
return _HTTP_GONE, [], 'Cannot find the rule-set %s' % rules_path
with open(rules_path) as f:
rules = classifier.LoadRules(f.read())
# Step 3: Aggregate the dump data using the classifier and generate the
# profile data (which will be kept cached here in the server).
# The resulting profile will consist of 1+ snapshots (depending on the number
# dumps the client has requested to process) and a number of 1+ metrics
# (depending on the buckets' keys returned by the classifier).
# Converts the {time: dump_obj} dict into a {time: |AggregatedResult|} dict.
# using the classifier.
snapshots = collections.OrderedDict((time, classifier.Classify(dump, rules))
for time, dump in sorted(dumps.iteritems()))
# Add the profile to the cache (and eventually discard old items).
# |profile_id| is the key that the client will use in subsequent requests
# (to the /ajax/profile/{ID}/ endpoints) to refer to this particular profile.
profile_id = _CacheObject(snapshots)
first_snapshot = next(snapshots.itervalues())
return _HTTP_OK, [], {'id': profile_id,
'times': snapshots.keys(),
'metrics': first_snapshot.keys,
'rootBucket': first_snapshot.total.name + '/'}
@AjaxHandler(r'/ajax/profile/(\w+)/tree/(\d+)/(\d+)')
def _GetProfileTreeDataForSnapshot(args, req_vars): # pylint: disable=W0613
"""Gets the data for the tree chart for a given time and metric.
The response is formatted according to the Google Charts DataTable format.
"""
snapshot_id = args[0]
metric_index = int(args[1])
time = int(args[2])
snapshots = _GetCacheObject(snapshot_id)
if not snapshots:
return _HTTP_GONE, [], 'Cannot find the selected profile.'
if time not in snapshots:
return _HTTP_GONE, [], 'Cannot find snapshot at T=%d.' % time
snapshot = snapshots[time]
if metric_index >= len(snapshot.keys):
return _HTTP_GONE, [], 'Invalid metric id %d' % metric_index
resp = {'cols': [{'label': 'bucket', 'type': 'string'},
{'label': 'parent', 'type': 'string'}],
'rows': []}
def VisitBucketAndAddRows(bucket, parent_id=''):
"""Recursively creates the (node, parent) visiting |ResultTree| in DFS."""
node_id = parent_id + bucket.name + '/'
node_label = '<dl><dt>%s</dt><dd>%s</dd></dl>' % (
bucket.name, _StrMem(bucket.values[metric_index]))
resp['rows'] += [{'c': [
{'v': node_id, 'f': node_label},
{'v': parent_id, 'f': None},
]}]
for child in bucket.children:
VisitBucketAndAddRows(child, node_id)
VisitBucketAndAddRows(snapshot.total)
return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/profile/(\w+)/time_serie/(\d+)/(.*)$')
def _GetTimeSerieForSnapshot(args, req_vars): # pylint: disable=W0613
"""Gets the data for the area chart for a given metric and bucket.
The response is formatted according to the Google Charts DataTable format.
"""
snapshot_id = args[0]
metric_index = int(args[1])
bucket_path = args[2]
snapshots = _GetCacheObject(snapshot_id)
if not snapshots:
return _HTTP_GONE, [], 'Cannot find the selected profile.'
if metric_index >= len(next(snapshots.itervalues()).keys):
return _HTTP_GONE, [], 'Invalid metric id %d' % metric_index
def FindBucketByPath(bucket, path, parent_path=''): # Essentially a DFS.
cur_path = parent_path + bucket.name + '/'
if cur_path == path:
return bucket
for child in bucket.children:
res = FindBucketByPath(child, path, cur_path)
if res:
return res
return None
# The resulting data table will look like this (assuming len(metrics) == 2):
# Time Ashmem Dalvik Other
# 0 (1024,0) (4096,1024) (0,0)
# 30 (512,512) (1024,1024) (0,512)
# 60 (0,512) (1024,0) (512,0)
resp = {'cols': [], 'rows': []}
for time, aggregated_result in snapshots.iteritems():
bucket = FindBucketByPath(aggregated_result.total, bucket_path)
if not bucket:
return _HTTP_GONE, [], 'Bucket %s not found' % bucket_path
# If the user selected a non-leaf bucket, display the breakdown of its
# direct children. Otherwise just the leaf bucket.
children_buckets = bucket.children if bucket.children else [bucket]
# Create the columns (form the buckets) when processing the first snapshot.
if not resp['cols']:
resp['cols'] += [{'label': 'Time', 'type': 'string'}]
for child_bucket in children_buckets:
resp['cols'] += [{'label': child_bucket.name, 'type': 'number'}]
row = [{'v': str(time), 'f': None}]
for child_bucket in children_buckets:
row += [{'v': child_bucket.values[metric_index] / 1024, 'f': None}]
resp['rows'] += [{'c': row}]
return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/profile/rules')
def _ListProfilingRules(args, req_vars): # pylint: disable=W0613
"""Lists the classification rule files available for profiling."""
rules = glob.glob(constants.CLASSIFICATION_RULES_PATH +
os.sep + '*' + os.sep + '*.py')
rules = [x.replace(constants.CLASSIFICATION_RULES_PATH, '')[1:] # Strip /.
for x in rules]
resp = {'mmap': filter(lambda x: 'mmap-' in x, rules),
'nheap': filter(lambda x: 'nheap-' in x, rules)}
resp['nheap'].insert(0, 'heuristic')
return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/ps/(\w+)/(\w+)$') # /ajax/ps/Android/a0b1c2[?all=1]
def _ListProcesses(args, req_vars): # pylint: disable=W0613
"""Lists processes and their CPU / mem stats.
The response is formatted according to the Google Charts DataTable format.
"""
device = _GetDevice(args)
if not device:
return _HTTP_GONE, [], 'Device not found'
resp = {
'cols': [
{'label': 'Pid', 'type':'number'},
{'label': 'Name', 'type':'string'},
{'label': 'Cpu %', 'type':'number'},
{'label': 'Mem RSS Kb', 'type':'number'},
{'label': '# Threads', 'type':'number'},
],
'rows': []}
for process in device.ListProcesses():
# Exclude system apps if the request didn't contain the ?all=1 arg.
if not req_vars.get('all') and not re.match(_APP_PROCESS_RE, process.name):
continue
stats = process.GetStats()
resp['rows'] += [{'c': [
{'v': process.pid, 'f': None},
{'v': process.name, 'f': None},
{'v': stats.cpu_usage, 'f': None},
{'v': stats.vm_rss, 'f': None},
{'v': stats.threads, 'f': None},
]}]
return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/stats/(\w+)/(\w+)$') # /ajax/stats/Android/a0b1c2
def _GetDeviceStats(args, req_vars): # pylint: disable=W0613
"""Lists device CPU / mem stats.
The response is formatted according to the Google Charts DataTable format.
"""
device = _GetDevice(args)
if not device:
return _HTTP_GONE, [], 'Device not found'
device_stats = device.GetStats()
cpu_stats = {
'cols': [
{'label': 'CPU', 'type':'string'},
{'label': 'Usr %', 'type':'number'},
{'label': 'Sys %', 'type':'number'},
{'label': 'Idle %', 'type':'number'},
],
'rows': []}
for cpu_idx in xrange(len(device_stats.cpu_times)):
cpu = device_stats.cpu_times[cpu_idx]
cpu_stats['rows'] += [{'c': [
{'v': '# %d' % cpu_idx, 'f': None},
{'v': cpu['usr'], 'f': None},
{'v': cpu['sys'], 'f': None},
{'v': cpu['idle'], 'f': None},
]}]
mem_stats = {
'cols': [
{'label': 'Section', 'type':'string'},
{'label': 'MB', 'type':'number', 'pattern': ''},
],
'rows': []}
for key, value in device_stats.memory_stats.iteritems():
mem_stats['rows'] += [{'c': [
{'v': key, 'f': None},
{'v': value / 1024, 'f': None}
]}]
return _HTTP_OK, [], {'cpu': cpu_stats, 'mem': mem_stats}
@AjaxHandler(r'/ajax/stats/(\w+)/(\w+)/(\d+)$') # /ajax/stats/Android/a0b1c2/42
def _GetProcessStats(args, req_vars): # pylint: disable=W0613
"""Lists CPU / mem stats for a given process (and keeps history).
The response is formatted according to the Google Charts DataTable format.
"""
process = _GetProcess(args)
if not process:
return _HTTP_GONE, [], 'Device not found'
proc_uri = '/'.join(args)
cur_stats = process.GetStats()
if proc_uri not in _proc_stats_history:
_proc_stats_history[proc_uri] = collections.deque(maxlen=_STATS_HIST_SIZE)
history = _proc_stats_history[proc_uri]
history.append(cur_stats)
cpu_stats = {
'cols': [
{'label': 'T', 'type':'string'},
{'label': 'CPU %', 'type':'number'},
{'label': '# Threads', 'type':'number'},
],
'rows': []
}
mem_stats = {
'cols': [
{'label': 'T', 'type':'string'},
{'label': 'Mem RSS Kb', 'type':'number'},
{'label': 'Page faults', 'type':'number'},
],
'rows': []
}
for stats in history:
cpu_stats['rows'] += [{'c': [
{'v': str(datetime.timedelta(seconds=stats.run_time)), 'f': None},
{'v': stats.cpu_usage, 'f': None},
{'v': stats.threads, 'f': None},
]}]
mem_stats['rows'] += [{'c': [
{'v': str(datetime.timedelta(seconds=stats.run_time)), 'f': None},
{'v': stats.vm_rss, 'f': None},
{'v': stats.page_faults, 'f': None},
]}]
return _HTTP_OK, [], {'cpu': cpu_stats, 'mem': mem_stats}
@AjaxHandler(r'/ajax/settings/(\w+)/?(\w+)?$') # /ajax/settings/Android[/id]
def _GetDeviceOrBackendSettings(args, req_vars): # pylint: disable=W0613
backend = backends.GetBackend(args[0])
if not backend:
return _HTTP_GONE, [], 'Backend not found'
if args[1]:
device = _GetDevice(args)
if not device:
return _HTTP_GONE, [], 'Device not found'
settings = device.settings
else:
settings = backend.settings
assert(isinstance(settings, backends.Settings))
resp = {}
for key in settings.expected_keys:
resp[key] = {'description': settings.expected_keys[key],
'value': settings.values[key]}
return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/settings/(\w+)/?(\w+)?$', 'POST')
def _SetDeviceOrBackendSettings(args, req_vars): # pylint: disable=W0613
backend = backends.GetBackend(args[0])
if not backend:
return _HTTP_GONE, [], 'Backend not found'
if args[1]:
device = _GetDevice(args)
if not device:
return _HTTP_GONE, [], 'Device not found'
settings = device.settings
storage_name = device.id
else:
settings = backend.settings
storage_name = backend.name
for key in req_vars.iterkeys():
settings[key] = req_vars[key]
_persistent_storage.StoreSettings(storage_name, settings.values)
return _HTTP_OK, [], ''
@AjaxHandler(r'/ajax/storage/list')
def _ListStorage(args, req_vars): # pylint: disable=W0613
resp = {
'cols': [
{'label': 'Archive', 'type':'string'},
{'label': 'Snapshot', 'type':'string'},
{'label': 'Mem maps', 'type':'boolean'},
{'label': 'N. Heap', 'type':'boolean'},
],
'rows': []}
for archive_name in _persistent_storage.ListArchives():
archive = _persistent_storage.OpenArchive(archive_name)
first_timestamp = None
for timestamp in archive.ListSnapshots():
first_timestamp = timestamp if not first_timestamp else first_timestamp
time_delta = '%d s.' % (timestamp - first_timestamp).total_seconds()
resp['rows'] += [{'c': [
{'v': archive_name, 'f': None},
{'v': timestamp.isoformat(), 'f': time_delta},
{'v': archive.HasMemMaps(timestamp), 'f': None},
{'v': archive.HasNativeHeap(timestamp), 'f': None},
]}]
return _HTTP_OK, [], resp
@AjaxHandler(r'/ajax/storage/(.+)/(.+)/mmaps')
def _LoadMmapsFromStorage(args, req_vars): # pylint: disable=W0613
archive = _persistent_storage.OpenArchive(args[0])
if not archive:
return _HTTP_GONE, [], 'Cannot open archive %s' % req_vars['archive']
timestamp = dateutil.parser.parse(args[1])
if not archive.HasMemMaps(timestamp):
return _HTTP_GONE, [], 'No mmaps for snapshot %s' % timestamp
mmap = archive.LoadMemMaps(timestamp)
return _HTTP_OK, [], {'table': _ConvertMmapToGTable(mmap)}
@AjaxHandler(r'/ajax/storage/(.+)/(.+)/nheap')
def _LoadNheapFromStorage(args, req_vars):
"""Returns a Google Charts DataTable dictionary for the nheap."""
archive = _persistent_storage.OpenArchive(args[0])
if not archive:
return _HTTP_GONE, [], 'Cannot open archive %s' % req_vars['archive']
timestamp = dateutil.parser.parse(args[1])
if not archive.HasNativeHeap(timestamp):
return _HTTP_GONE, [], 'No native heap dump for snapshot %s' % timestamp
nheap = archive.LoadNativeHeap(timestamp)
symbols = archive.LoadSymbols()
nheap.SymbolizeUsingSymbolDB(symbols)
resp = {
'cols': [
{'label': 'Allocated', 'type':'number'},
{'label': 'Resident', 'type':'number'},
{'label': 'Flags', 'type':'number'},
{'label': 'Stack Trace', 'type':'string'},
],
'rows': []}
for alloc in nheap.allocations:
strace = '<dl>'
for frame in alloc.stack_trace.frames:
# Use the fallback libname.so+0xaddr if symbol info is not available.
symbol_name = frame.symbol.name if frame.symbol else '??'
source_info = (str(frame.symbol.source_info[0]) if
frame.symbol and frame.symbol.source_info else frame.raw_address)
strace += '<dd title="%s">%s</dd><dt>%s</dt>' % (
cgi.escape(source_info),
cgi.escape(posixpath.basename(source_info)),
cgi.escape(symbol_name))
strace += '</dl>'
resp['rows'] += [{'c': [
{'v': alloc.size, 'f': _StrMem(alloc.size)},
{'v': alloc.resident_size, 'f': _StrMem(alloc.resident_size)},
{'v': alloc.flags, 'f': None},
{'v': strace, 'f': None},
]}]
return _HTTP_OK, [], resp
# /ajax/tracer/start/Android/device-id/pid
@AjaxHandler(r'/ajax/tracer/start/(\w+)/(\w+)/(\d+)', 'POST')
def _StartTracer(args, req_vars):
for arg in 'interval', 'count', 'traceNativeHeap':
assert(arg in req_vars), 'Expecting %s argument in POST data' % arg
process = _GetProcess(args)
if not process:
return _HTTP_GONE, [], 'Device not found or process died'
task_id = background_tasks.StartTracer(
storage_path=_PERSISTENT_STORAGE_PATH,
process=process,
interval=int(req_vars['interval']),
count=int(req_vars['count']),
trace_native_heap=req_vars['traceNativeHeap'])
return _HTTP_OK, [], task_id
@AjaxHandler(r'/ajax/tracer/status/(\d+)') # /ajax/tracer/status/{task_id}
def _GetTracerStatus(args, req_vars): # pylint: disable=W0613
task = background_tasks.Get(int(args[0]))
if not task:
return _HTTP_GONE, [], 'Task not found'
return _HTTP_OK, [], task.GetProgress()
@UriHandler(r'^(?!/ajax)/(.*)$')
def _StaticContent(args, req_vars): # pylint: disable=W0613
# Give the browser a 1-day TTL cache to minimize the start-up time.
cache_headers = [('Cache-Control', 'max-age=86400, public')]
req_path = args[0] if args[0] else 'index.html'
file_path = os.path.abspath(os.path.join(_CONTENT_DIR, req_path))
if (os.path.isfile(file_path) and
os.path.commonprefix([file_path, _CONTENT_DIR]) == _CONTENT_DIR):
mtype = 'text/plain'
guessed_mime = mimetypes.guess_type(file_path)
if guessed_mime and guessed_mime[0]:
mtype = guessed_mime[0]
with open(file_path, 'rb') as f:
body = f.read()
return _HTTP_OK, cache_headers + [('Content-Type', mtype)], body
return _HTTP_NOT_FOUND, cache_headers, file_path + ' not found'
def _GetDevice(args):
"""Returns a |backends.Device| instance from a /backend/device URI."""
assert(len(args) >= 2), 'Malformed request. Expecting /backend/device'
return backends.GetDevice(backend_name=args[0], device_id=args[1])
def _GetProcess(args):
"""Returns a |backends.Process| instance from a /backend/device/pid URI."""
assert(len(args) >= 3 and args[2].isdigit()), (
'Malformed request. Expecting /backend/device/pid')
device = _GetDevice(args)
if not device:
return None
return device.GetProcess(int(args[2]))
def _ConvertMmapToGTable(mmap):
"""Returns a Google Charts DataTable dictionary for the given mmap."""
assert(isinstance(mmap, memory_map.Map))
table = {
'cols': [
{'label': 'Start', 'type':'string'},
{'label': 'End', 'type':'string'},
{'label': 'Length Kb', 'type':'number'},
{'label': 'Prot', 'type':'string'},
{'label': 'RSS Kb', 'type':'number'},
{'label': 'Priv. Dirty Kb', 'type':'number'},
{'label': 'Priv. Clean Kb', 'type':'number'},
{'label': 'Shared Dirty Kb', 'type':'number'},
{'label': 'Shared Clean Kb', 'type':'number'},
{'label': 'File', 'type':'string'},
{'label': 'Offset', 'type':'number'},
{'label': 'Resident Pages', 'type':'string'},
],
'rows': []}
for entry in mmap.entries:
table['rows'] += [{'c': [
{'v': '%08x' % entry.start, 'f': None},
{'v': '%08x' % entry.end, 'f': None},
{'v': entry.len / 1024, 'f': None},
{'v': entry.prot_flags, 'f': None},
{'v': entry.rss_bytes / 1024, 'f': None},
{'v': entry.priv_dirty_bytes / 1024, 'f': None},
{'v': entry.priv_clean_bytes / 1024, 'f': None},
{'v': entry.shared_dirty_bytes / 1024, 'f': None},
{'v': entry.shared_clean_bytes / 1024, 'f': None},
{'v': entry.mapped_file, 'f': None},
{'v': entry.mapped_offset, 'f': None},
{'v': '[%s]' % (','.join(map(str, entry.resident_pages))), 'f': None},
]}]
return table
def _CacheObject(obj_to_store):
"""Stores an object in the server-side cache and returns its unique id."""
if len(_cached_objs) >= _CACHE_LEN:
_cached_objs.popitem(last=False)
obj_id = uuid.uuid4().hex
_cached_objs[obj_id] = obj_to_store
return str(obj_id)
def _GetCacheObject(obj_id):
"""Retrieves an object in the server-side cache by its id."""
return _cached_objs.get(obj_id)
def _StrMem(nbytes):
"""Converts a number (of bytes) into a human readable string (kb, mb)."""
UNITS = ['B', 'K', 'M', 'G']
for unit in UNITS:
if abs(nbytes) < 1024.0 or unit == UNITS[-1]:
return ('%3.1f' % nbytes).replace('.0','') + ' ' + unit
nbytes /= 1024.0
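# Illustrative values for _StrMem() above (comment sketch, not part of the
# original module): _StrMem(512) -> '512 B', _StrMem(2048) -> '2 K',
# _StrMem(3 * 1024 * 1024) -> '3 M'.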
def _HttpRequestHandler(environ, start_response):
"""Parses a single HTTP request and delegates the handling through UriHandler.
This essentially wires up wsgiref.simple_server with our @UriHandler(s).
"""
path = environ['PATH_INFO']
method = environ['REQUEST_METHOD']
if method == 'POST':
req_body_size = int(environ.get('CONTENT_LENGTH', 0))
req_body = environ['wsgi.input'].read(req_body_size)
req_vars = json.loads(req_body)
else:
req_vars = urlparse.parse_qs(environ['QUERY_STRING'])
(http_code, headers, body) = UriHandler.Handle(method, path, req_vars)
start_response(http_code, headers)
return [body]
def Start(http_port):
# Load the saved backends' settings (some of them might be needed to bootstrap
# as, for instance, the adb path for the Android backend).
memory_inspector.RegisterAllBackends()
for backend in backends.ListBackends():
for k, v in _persistent_storage.LoadSettings(backend.name).iteritems():
backend.settings[k] = v
httpd = wsgiref.simple_server.make_server('', http_port, _HttpRequestHandler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass # Don't print useless stack traces when the user hits CTRL-C.
background_tasks.TerminateAll() | bsd-3-clause | 2,631,374,592,987,371,500 | 36.600526 | 80 | 0.632649 | false |
lastship/plugin.video.lastship | resources/lib/modules/recaptcha/captcha9kw.py | 2 | 2975 | # -*- coding: UTF-8 -*-
"""
Lastship Add-on (C) 2019
Credits to Placenta and Covenant; our thanks go to their creators
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Addon Name: Lastship
# Addon id: plugin.video.lastship
# Addon Provider: LastShip
import xbmc
import json
from resources.lib.modules import client
from resources.lib.modules import control
from resources.lib.modules import utils
class captcha9KW:
def __init__(self):
self.ApiKey = control.setting('Captcha9kw.ApiKey')
self.SolveType = control.setting('Captcha9kw.SolveType')
self.IsAlive = True
self.time = int(control.setting('Recaptcha2.TimeOut'))
def solve(self, url, siteKey):
if self.ApiKey == "":
control.infoDialog("Kein Captcha9KW API-Key eingetragen!")
return
token = ''
post = {
'apikey': self.ApiKey,
'action': 'usercaptchaupload',
'interactive': '1',
'json': '1',
'file-upload-01': siteKey,
'oldsource': 'recaptchav2',
'pageurl': url,
'maxtimeout': self.time
}
if self.SolveType == 'true':
post['selfsolve'] = '1'
try:
token = ''
data = client.request('https://www.9kw.eu/index.cgi', post=post)
if data:
data = utils.byteify(json.loads(data))
if 'captchaid' in data:
captchaid = data['captchaid']
tries = 0
while tries < self.time and self.IsAlive:
tries += 1
xbmc.sleep(1000)
data = client.request('https://www.9kw.eu/index.cgi?apikey=' + self.ApiKey + '&action=usercaptchacorrectdata&json=1&id=' + captchaid)
if data:
print str(data)
data = utils.byteify(json.loads(data))
token = data['answer']
if token is not None and token != '':
break
except Exception as e:
print '9kw Error: ' + str(e)
return token
def setKill(self):
self.IsAlive = False
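# Illustrative usage sketch (comment only; the page URL and site key below are
# placeholders, not values from this add-on):
#   solver = captcha9KW()
#   token = solver.solve('https://example.org/stream', '6LcExampleSiteKey')
# solve() uploads the reCAPTCHA job to 9kw.eu, polls once per second up to the
# configured timeout, and returns the response token ('' if it was not solved;
# None plus an info dialog when no API key is configured).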
| gpl-3.0 | -1,577,141,404,081,919,500 | 32.195402 | 157 | 0.547563 | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationcertpolicy_vpnvserver_binding.py | 1 | 5341 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationcertpolicy_vpnvserver_binding(base_resource) :
""" Binding class showing the vpnvserver that can be bound to authenticationcertpolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._name = ""
self.___count = 0
@property
def name(self) :
"""Name of the client cert authentication policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the client cert authentication policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationcertpolicy_vpnvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationcertpolicy_vpnvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch authenticationcertpolicy_vpnvserver_binding resources.
"""
try :
obj = authenticationcertpolicy_vpnvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of authenticationcertpolicy_vpnvserver_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationcertpolicy_vpnvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count authenticationcertpolicy_vpnvserver_binding resources configued on NetScaler.
"""
try :
obj = authenticationcertpolicy_vpnvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of authenticationcertpolicy_vpnvserver_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = authenticationcertpolicy_vpnvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class authenticationcertpolicy_vpnvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.authenticationcertpolicy_vpnvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationcertpolicy_vpnvserver_binding = [authenticationcertpolicy_vpnvserver_binding() for _ in range(length)]
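# --- Editor's usage sketch (illustrative, not part of the NITRO SDK source) ---
# Shows how the classmethods defined above are typically driven. The session
# details below (the nitro_service import, NetScaler address, credentials and
# policy name) are assumptions for illustration only.
def _example_fetch_bindings():
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	client = nitro_service("10.0.0.10", "HTTP")  # hypothetical NSIP
	client.login("nsroot", "nsroot")  # hypothetical credentials
	try:
		# Fetch every vpnvserver binding of the named cert policy and count them.
		bindings = authenticationcertpolicy_vpnvserver_binding.get(client, "my_cert_policy")
		total = authenticationcertpolicy_vpnvserver_binding.count(client, "my_cert_policy")
		return bindings, total
	finally:
		client.logout()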
| apache-2.0 | -4,838,830,044,407,734,000 | 28.346154 | 145 | 0.709792 | false |
cheezium/seniordesign | arduo_joy/build/catkin_generated/installspace/_setup_util.py | 4 | 10309 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import os
import platform
import sys
# environment at generation time
CMAKE_PREFIX_PATH = '/opt/ros/groovy'.split(';')
setup_dir = '/usr/local'
if setup_dir and setup_dir not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, setup_dir)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'CPATH': 'include',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': 'lib',
'PATH': 'bin',
'PKG_CONFIG_PATH': 'lib/pkgconfig',
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolder = env_var_subfolders[key]
value = _rollback_env_variable(unmodified_environ, key, subfolder)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolder):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolder: str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte'))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolder):
'''
    Return the prefix to prepend to the environment variable NAME, adding any path in the given list of paths without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if subfolder:
path = os.path.join(path, subfolder)
# exclude any path already in env and any path we already added
if path not in environ_paths and path not in checked_paths:
checked_paths.append(path)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
specific_env_hooks = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
generic_env_hooks.remove(generic_env_hooks_by_filename[filename])
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
specific_env_hooks.remove(specific_env_hooks_by_filename[filename])
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS', os.pathsep.join(generic_env_hooks + specific_env_hooks)))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
exit(1)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
sys.exit(0)
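# --- Editor's sketch (illustrative, not emitted by catkin) --------------------
# Shows how the helpers above compose on a POSIX shell; 'demo_environ' and the
# '/usr/local' workspace path are hypothetical values.
def _demo_prepend_cmake_prefix_path():
    demo_environ = {'CMAKE_PREFIX_PATH': '/opt/ros/groovy'}
    # '/usr/local' is not in the variable yet, so it becomes the prefix (with a
    # trailing separator because the variable already has content).
    prefix = _prefix_env_variable(demo_environ, 'CMAKE_PREFIX_PATH', ['/usr/local'], '')
    # Returns: export CMAKE_PREFIX_PATH="/usr/local:$CMAKE_PREFIX_PATH"
    return prepend(demo_environ, 'CMAKE_PREFIX_PATH', prefix)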
| mit | 8,130,460,454,100,689,000 | 40.236 | 158 | 0.675914 | false |
alexandrucoman/labs | python/solutii/alexandru_tudose/paint/cursor.py | 5 | 1505 | #!/usr/bin/env python
# *-* coding: UTF-8 *-*
"""Tuxy dorește să împlementeze un nou paint pentru consolă.
În timpul dezvoltării proiectului s-a izbit de o problemă
pe care nu o poate rezolva singur și a apelat la ajutorul tău.
Aplicația ține un istoric al tuturor mișcărilor pe care le-a
făcut utlizatorul în fișierul istoric.tuxy
Exemplu de istoric.tuxy:
STÂNGA 2
JOS 2
DREAPTA 5
Fișierul de mai sus ne spune că utilizatorul a mutat cursorul
2 căsuțe la stânga după care 2 căsuțe in jos iar ultima acțiune
a fost să poziționeze cursorul cu 5 căsuțe în dreapta față de
ultima poziție.
El dorește un utilitar care să îi spună care este distanța dintre
punctul de origine (0, 0) și poziția curentă a cursorului.
"""
from __future__ import print_function
def distanta():
"""Funcția citește conținutul fișierului istoric.tuxy și
calculează distanța dintre punctul de origine și poziția
curentă a cursorului.
"""
ccx, ccy = 0, 0
with open("istoric.tuxy", "r") as fin:
for mesaj in fin:
alfa = mesaj.split()
beta = alfa[0]
if beta[:2] == 'SU':
ccx += int(alfa[1])
if beta[0] == 'D':
ccy += int(alfa[1])
if beta[0] == 'J':
ccx -= int(alfa[1])
if beta[:2] == 'ST':
ccy -= int(alfa[1])
print((ccx * ccx + ccy * ccy) ** 0.5)
if __name__ == "__main__":
distanta()
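# --- Editor's worked example (illustrative only) ------------------------------
# With the history from the module docstring written to istoric.tuxy:
#
#     with open("istoric.tuxy", "w") as fout:
#         fout.write("STÂNGA 2\nJOS 2\nDREAPTA 5\n")
#     distanta()
#
# the loop leaves ccx == -2 and ccy == 3, so the printed distance is
# (3 * 3 + 2 * 2) ** 0.5 == 13 ** 0.5, roughly 3.6056.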
| mit | -8,207,332,963,981,705,000 | 27.490196 | 65 | 0.618032 | false |
brentp/methylcode | methylcoder/sanity_check.py | 1 | 2397 | from pyfasta import Fasta
import sys
import os
import numpy as np
def check_txt(txt, fa):
f = Fasta(fa, flatten_inplace=True)
for line in open(txt):
seqid, mtype, bp, cs, ts = line.rstrip().split()
bp = int(bp)
mtype = int(mtype)
assert mtype > 0
if mtype < 4:
assert f[seqid][bp] == 'C'
else:
assert f[seqid][bp] == 'G'
print txt, "OK"
return 0
def check_bin(binpath, fa_path):
# a.5.methyltype.bin => 5
seqid = binpath.split(".")[-3]
is_m = ".methyl." in binpath
is_mtype = ".methyltype." in binpath
dtype = np.float32 if is_m else np.uint8 if is_mtype else np.uint32
fa = Fasta(fa_path)
bin = np.fromfile(binpath, dtype=dtype)
assert bin.shape[0] == len(fa[seqid]), (bin.shape[0], len(fa[seqid]))
assert not np.any(np.isinf(bin))
assert not np.any(np.isnan(bin))
assert not np.any(bin < 0)
if is_m:
assert 0 == np.min(bin), (binpath, np.min(bin))
assert 1 >= np.max(bin)
assert 0 < np.average(bin) < 1
else:
# TODO: add checks.
pass
print binpath, "OK"
if __name__ == "__main__":
import optparse
usage = """check output files created by run_bowtie.py
usage: %prog [options] files_to_check"""
p = optparse.OptionParser(usage)
p.add_option("-b", dest="bin", action='store_true',
help="check binary files")
p.add_option("-t", dest="txt", action='store_true',
help="check a text file")
p.add_option("-f", dest="fasta",
help="path to the fasta file (required!)")
opts, args = p.parse_args()
if not (opts.bin or opts.txt):
print "must specify either binary or text file with -b or -t"
sys.exit(p.print_help())
if not opts.fasta:
print "must specify a fasta file"
sys.exit(p.print_help())
assert os.path.exists(opts.fasta)
if opts.txt:
if not (args[0] and os.path.exists(args[0])):
print "must specify a txt file to check"
sys.exit(p.print_help())
check_txt(args[0], opts.fasta)
elif opts.bin:
for binfile in args:
if not os.path.exists(binfile):
print "specified binary file: %s does not exist" % binfile
sys.exit(p.print_help())
check_bin(binfile, opts.fasta)
| bsd-3-clause | -9,056,002,846,849,222,000 | 28.9625 | 74 | 0.562787 | false |
pdellaert/ansible | lib/ansible/modules/cloud/google/gcp_storage_object.py | 16 | 9980 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_storage_object
description:
- Upload or download a file from a GCS bucket.
short_description: Creates a GCP Object
version_added: '2.8'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
action:
description:
- Upload or download from the bucket.
- 'Some valid choices include: "download", "upload"'
required: false
type: str
overwrite:
description:
- "'Overwrite the file on the bucket/local machine. If overwrite is false and
a difference exists between GCS + local, module will fail with error' ."
required: false
type: bool
src:
description:
- Source location of file (may be local machine or cloud depending on action).
required: false
type: path
dest:
description:
- Destination location of file (may be local machine or cloud depending on action).
required: false
type: path
bucket:
description:
- The name of the bucket.
required: false
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
'''
EXAMPLES = '''
- name: create a object
gcp_storage_object:
action: download
bucket: ansible-bucket
src: modules.zip
dest: "~/modules.zip"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
action:
description:
- Upload or download from the bucket.
returned: success
type: str
overwrite:
description:
- "'Overwrite the file on the bucket/local machine. If overwrite is false and a
difference exists between GCS + local, module will fail with error' ."
returned: success
type: bool
src:
description:
- Source location of file (may be local machine or cloud depending on action).
returned: success
type: str
dest:
description:
- Destination location of file (may be local machine or cloud depending on action).
returned: success
type: str
bucket:
description:
- The name of the bucket.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import os
import mimetypes
import hashlib
import base64
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
action=dict(type='str'),
overwrite=dict(type='bool'),
src=dict(type='path'),
dest=dict(type='path'),
bucket=dict(type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']
remote_object = fetch_resource(module, self_link(module))
local_file_exists = os.path.isfile(local_file_path(module))
# Check if files exist.
if module.params['action'] == 'download' and not remote_object:
module.fail_json(msg="File does not exist in bucket")
if module.params['action'] == 'upload' and not local_file_exists:
module.fail_json(msg="File does not exist on disk")
# Check if we'll be overwriting files.
if not module.params['overwrite']:
remote_object['changed'] = False
if module.params['action'] == 'download' and local_file_exists:
# If files differ, throw an error
if get_md5_local(local_file_path(module)) != remote_object['md5Hash']:
module.fail_json(msg="Local file is different than remote file")
# If files are the same, module is done running.
else:
module.exit_json(**remote_object)
elif module.params['action'] == 'upload' and remote_object:
# If files differ, throw an error
if get_md5_local(local_file_path(module)) != remote_object['md5Hash']:
module.fail_json(msg="Local file is different than remote file")
# If files are the same, module is done running.
else:
module.exit_json(**remote_object)
# Upload/download the files
auth = GcpSession(module, 'storage')
if module.params['action'] == 'download':
results = download_file(module)
else:
results = upload_file(module)
module.exit_json(**results)
def download_file(module):
auth = GcpSession(module, 'storage')
data = auth.get(media_link(module))
with open(module.params['dest'], 'w') as f:
f.write(data.text.encode('utf8'))
return fetch_resource(module, self_link(module))
def upload_file(module):
auth = GcpSession(module, 'storage')
with open(module.params['src'], 'r') as f:
results = return_if_object(module, auth.post_contents(upload_link(module), f, object_headers(module)))
results['changed'] = True
return results
def get_md5_local(path):
md5 = hashlib.md5()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
md5.update(chunk)
return base64.b64encode(md5.digest())
def get_md5_remote(module):
resource = fetch_resource(module, self_link(module))
return resource.get('md5Hash')
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'storage')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
if module.params['action'] == 'download':
return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}".format(**module.params)
else:
return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{dest}".format(**module.params)
def local_file_path(module):
if module.params['action'] == 'download':
return module.params['dest']
else:
return module.params['src']
def media_link(module):
if module.params['action'] == 'download':
return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media".format(**module.params)
else:
return "https://www.googleapis.com/storage/v1/b/{bucket}/o/{dest}?alt=media".format(**module.params)
def upload_link(module):
return "https://www.googleapis.com/upload/storage/v1/b/{bucket}/o?uploadType=media&name={dest}".format(**module.params)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def object_headers(module):
return {
"name": module.params['dest'],
"Content-Type": mimetypes.guess_type(module.params['src'])[0],
"Content-Length": str(os.path.getsize(module.params['src'])),
}
if __name__ == '__main__':
main()
| gpl-3.0 | -5,811,129,193,039,105,000 | 29.993789 | 123 | 0.609018 | false |
cisco-openstack/neutron | neutron/tests/unit/ipam/test_subnet_alloc.py | 27 | 9443 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.ipam import requests as ipam_req
from neutron.ipam import subnet_alloc
from neutron import manager
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit import testlib_api
class TestSubnetAllocation(testlib_api.SqlTestCase):
def setUp(self):
super(TestSubnetAllocation, self).setUp()
self._tenant_id = 'test-tenant'
self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS)
self.plugin = manager.NeutronManager.get_plugin()
self.ctx = context.get_admin_context()
cfg.CONF.set_override('allow_overlapping_ips', True)
def _create_subnet_pool(self, plugin, ctx, name, prefix_list,
min_prefixlen, ip_version,
max_prefixlen=attributes.ATTR_NOT_SPECIFIED,
default_prefixlen=attributes.ATTR_NOT_SPECIFIED,
default_quota=attributes.ATTR_NOT_SPECIFIED,
shared=False):
subnetpool = {'subnetpool': {'name': name,
'tenant_id': self._tenant_id,
'prefixes': prefix_list,
'min_prefixlen': min_prefixlen,
'max_prefixlen': max_prefixlen,
'default_prefixlen': default_prefixlen,
'shared': shared,
'default_quota': default_quota}}
return plugin.create_subnetpool(ctx, subnetpool)
def _get_subnetpool(self, ctx, plugin, id):
return plugin.get_subnetpool(ctx, id)
def test_allocate_any_subnet(self):
prefix_list = ['10.1.0.0/16', '192.168.1.0/24']
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
prefix_list, 21, 4)
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
with self.ctx.session.begin(subtransactions=True):
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
req = ipam_req.AnySubnetRequest(self._tenant_id,
uuidutils.generate_uuid(),
constants.IPv4, 21)
res = sa.allocate_subnet(req)
detail = res.get_details()
prefix_set = netaddr.IPSet(iterable=prefix_list)
allocated_set = netaddr.IPSet(iterable=[detail.subnet_cidr])
self.assertTrue(allocated_set.issubset(prefix_set))
self.assertEqual(detail.prefixlen, 21)
def test_allocate_specific_subnet(self):
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
['10.1.0.0/16', '192.168.1.0/24'],
21, 4)
with self.ctx.session.begin(subtransactions=True):
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
uuidutils.generate_uuid(),
'10.1.2.0/24')
res = sa.allocate_subnet(req)
detail = res.get_details()
sp = self._get_subnetpool(self.ctx, self.plugin, sp['id'])
self.assertEqual(str(detail.subnet_cidr), '10.1.2.0/24')
self.assertEqual(detail.prefixlen, 24)
def test_insufficient_prefix_space_for_any_allocation(self):
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
['10.1.1.0/24', '192.168.1.0/24'],
21, 4)
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
req = ipam_req.AnySubnetRequest(self._tenant_id,
uuidutils.generate_uuid(),
constants.IPv4,
21)
self.assertRaises(n_exc.SubnetAllocationError,
sa.allocate_subnet, req)
def test_insufficient_prefix_space_for_specific_allocation(self):
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
['10.1.0.0/24'],
21, 4)
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
uuidutils.generate_uuid(),
'10.1.0.0/21')
self.assertRaises(n_exc.SubnetAllocationError,
sa.allocate_subnet, req)
def test_allocate_any_subnet_gateway(self):
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
['10.1.0.0/16', '192.168.1.0/24'],
21, 4)
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
with self.ctx.session.begin(subtransactions=True):
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
req = ipam_req.AnySubnetRequest(self._tenant_id,
uuidutils.generate_uuid(),
constants.IPv4, 21)
res = sa.allocate_subnet(req)
detail = res.get_details()
self.assertEqual(detail.gateway_ip,
detail.subnet_cidr.network + 1)
def test_allocate_specific_subnet_specific_gateway(self):
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
['10.1.0.0/16', '192.168.1.0/24'],
21, 4)
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
with self.ctx.session.begin(subtransactions=True):
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
uuidutils.generate_uuid(),
'10.1.2.0/24',
gateway_ip='10.1.2.254')
res = sa.allocate_subnet(req)
detail = res.get_details()
self.assertEqual(detail.gateway_ip,
netaddr.IPAddress('10.1.2.254'))
def test_allocate_specific_ipv6_subnet_specific_gateway(self):
# Same scenario as described in bug #1466322
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
['2210::/64'],
64, 6)
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
with self.ctx.session.begin(subtransactions=True):
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
uuidutils.generate_uuid(),
'2210::/64',
'2210::ffff:ffff:ffff:ffff')
res = sa.allocate_subnet(req)
detail = res.get_details()
self.assertEqual(detail.gateway_ip,
netaddr.IPAddress('2210::ffff:ffff:ffff:ffff'))
def test__allocation_value_for_tenant_no_allocations(self):
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
['10.1.0.0/16', '192.168.1.0/24'],
21, 4)
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
value = sa._allocations_used_by_tenant(32)
self.assertEqual(value, 0)
def test_subnetpool_default_quota_exceeded(self):
sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp',
['fe80::/48'],
48, 6, default_quota=1)
sp = self.plugin._get_subnetpool(self.ctx, sp['id'])
sa = subnet_alloc.SubnetAllocator(sp, self.ctx)
req = ipam_req.SpecificSubnetRequest(self._tenant_id,
uuidutils.generate_uuid(),
'fe80::/63')
self.assertRaises(n_exc.SubnetPoolQuotaExceeded,
sa.allocate_subnet,
req)
| apache-2.0 | -1,965,950,730,658,608,600 | 50.043243 | 78 | 0.523457 | false |
axbaretto/beam | sdks/python/apache_beam/internal/windmill_service_pb2.py | 2 | 9690 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: windmill_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import windmill_pb2 as windmill__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='windmill_service.proto',
package='google.dataflow.windmillservice.v1alpha1',
syntax='proto2',
serialized_pb=_b('\n\x16windmill_service.proto\x12(google.dataflow.windmillservice.v1alpha1\x1a\x0ewindmill.proto2\xf9\x02\n\x1c\x43loudWindmillServiceV1Alpha1\x12>\n\x07GetWork\x12\x18.windmill.GetWorkRequest\x1a\x19.windmill.GetWorkResponse\x12>\n\x07GetData\x12\x18.windmill.GetDataRequest\x1a\x19.windmill.GetDataResponse\x12G\n\nCommitWork\x12\x1b.windmill.CommitWorkRequest\x1a\x1c.windmill.CommitWorkResponse\x12\x44\n\tGetConfig\x12\x1a.windmill.GetConfigRequest\x1a\x1b.windmill.GetConfigResponse\x12J\n\x0bReportStats\x12\x1c.windmill.ReportStatsRequest\x1a\x1d.windmill.ReportStatsResponseB7\n5com.apache_beam.sdk.runners.worker.windmill')
,
dependencies=[windmill__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n5com.apache_beam.sdk.runners.worker.windmill'))
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaCloudWindmillServiceV1Alpha1Servicer(object):
"""The Cloud Windmill Service API used by GCE to acquire and process streaming
Dataflow work.
"""
def GetWork(self, request, context):
"""Gets streaming Dataflow work.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def GetData(self, request, context):
"""Gets data from Windmill.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def CommitWork(self, request, context):
"""Commits previously acquired work.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def GetConfig(self, request, context):
"""Gets dependant configuration from windmill.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ReportStats(self, request, context):
"""Reports stats to Windmill.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaCloudWindmillServiceV1Alpha1Stub(object):
"""The Cloud Windmill Service API used by GCE to acquire and process streaming
Dataflow work.
"""
def GetWork(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Gets streaming Dataflow work.
"""
raise NotImplementedError()
GetWork.future = None
def GetData(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Gets data from Windmill.
"""
raise NotImplementedError()
GetData.future = None
def CommitWork(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Commits previously acquired work.
"""
raise NotImplementedError()
CommitWork.future = None
def GetConfig(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Gets dependant configuration from windmill.
"""
raise NotImplementedError()
GetConfig.future = None
def ReportStats(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Reports stats to Windmill.
"""
raise NotImplementedError()
ReportStats.future = None
def beta_create_CloudWindmillServiceV1Alpha1_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
request_deserializers = {
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'CommitWork'): windmill__pb2.CommitWorkRequest.FromString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetConfig'): windmill__pb2.GetConfigRequest.FromString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetData'): windmill__pb2.GetDataRequest.FromString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetWork'): windmill__pb2.GetWorkRequest.FromString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'ReportStats'): windmill__pb2.ReportStatsRequest.FromString,
}
response_serializers = {
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'CommitWork'): windmill__pb2.CommitWorkResponse.SerializeToString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetConfig'): windmill__pb2.GetConfigResponse.SerializeToString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetData'): windmill__pb2.GetDataResponse.SerializeToString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetWork'): windmill__pb2.GetWorkResponse.SerializeToString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'ReportStats'): windmill__pb2.ReportStatsResponse.SerializeToString,
}
method_implementations = {
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'CommitWork'): face_utilities.unary_unary_inline(servicer.CommitWork),
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetConfig'): face_utilities.unary_unary_inline(servicer.GetConfig),
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetData'): face_utilities.unary_unary_inline(servicer.GetData),
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetWork'): face_utilities.unary_unary_inline(servicer.GetWork),
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'ReportStats'): face_utilities.unary_unary_inline(servicer.ReportStats),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_CloudWindmillServiceV1Alpha1_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
request_serializers = {
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'CommitWork'): windmill__pb2.CommitWorkRequest.SerializeToString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetConfig'): windmill__pb2.GetConfigRequest.SerializeToString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetData'): windmill__pb2.GetDataRequest.SerializeToString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetWork'): windmill__pb2.GetWorkRequest.SerializeToString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'ReportStats'): windmill__pb2.ReportStatsRequest.SerializeToString,
}
response_deserializers = {
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'CommitWork'): windmill__pb2.CommitWorkResponse.FromString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetConfig'): windmill__pb2.GetConfigResponse.FromString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetData'): windmill__pb2.GetDataResponse.FromString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'GetWork'): windmill__pb2.GetWorkResponse.FromString,
('google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', 'ReportStats'): windmill__pb2.ReportStatsResponse.FromString,
}
cardinalities = {
'CommitWork': cardinality.Cardinality.UNARY_UNARY,
'GetConfig': cardinality.Cardinality.UNARY_UNARY,
'GetData': cardinality.Cardinality.UNARY_UNARY,
'GetWork': cardinality.Cardinality.UNARY_UNARY,
'ReportStats': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.dataflow.windmillservice.v1alpha1.CloudWindmillServiceV1Alpha1', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
| apache-2.0 | 2,961,241,021,045,849,600 | 58.085366 | 652 | 0.79257 | false |
rgommers/scipy | scipy/odr/odrpack.py | 12 | 41982 | """
Python wrappers for Orthogonal Distance Regression (ODRPACK).
Notes
=====
* Array formats -- FORTRAN stores its arrays in memory column first, i.e., an
array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
efficiency and convenience, the input and output arrays of the fitting
function (and its Jacobians) are passed to FORTRAN without transposition.
Therefore, where the ODRPACK documentation says that the X array is of shape
(N, M), it will be passed to the Python function as an array of shape (M, N).
If M==1, the 1-D case, then nothing matters; if M>1, then your
Python functions will be dealing with arrays that are indexed in reverse of
the ODRPACK documentation. No real issue, but watch out for your indexing of
the Jacobians: the i,jth elements (@f_i/@x_j) evaluated at the nth
observation will be returned as jacd[j, i, n]. Except for the Jacobians, it
really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,
you can always use the transpose() function from SciPy explicitly.
* Examples -- See the accompanying file test/test.py for examples of how to set
up fits of your own. Some are taken from the User's Guide; some are from
other sources.
* Models -- Some common models are instantiated in the accompanying module
models.py . Contributions are welcome.
Credits
=======
* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.
Robert Kern
[email protected]
"""
import os
import numpy
from warnings import warn
from scipy.odr import __odrpack
__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop',
'Data', 'RealData', 'Model', 'Output', 'ODR',
'odr_error', 'odr_stop']
odr = __odrpack.odr
class OdrWarning(UserWarning):
"""
Warning indicating that the data passed into
ODR will cause problems when passed into 'odr'
that the user should be aware of.
"""
pass
class OdrError(Exception):
"""
Exception indicating an error in fitting.
This is raised by `~scipy.odr.odr` if an error occurs during fitting.
"""
pass
class OdrStop(Exception):
"""
Exception stopping fitting.
You can raise this exception in your objective function to tell
`~scipy.odr.odr` to stop fitting.
"""
pass
# Backwards compatibility
odr_error = OdrError
odr_stop = OdrStop
__odrpack._set_exceptions(OdrError, OdrStop)
def _conv(obj, dtype=None):
""" Convert an object to the preferred form for input to the odr routine.
"""
if obj is None:
return obj
else:
if dtype is None:
obj = numpy.asarray(obj)
else:
obj = numpy.asarray(obj, dtype)
if obj.shape == ():
# Scalar.
return obj.dtype.type(obj)
else:
return obj
def _report_error(info):
""" Interprets the return code of the odr routine.
Parameters
----------
info : int
The return code of the odr routine.
Returns
-------
problems : list(str)
A list of messages about why the odr() routine stopped.
"""
stopreason = ('Blank',
'Sum of squares convergence',
'Parameter convergence',
'Both sum of squares and parameter convergence',
'Iteration limit reached')[info % 5]
if info >= 5:
# questionable results or fatal error
I = (info//10000 % 10,
info//1000 % 10,
info//100 % 10,
info//10 % 10,
info % 10)
problems = []
if I[0] == 0:
if I[1] != 0:
problems.append('Derivatives possibly not correct')
if I[2] != 0:
problems.append('Error occurred in callback')
if I[3] != 0:
problems.append('Problem is not full rank at solution')
problems.append(stopreason)
elif I[0] == 1:
if I[1] != 0:
problems.append('N < 1')
if I[2] != 0:
problems.append('M < 1')
if I[3] != 0:
problems.append('NP < 1 or NP > N')
if I[4] != 0:
problems.append('NQ < 1')
elif I[0] == 2:
if I[1] != 0:
problems.append('LDY and/or LDX incorrect')
if I[2] != 0:
problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
if I[3] != 0:
problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
if I[4] != 0:
problems.append('LWORK and/or LIWORK too small')
elif I[0] == 3:
if I[1] != 0:
problems.append('STPB and/or STPD incorrect')
if I[2] != 0:
problems.append('SCLB and/or SCLD incorrect')
if I[3] != 0:
problems.append('WE incorrect')
if I[4] != 0:
problems.append('WD incorrect')
elif I[0] == 4:
problems.append('Error in derivatives')
elif I[0] == 5:
problems.append('Error occurred in callback')
elif I[0] == 6:
problems.append('Numerical error detected')
return problems
else:
return [stopreason]
class Data:
"""
The data to fit.
Parameters
----------
x : array_like
Observed data for the independent variable of the regression
y : array_like, optional
If array-like, observed data for the dependent variable of the
regression. A scalar input implies that the model to be used on
the data is implicit.
we : array_like, optional
If `we` is a scalar, then that value is used for all data points (and
all dimensions of the response variable).
If `we` is a rank-1 array of length q (the dimensionality of the
response variable), then this vector is the diagonal of the covariant
weighting matrix for all data points.
If `we` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th response variable
observation (single-dimensional only).
If `we` is a rank-2 array of shape (q, q), then this is the full
covariant weighting matrix broadcast to each observation.
If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
diagonal of the covariant weighting matrix for the i'th observation.
If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
If the fit is implicit, then only a positive scalar value is used.
wd : array_like, optional
If `wd` is a scalar, then that value is used for all data points
(and all dimensions of the input variable). If `wd` = 0, then the
covariant weighting matrix for each observation is set to the identity
matrix (so each dimension of each observation has the same weight).
If `wd` is a rank-1 array of length m (the dimensionality of the input
variable), then this vector is the diagonal of the covariant weighting
matrix for all data points.
If `wd` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the ith input variable observation
(single-dimensional only).
If `wd` is a rank-2 array of shape (m, m), then this is the full
covariant weighting matrix broadcast to each observation.
If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
diagonal of the covariant weighting matrix for the ith observation.
If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
fix : array_like of ints, optional
The `fix` argument is the same as ifixx in the class ODR. It is an
array of integers with the same shape as data.x that determines which
input observations are treated as fixed. One can use a sequence of
length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
meta : dict, optional
Free-form dictionary for metadata.
Notes
-----
Each argument is attached to the member of the instance of the same name.
The structures of `x` and `y` are described in the Model class docstring.
If `y` is an integer, then the Data instance can only be used to fit with
implicit models where the dimensionality of the response is equal to the
specified value of `y`.
The `we` argument weights the effect a deviation in the response variable
has on the fit. The `wd` argument weights the effect a deviation in the
input variable has on the fit. To handle multidimensional inputs and
responses easily, the structure of these arguments has the n'th
dimensional axis first. These arguments heavily use the structured
arguments feature of ODRPACK to conveniently and flexibly support all
options. See the ODRPACK User's Guide for a full explanation of how these
weights are used in the algorithm. Basically, a higher value of the weight
for a particular data point makes a deviation at that point more
detrimental to the fit.
"""
def __init__(self, x, y=None, we=None, wd=None, fix=None, meta={}):
self.x = _conv(x)
if not isinstance(self.x, numpy.ndarray):
raise ValueError(("Expected an 'ndarray' of data for 'x', "
"but instead got data of type '{name}'").format(
name=type(self.x).__name__))
self.y = _conv(y)
self.we = _conv(we)
self.wd = _conv(wd)
self.fix = _conv(fix)
self.meta = meta
def set_meta(self, **kwds):
""" Update the metadata dictionary with the keywords and data provided
by keywords.
Examples
--------
::
data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay")
"""
self.meta.update(kwds)
def __getattr__(self, attr):
""" Dispatch attribute access to the metadata dictionary.
"""
if attr in self.meta:
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
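# --- Editor's sketch (illustrative, not part of the original module) ----------
# Minimal example of wrapping observations in a Data instance; the numbers and
# weighting choices below are made up purely for illustration.
def _example_data():
    import numpy as np
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = np.array([1.1, 2.9, 5.2, 6.8])
    # A scalar wd weights every input observation equally; a rank-1 we of
    # length n weights each response observation individually (see the Data
    # docstring above for the full set of accepted shapes).
    return Data(x, y, wd=1.0, we=np.array([1.0, 1.0, 0.5, 1.0]))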
class RealData(Data):
"""
The data, with weightings as actual standard deviations and/or
covariances.
Parameters
----------
x : array_like
Observed data for the independent variable of the regression
y : array_like, optional
If array-like, observed data for the dependent variable of the
regression. A scalar input implies that the model to be used on
the data is implicit.
sx : array_like, optional
Standard deviations of `x`.
`sx` are standard deviations of `x` and are converted to weights by
dividing 1.0 by their squares.
sy : array_like, optional
Standard deviations of `y`.
`sy` are standard deviations of `y` and are converted to weights by
dividing 1.0 by their squares.
covx : array_like, optional
Covariance of `x`
`covx` is an array of covariance matrices of `x` and are converted to
weights by performing a matrix inversion on each observation's
covariance matrix.
covy : array_like, optional
Covariance of `y`
`covy` is an array of covariance matrices and are converted to
weights by performing a matrix inversion on each observation's
covariance matrix.
fix : array_like, optional
The argument and member fix is the same as Data.fix and ODR.ifixx:
It is an array of integers with the same shape as `x` that
determines which input observations are treated as fixed. One can
use a sequence of length m (the dimensionality of the input
observations) to fix some dimensions for all observations. A value
of 0 fixes the observation, a value > 0 makes it free.
meta : dict, optional
Free-form dictionary for metadata.
Notes
-----
The weights `wd` and `we` are computed from provided values as follows:
`sx` and `sy` are converted to weights by dividing 1.0 by their squares.
For example, ``wd = 1./numpy.power(`sx`, 2)``.
`covx` and `covy` are arrays of covariance matrices and are converted to
weights by performing a matrix inversion on each observation's covariance
matrix. For example, ``we[i] = numpy.linalg.inv(covy[i])``.
These arguments follow the same structured argument conventions as wd and
we only restricted by their natures: `sx` and `sy` can't be rank-3, but
`covx` and `covy` can be.
Only set *either* `sx` or `covx` (not both). Setting both will raise an
exception. Same with `sy` and `covy`.
"""
def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None,
fix=None, meta={}):
if (sx is not None) and (covx is not None):
raise ValueError("cannot set both sx and covx")
if (sy is not None) and (covy is not None):
raise ValueError("cannot set both sy and covy")
# Set flags for __getattr__
self._ga_flags = {}
if sx is not None:
self._ga_flags['wd'] = 'sx'
else:
self._ga_flags['wd'] = 'covx'
if sy is not None:
self._ga_flags['we'] = 'sy'
else:
self._ga_flags['we'] = 'covy'
self.x = _conv(x)
if not isinstance(self.x, numpy.ndarray):
raise ValueError(("Expected an 'ndarray' of data for 'x', "
"but instead got data of type '{name}'").format(
name=type(self.x).__name__))
self.y = _conv(y)
self.sx = _conv(sx)
self.sy = _conv(sy)
self.covx = _conv(covx)
self.covy = _conv(covy)
self.fix = _conv(fix)
self.meta = meta
def _sd2wt(self, sd):
""" Convert standard deviation to weights.
"""
return 1./numpy.power(sd, 2)
def _cov2wt(self, cov):
""" Convert covariance matrix(-ices) to weights.
"""
from scipy.linalg import inv
if len(cov.shape) == 2:
return inv(cov)
else:
weights = numpy.zeros(cov.shape, float)
for i in range(cov.shape[-1]): # n
weights[:,:,i] = inv(cov[:,:,i])
return weights
def __getattr__(self, attr):
lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx),
('wd', 'covx'): (self._cov2wt, self.covx),
('we', 'sy'): (self._sd2wt, self.sy),
('we', 'covy'): (self._cov2wt, self.covy)}
if attr not in ('wd', 'we'):
if attr in self.meta:
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
else:
func, arg = lookup_tbl[(attr, self._ga_flags[attr])]
if arg is not None:
return func(*(arg,))
else:
return None
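# --- Editor's sketch (illustrative, not part of the original module) ----------
# RealData turns standard deviations into weights internally (wd = 1/sx**2,
# we = 1/sy**2), so the two constructions below describe the same weighting.
# The values are made up purely for illustration.
def _example_realdata():
    import numpy as np
    x = np.array([0.0, 1.0, 2.0, 3.0])
    y = np.array([1.1, 2.9, 5.2, 6.8])
    sx = np.full(4, 0.1)
    sy = np.full(4, 0.2)
    real = RealData(x, y, sx=sx, sy=sy)
    equivalent = Data(x, y, wd=1.0 / sx**2, we=1.0 / sy**2)
    return real, equivalent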
class Model:
"""
The Model class stores information about the function you wish to fit.
It stores the function itself, at the least, and optionally stores
functions which compute the Jacobians used during fitting. Also, one
can provide a function that will provide reasonable starting values
for the fit parameters possibly given the set of data.
Parameters
----------
fcn : function
fcn(beta, x) --> y
fjacb : function
Jacobian of fcn wrt the fit parameters beta.
fjacb(beta, x) --> @f_i(x,B)/@B_j
fjacd : function
Jacobian of fcn wrt the (possibly multidimensional) input
variable.
fjacd(beta, x) --> @f_i(x,B)/@x_j
extra_args : tuple, optional
If specified, `extra_args` should be a tuple of extra
arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called
by `apply(fcn, (beta, x) + extra_args)`
estimate : array_like of rank-1
Provides estimates of the fit parameters from the data
estimate(data) --> estbeta
implicit : boolean
If TRUE, specifies that the model
is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit
against
meta : dict, optional
freeform dictionary of metadata for the model
Notes
-----
Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and
return a NumPy array. The `estimate` object takes an instance of the
Data class.
Here are the rules for the shapes of the argument and return
arrays of the callback functions:
`x`
if the input data is single-dimensional, then `x` is rank-1
array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)``
If the input data is multi-dimensional, then `x` is a rank-2 array;
i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``.
In all cases, it has the same shape as the input data array passed to
`~scipy.odr.odr`. `m` is the dimensionality of the input data,
`n` is the number of observations.
`y`
if the response variable is single-dimensional, then `y` is a
rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``.
If the response variable is multi-dimensional, then `y` is a rank-2
array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape =
(q, n)`` where `q` is the dimensionality of the response variable.
`beta`
rank-1 array of length `p` where `p` is the number of parameters;
i.e. ``beta = array([B_1, B_2, ..., B_p])``
`fjacb`
if the response variable is multi-dimensional, then the
return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] =
d f_l(X,B)/d B_k`` evaluated at the ith data point. If `q == 1`, then
the return array is only rank-2 and with shape `(p, n)`.
`fjacd`
as with fjacb, only the return array's shape is `(q, m, n)`
such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data
point. If `q == 1`, then the return array's shape is `(m, n)`. If
`m == 1`, the shape is (q, n). If `m == q == 1`, the shape is `(n,)`.
"""
def __init__(self, fcn, fjacb=None, fjacd=None,
extra_args=None, estimate=None, implicit=0, meta=None):
self.fcn = fcn
self.fjacb = fjacb
self.fjacd = fjacd
if extra_args is not None:
extra_args = tuple(extra_args)
self.extra_args = extra_args
self.estimate = estimate
self.implicit = implicit
self.meta = meta
def set_meta(self, **kwds):
""" Update the metadata dictionary with the keywords and data provided
here.
Examples
--------
set_meta(name="Exponential", equation="y = a exp(b x) + c")
"""
self.meta.update(kwds)
def __getattr__(self, attr):
""" Dispatch attribute access to the metadata.
"""
if attr in self.meta:
return self.meta[attr]
else:
raise AttributeError("'%s' not in metadata" % attr)
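# --- Editor's sketch (illustrative, not part of the original module) ----------
# A single-input, single-response model y = B_0*x + B_1 written against the
# shape conventions documented above: beta has length p, x and y have length n,
# fjacb returns shape (p, n) and fjacd returns shape (n,) when m == q == 1.
# The data values are made up, and the last line assumes the ODR class defined
# further below with its usual run() method returning an Output instance.
def _example_linear_fit():
    import numpy as np
    def fcn(beta, x):
        return beta[0] * x + beta[1]
    def fjacb(beta, x):
        # d f/d B_0 = x, d f/d B_1 = 1  ->  shape (p, n)
        return np.vstack([x, np.ones_like(x)])
    def fjacd(beta, x):
        # d f/d x = B_0 at every observation  ->  shape (n,)
        return np.full_like(x, beta[0])
    linear = Model(fcn, fjacb=fjacb, fjacd=fjacd)
    data = Data(np.array([0.0, 1.0, 2.0, 3.0]),
                np.array([1.1, 2.9, 5.2, 6.8]))
    output = ODR(data, linear, beta0=[1.0, 0.0]).run()
    return output.beta, output.sd_beta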
class Output:
"""
The Output class stores the output of an ODR run.
Attributes
----------
beta : ndarray
Estimated parameter values, of shape (q,).
sd_beta : ndarray
Standard deviations of the estimated parameters, of shape (p,).
cov_beta : ndarray
Covariance matrix of the estimated parameters, of shape (p,p).
delta : ndarray, optional
Array of estimated errors in input variables, of same shape as `x`.
eps : ndarray, optional
Array of estimated errors in response variables, of same shape as `y`.
xplus : ndarray, optional
Array of ``x + delta``.
y : ndarray, optional
Array ``y = fcn(x + delta)``.
res_var : float, optional
Residual variance.
sum_square : float, optional
Sum of squares error.
sum_square_delta : float, optional
Sum of squares of delta error.
sum_square_eps : float, optional
Sum of squares of eps error.
inv_condnum : float, optional
Inverse condition number (cf. ODRPACK UG p. 77).
rel_error : float, optional
Relative error in function values computed within fcn.
work : ndarray, optional
Final work array.
work_ind : dict, optional
Indices into work for drawing out values (cf. ODRPACK UG p. 83).
info : int, optional
Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38).
stopreason : list of str, optional
`info` interpreted into English.
Notes
-----
Takes one argument for initialization, the return value from the
function `~scipy.odr.odr`. The attributes listed as "optional" above are
only present if `~scipy.odr.odr` was run with ``full_output=1``.
"""
def __init__(self, output):
self.beta = output[0]
self.sd_beta = output[1]
self.cov_beta = output[2]
if len(output) == 4:
# full output
self.__dict__.update(output[3])
self.stopreason = _report_error(self.info)
def pprint(self):
""" Pretty-print important results.
"""
print('Beta:', self.beta)
print('Beta Std Error:', self.sd_beta)
print('Beta Covariance:', self.cov_beta)
if hasattr(self, 'info'):
print('Residual Variance:',self.res_var)
print('Inverse Condition #:', self.inv_condnum)
print('Reason(s) for Halting:')
for r in self.stopreason:
print(' %s' % r)
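# Illustrative sketch (``out`` is a hypothetical Output instance produced by
# ODR.run() below, which always runs with full output):
#
#   out.beta         # estimated parameters, length p
#   out.sd_beta      # their standard deviations
#   out.cov_beta     # (p, p) covariance matrix
#   out.stopreason   # ODRPACK's halting reasons, in English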
class ODR:
"""
The ODR class gathers all information and coordinates the running of the
main fitting routine.
Members of instances of the ODR class have the same names as the arguments
to the initialization routine.
Parameters
----------
data : Data class instance
instance of the Data class
model : Model class instance
instance of the Model class
Other Parameters
----------------
beta0 : array_like of rank-1
a rank-1 sequence of initial parameter values. Optional if
model provides an "estimate" function to estimate these values.
delta0 : array_like of floats of rank-1, optional
a (double-precision) float array to hold the initial values of
the errors in the input variables. Must be same shape as data.x
ifixb : array_like of ints of rank-1, optional
sequence of integers with the same length as beta0 that determines
which parameters are held fixed. A value of 0 fixes the parameter,
a value > 0 makes the parameter free.
ifixx : array_like of ints with same shape as data.x, optional
an array of integers with the same shape as data.x that determines
which input observations are treated as fixed. One can use a sequence
of length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
job : int, optional
an integer telling ODRPACK what tasks to perform. See p. 31 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_job post-initialization for a more readable interface.
iprint : int, optional
an integer telling ODRPACK what to print. See pp. 33-34 of the
ODRPACK User's Guide if you absolutely must set the value here. Use the
method set_iprint post-initialization for a more readable interface.
errfile : str, optional
string with the filename to print ODRPACK errors to. If the file already
exists, an error will be thrown. The `overwrite` argument can be used to
prevent this. *Do Not Open This File Yourself!*
rptfile : str, optional
string with the filename to print ODRPACK summaries to. If the file
already exists, an error will be thrown. The `overwrite` argument can be
used to prevent this. *Do Not Open This File Yourself!*
ndigit : int, optional
integer specifying the number of reliable digits in the computation
of the function.
taufac : float, optional
float specifying the initial trust region. The default value is 1.
The initial trust region is equal to taufac times the length of the
first computed Gauss-Newton step. taufac must be less than 1.
sstol : float, optional
float specifying the tolerance for convergence based on the relative
change in the sum-of-squares. The default value is eps**(1/2) where eps
is the smallest value such that 1 + eps > 1 for double precision
computation on the machine. sstol must be less than 1.
partol : float, optional
float specifying the tolerance for convergence based on the relative
        change in the estimated parameters. The default value is ``eps**(2/3)`` for
explicit models and ``eps**(1/3)`` for implicit models. partol must be less
than 1.
maxit : int, optional
integer specifying the maximum number of iterations to perform. For
first runs, maxit is the total number of iterations performed and
defaults to 50. For restarts, maxit is the number of additional
iterations to perform and defaults to 10.
stpb : array_like, optional
sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute
finite difference derivatives wrt the parameters.
stpd : optional
array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative
step sizes to compute finite difference derivatives wrt the input
variable errors. If stpd is a rank-1 array with length m (the
dimensionality of the input variable), then the values are broadcast to
all observations.
sclb : array_like, optional
        sequence (``len(sclb) == len(beta0)``) of scaling factors for the
        parameters. The purpose of these scaling factors is to scale all of
the parameters to around unity. Normally appropriate scaling factors
are computed if this argument is not specified. Specify them yourself
if the automatic procedure goes awry.
scld : array_like, optional
        array (``scld.shape == data.x.shape`` or ``scld.shape == (m,)``) of scaling
factors for the *errors* in the input variables. Again, these factors
are automatically computed if you do not provide them. If scld.shape ==
(m,), then the scaling factors are broadcast to all observations.
work : ndarray, optional
array to hold the double-valued working data for ODRPACK. When
restarting, takes the value of self.output.work.
iwork : ndarray, optional
array to hold the integer-valued working data for ODRPACK. When
restarting, takes the value of self.output.iwork.
overwrite : bool, optional
If it is True, output files defined by `errfile` and `rptfile` are
overwritten. The default is False.
Attributes
----------
data : Data
The data for this fit
model : Model
The model used in fit
output : Output
        An instance of the Output class containing all of the returned
data from an invocation of ODR.run() or ODR.restart()
"""
def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,
ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,
ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,
stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None,
overwrite=False):
self.data = data
self.model = model
if beta0 is None:
if self.model.estimate is not None:
self.beta0 = _conv(self.model.estimate(self.data))
else:
raise ValueError(
"must specify beta0 or provide an estimater with the model"
)
else:
self.beta0 = _conv(beta0)
if ifixx is None and data.fix is not None:
ifixx = data.fix
if overwrite:
# remove output files for overwriting.
if rptfile is not None and os.path.exists(rptfile):
os.remove(rptfile)
if errfile is not None and os.path.exists(errfile):
os.remove(errfile)
self.delta0 = _conv(delta0)
# These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit
# platforms.
# XXX: some other FORTRAN compilers may not agree.
self.ifixx = _conv(ifixx, dtype=numpy.int32)
self.ifixb = _conv(ifixb, dtype=numpy.int32)
self.job = job
self.iprint = iprint
self.errfile = errfile
self.rptfile = rptfile
self.ndigit = ndigit
self.taufac = taufac
self.sstol = sstol
self.partol = partol
self.maxit = maxit
self.stpb = _conv(stpb)
self.stpd = _conv(stpd)
self.sclb = _conv(sclb)
self.scld = _conv(scld)
self.work = _conv(work)
self.iwork = _conv(iwork)
self.output = None
self._check()
def _check(self):
""" Check the inputs for consistency, but don't bother checking things
that the builtin function odr will check.
"""
x_s = list(self.data.x.shape)
if isinstance(self.data.y, numpy.ndarray):
y_s = list(self.data.y.shape)
if self.model.implicit:
raise OdrError("an implicit model cannot use response data")
else:
# implicit model with q == self.data.y
y_s = [self.data.y, x_s[-1]]
if not self.model.implicit:
raise OdrError("an explicit model needs response data")
self.set_job(fit_type=1)
if x_s[-1] != y_s[-1]:
raise OdrError("number of observations do not match")
n = x_s[-1]
if len(x_s) == 2:
m = x_s[0]
else:
m = 1
if len(y_s) == 2:
q = y_s[0]
else:
q = 1
p = len(self.beta0)
# permissible output array shapes
fcn_perms = [(q, n)]
fjacd_perms = [(q, m, n)]
fjacb_perms = [(q, p, n)]
if q == 1:
fcn_perms.append((n,))
fjacd_perms.append((m, n))
fjacb_perms.append((p, n))
if m == 1:
fjacd_perms.append((q, n))
if p == 1:
fjacb_perms.append((q, n))
if m == q == 1:
fjacd_perms.append((n,))
if p == q == 1:
fjacb_perms.append((n,))
# try evaluating the supplied functions to make sure they provide
# sensible outputs
arglist = (self.beta0, self.data.x)
if self.model.extra_args is not None:
arglist = arglist + self.model.extra_args
res = self.model.fcn(*arglist)
if res.shape not in fcn_perms:
print(res.shape)
print(fcn_perms)
raise OdrError("fcn does not output %s-shaped array" % y_s)
if self.model.fjacd is not None:
res = self.model.fjacd(*arglist)
if res.shape not in fjacd_perms:
raise OdrError(
"fjacd does not output %s-shaped array" % repr((q, m, n)))
if self.model.fjacb is not None:
res = self.model.fjacb(*arglist)
if res.shape not in fjacb_perms:
raise OdrError(
"fjacb does not output %s-shaped array" % repr((q, p, n)))
# check shape of delta0
if self.delta0 is not None and self.delta0.shape != self.data.x.shape:
raise OdrError(
"delta0 is not a %s-shaped array" % repr(self.data.x.shape))
if self.data.x.size == 0:
warn(("Empty data detected for ODR instance. "
"Do not expect any fitting to occur"),
OdrWarning)
def _gen_work(self):
""" Generate a suitable work array if one does not already exist.
"""
n = self.data.x.shape[-1]
p = self.beta0.shape[0]
if len(self.data.x.shape) == 2:
m = self.data.x.shape[0]
else:
m = 1
if self.model.implicit:
q = self.data.y
elif len(self.data.y.shape) == 2:
q = self.data.y.shape[0]
else:
q = 1
if self.data.we is None:
ldwe = ld2we = 1
elif len(self.data.we.shape) == 3:
ld2we, ldwe = self.data.we.shape[1:]
else:
# Okay, this isn't precisely right, but for this calculation,
# it's fine
ldwe = 1
ld2we = self.data.we.shape[1]
if self.job % 10 < 2:
# ODR not OLS
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p +
2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q)
else:
# OLS not ODR
lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p +
5*q + q*(p+m) + ldwe*ld2we*q)
if isinstance(self.work, numpy.ndarray) and self.work.shape == (lwork,)\
and self.work.dtype.str.endswith('f8'):
# the existing array is fine
return
else:
self.work = numpy.zeros((lwork,), float)
def set_job(self, fit_type=None, deriv=None, var_calc=None,
del_init=None, restart=None):
"""
Sets the "job" parameter is a hopefully comprehensible way.
If an argument is not specified, then the value is left as is. The
default value from class initialization is for all of these options set
to 0.
Parameters
----------
fit_type : {0, 1, 2} int
0 -> explicit ODR
1 -> implicit ODR
2 -> ordinary least-squares
deriv : {0, 1, 2, 3} int
0 -> forward finite differences
1 -> central finite differences
2 -> user-supplied derivatives (Jacobians) with results
checked by ODRPACK
3 -> user-supplied derivatives, no checking
var_calc : {0, 1, 2} int
0 -> calculate asymptotic covariance matrix and fit
parameter uncertainties (V_B, s_B) using derivatives
recomputed at the final solution
1 -> calculate V_B and s_B using derivatives from last iteration
2 -> do not calculate V_B and s_B
del_init : {0, 1} int
0 -> initial input variable offsets set to 0
1 -> initial offsets provided by user in variable "work"
restart : {0, 1} int
0 -> fit is not a restart
1 -> fit is a restart
Notes
-----
The permissible values are different from those given on pg. 31 of the
ODRPACK User's Guide only in that one cannot specify numbers greater than
the last value for each variable.
If one does not supply functions to compute the Jacobians, the fitting
procedure will change deriv to 0, finite differences, as a default. To
initialize the input variable offsets by yourself, set del_init to 1 and
put the offsets into the "work" variable correctly.
"""
if self.job is None:
job_l = [0, 0, 0, 0, 0]
else:
job_l = [self.job // 10000 % 10,
self.job // 1000 % 10,
self.job // 100 % 10,
self.job // 10 % 10,
self.job % 10]
if fit_type in (0, 1, 2):
job_l[4] = fit_type
if deriv in (0, 1, 2, 3):
job_l[3] = deriv
if var_calc in (0, 1, 2):
job_l[2] = var_calc
if del_init in (0, 1):
job_l[1] = del_init
if restart in (0, 1):
job_l[0] = restart
self.job = (job_l[0]*10000 + job_l[1]*1000 +
job_l[2]*100 + job_l[3]*10 + job_l[4])
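    # Illustrative sketch (``my_odr`` is a hypothetical ODR instance): the five
    # digits compose as
    #   job = restart*10000 + del_init*1000 + var_calc*100 + deriv*10 + fit_type
    # so, for example,
    #   my_odr.set_job(deriv=3)   # -> job == 30: explicit ODR with
    #                             #    user-supplied, unchecked derivatives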
def set_iprint(self, init=None, so_init=None,
iter=None, so_iter=None, iter_step=None, final=None, so_final=None):
""" Set the iprint parameter for the printing of computation reports.
If any of the arguments are specified here, then they are set in the
iprint member. If iprint is not set manually or with this method, then
ODRPACK defaults to no printing. If no filename is specified with the
member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to
print to stdout in addition to the specified filename by setting the
so_* arguments to this function, but one cannot specify to print to
stdout but not a file since one can do that by not specifying a rptfile
filename.
There are three reports: initialization, iteration, and final reports.
They are represented by the arguments init, iter, and final
respectively. The permissible values are 0, 1, and 2 representing "no
report", "short report", and "long report" respectively.
The argument iter_step (0 <= iter_step <= 9) specifies how often to make
the iteration report; the report will be made for every iter_step'th
iteration starting with iteration one. If iter_step == 0, then no
iteration report is made, regardless of the other arguments.
If the rptfile is None, then any so_* arguments supplied will raise an
exception.
"""
if self.iprint is None:
self.iprint = 0
ip = [self.iprint // 1000 % 10,
self.iprint // 100 % 10,
self.iprint // 10 % 10,
self.iprint % 10]
# make a list to convert iprint digits to/from argument inputs
# rptfile, stdout
ip2arg = [[0, 0], # none, none
[1, 0], # short, none
[2, 0], # long, none
[1, 1], # short, short
[2, 1], # long, short
[1, 2], # short, long
[2, 2]] # long, long
if (self.rptfile is None and
(so_init is not None or
so_iter is not None or
so_final is not None)):
raise OdrError(
"no rptfile specified, cannot output to stdout twice")
iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]]
if init is not None:
iprint_l[0] = init
if so_init is not None:
iprint_l[1] = so_init
if iter is not None:
iprint_l[2] = iter
if so_iter is not None:
iprint_l[3] = so_iter
if final is not None:
iprint_l[4] = final
if so_final is not None:
iprint_l[5] = so_final
if iter_step in range(10):
# 0..9
ip[2] = iter_step
ip[0] = ip2arg.index(iprint_l[0:2])
ip[1] = ip2arg.index(iprint_l[2:4])
ip[3] = ip2arg.index(iprint_l[4:6])
self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3]
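    # Illustrative sketch (assumes ``rptfile`` was set; ``my_odr`` is a
    # hypothetical ODR instance):
    #   my_odr.set_iprint(init=2, iter=1, iter_step=3, final=2)
    # requests a long initialization report, a short report every third
    # iteration and a long final report, all written to the report file only
    # (iprint becomes 2132 under the digit scheme above).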
def run(self):
""" Run the fitting routine with all of the information given and with ``full_output=1``.
Returns
-------
output : Output instance
This object is also assigned to the attribute .output .
"""
args = (self.model.fcn, self.beta0, self.data.y, self.data.x)
kwds = {'full_output': 1}
kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile',
'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb',
'stpd', 'sclb', 'scld', 'work', 'iwork']
if self.delta0 is not None and (self.job // 10000) % 10 == 0:
# delta0 provided and fit is not a restart
self._gen_work()
d0 = numpy.ravel(self.delta0)
self.work[:len(d0)] = d0
# set the kwds from other objects explicitly
if self.model.fjacb is not None:
kwds['fjacb'] = self.model.fjacb
if self.model.fjacd is not None:
kwds['fjacd'] = self.model.fjacd
if self.data.we is not None:
kwds['we'] = self.data.we
if self.data.wd is not None:
kwds['wd'] = self.data.wd
if self.model.extra_args is not None:
kwds['extra_args'] = self.model.extra_args
# implicitly set kwds from self's members
for attr in kwd_l:
obj = getattr(self, attr)
if obj is not None:
kwds[attr] = obj
self.output = Output(odr(*args, **kwds))
return self.output
def restart(self, iter=None):
""" Restarts the run with iter more iterations.
Parameters
----------
iter : int, optional
ODRPACK's default for the number of new iterations is 10.
Returns
-------
output : Output instance
This object is also assigned to the attribute .output .
"""
if self.output is None:
raise OdrError("cannot restart: run() has not been called before")
self.set_job(restart=1)
self.work = self.output.work
self.iwork = self.output.iwork
self.maxit = iter
return self.run()
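    # Illustrative sketch (``fit`` is a hypothetical ODR instance whose run()
    # has already returned, e.g. after hitting maxit):
    #   out = fit.restart(iter=20)   # perform up to 20 additional iterations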
| bsd-3-clause | -632,033,799,590,013,700 | 35.761821 | 97 | 0.583131 | false |
MIPS/external-deqp | framework/delibs/scripts/git-check.py | 7 | 1690 | # Script for checking which projects have unsubmitted modifications in them.
#
# Usage:
# - recommended to add a alias/bat/sh for a shorter command
# - running without parameters will check any existing known dE projects.
# - can give projects names on command line, if only wish to check a sub-set
# e.g., git-check.py delibs deqp
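# - other example invocations (hypothetical):
#     git-check.py push deqp   -> push only the deqp repository
#     git-check.py pull        -> pull every known repository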
import os
import sys
COMMANDS = ["pull", "push", "check"]
ALL_REPOS = ["delibs", "deqp", "movies", "domeni", "demisc"]
# Defaults.
command = "check"
repos = ALL_REPOS
# Parse command line.
numArgs = len(sys.argv)
if (numArgs == 1):
pass
else:
if (sys.argv[1] in COMMANDS):
command = sys.argv[1]
if (numArgs > 2):
repos = sys.argv[2:]
else:
repos = sys.argv[1:]
def findRepo(x):
for repo in ALL_REPOS:
if repo.startswith(x):
return repo
print "%s not a valid repository directory" % x
sys.exit(1)
repoDirs = [findRepo(x) for x in repos]
# Find git base repo directory.
oldDir = os.getcwd()
baseDir = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "../.."))
foundAny = False
# Execute the command.
print "## Executing '%s' on repos: %s" % (command.upper(), ", ".join(repoDirs))
print ""
for gitDir in repoDirs:
subDir = os.path.join(baseDir, gitDir)
if os.path.exists(subDir):
foundAny = True
print "***** Check directory '%s' *****" % subDir
os.chdir(subDir)
if command == "check":
os.system("git status")
os.system("git push --dry-run")
elif command == "push":
os.system("git push")
elif command == "pull":
os.system("git pull")
else:
assert False
print ""
if not foundAny:
print "No subdirs found -- tried %s" % repoDirs
print "Searching in '%s'" % baseDir
os.chdir(oldDir)
| apache-2.0 | -7,436,733,265,433,103,000 | 23.142857 | 79 | 0.660947 | false |
rbuffat/pyidf | tests/test_curvechillerpartloadwithlift.py | 1 | 6208 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.performance_curves import CurveChillerPartLoadWithLift
log = logging.getLogger(__name__)
class TestCurveChillerPartLoadWithLift(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_curvechillerpartloadwithlift(self):
pyidf.validation_level = ValidationLevel.error
obj = CurveChillerPartLoadWithLift()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_coefficient1_c1 = 2.2
obj.coefficient1_c1 = var_coefficient1_c1
# real
var_coefficient2_c2 = 3.3
obj.coefficient2_c2 = var_coefficient2_c2
# real
var_coefficient3_c3 = 4.4
obj.coefficient3_c3 = var_coefficient3_c3
# real
var_coefficient4_c4 = 5.5
obj.coefficient4_c4 = var_coefficient4_c4
# real
var_coefficient5_c5 = 6.6
obj.coefficient5_c5 = var_coefficient5_c5
# real
var_coefficient6_c6 = 7.7
obj.coefficient6_c6 = var_coefficient6_c6
# real
var_coefficient7_c7 = 8.8
obj.coefficient7_c7 = var_coefficient7_c7
# real
var_coefficient8_c8 = 9.9
obj.coefficient8_c8 = var_coefficient8_c8
# real
var_coefficient9_c9 = 10.1
obj.coefficient9_c9 = var_coefficient9_c9
# real
var_coefficient10_c10 = 11.11
obj.coefficient10_c10 = var_coefficient10_c10
# real
var_coefficient11_c11 = 12.12
obj.coefficient11_c11 = var_coefficient11_c11
# real
var_coefficient12_c12 = 13.13
obj.coefficient12_c12 = var_coefficient12_c12
# real
var_minimum_value_of_x = 14.14
obj.minimum_value_of_x = var_minimum_value_of_x
# real
var_maximum_value_of_x = 15.15
obj.maximum_value_of_x = var_maximum_value_of_x
# real
var_minimum_value_of_y = 16.16
obj.minimum_value_of_y = var_minimum_value_of_y
# real
var_maximum_value_of_y = 17.17
obj.maximum_value_of_y = var_maximum_value_of_y
# real
var_minimum_value_of_z = 18.18
obj.minimum_value_of_z = var_minimum_value_of_z
# real
var_maximum_value_of_z = 19.19
obj.maximum_value_of_z = var_maximum_value_of_z
# real
var_minimum_curve_output = 20.2
obj.minimum_curve_output = var_minimum_curve_output
# real
var_maximum_curve_output = 21.21
obj.maximum_curve_output = var_maximum_curve_output
# alpha
var_input_unit_type_for_x = "Dimensionless"
obj.input_unit_type_for_x = var_input_unit_type_for_x
# alpha
var_input_unit_type_for_y = "Dimensionless"
obj.input_unit_type_for_y = var_input_unit_type_for_y
# alpha
var_input_unit_type_for_z = "Dimensionless"
obj.input_unit_type_for_z = var_input_unit_type_for_z
# alpha
var_output_unit_type = "Dimensionless"
obj.output_unit_type = var_output_unit_type
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.curvechillerpartloadwithlifts[0].name, var_name)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient1_c1, var_coefficient1_c1)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient2_c2, var_coefficient2_c2)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient3_c3, var_coefficient3_c3)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient4_c4, var_coefficient4_c4)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient5_c5, var_coefficient5_c5)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient6_c6, var_coefficient6_c6)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient7_c7, var_coefficient7_c7)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient8_c8, var_coefficient8_c8)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient9_c9, var_coefficient9_c9)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient10_c10, var_coefficient10_c10)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient11_c11, var_coefficient11_c11)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].coefficient12_c12, var_coefficient12_c12)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].minimum_value_of_x, var_minimum_value_of_x)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].maximum_value_of_x, var_maximum_value_of_x)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].minimum_value_of_y, var_minimum_value_of_y)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].maximum_value_of_y, var_maximum_value_of_y)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].minimum_value_of_z, var_minimum_value_of_z)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].maximum_value_of_z, var_maximum_value_of_z)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].minimum_curve_output, var_minimum_curve_output)
self.assertAlmostEqual(idf2.curvechillerpartloadwithlifts[0].maximum_curve_output, var_maximum_curve_output)
self.assertEqual(idf2.curvechillerpartloadwithlifts[0].input_unit_type_for_x, var_input_unit_type_for_x)
self.assertEqual(idf2.curvechillerpartloadwithlifts[0].input_unit_type_for_y, var_input_unit_type_for_y)
self.assertEqual(idf2.curvechillerpartloadwithlifts[0].input_unit_type_for_z, var_input_unit_type_for_z)
self.assertEqual(idf2.curvechillerpartloadwithlifts[0].output_unit_type, var_output_unit_type) | apache-2.0 | 6,758,843,055,376,080,000 | 45.335821 | 116 | 0.685245 | false |
dmick/teuthology | teuthology/test/test_contextutil.py | 3 | 1989 | from pytest import raises
from teuthology import contextutil
from logging import ERROR
class TestSafeWhile(object):
def setup(self):
contextutil.log.setLevel(ERROR)
self.fake_sleep = lambda s: True
self.s_while = contextutil.safe_while
def test_6_5_10_deal(self):
with raises(contextutil.MaxWhileTries):
with self.s_while(_sleeper=self.fake_sleep) as proceed:
while proceed():
pass
def test_6_0_1_deal(self):
with raises(contextutil.MaxWhileTries) as error:
with self.s_while(
tries=1,
_sleeper=self.fake_sleep
) as proceed:
while proceed():
pass
assert 'waiting for 6 seconds' in str(error)
def test_1_0_10_deal(self):
with raises(contextutil.MaxWhileTries) as error:
with self.s_while(
sleep=1,
_sleeper=self.fake_sleep
) as proceed:
while proceed():
pass
assert 'waiting for 10 seconds' in str(error)
def test_6_1_10_deal(self):
with raises(contextutil.MaxWhileTries) as error:
with self.s_while(
increment=1,
_sleeper=self.fake_sleep
) as proceed:
while proceed():
pass
assert 'waiting for 105 seconds' in str(error)
def test_action(self):
with raises(contextutil.MaxWhileTries) as error:
with self.s_while(
action='doing the thing',
_sleeper=self.fake_sleep
) as proceed:
while proceed():
pass
assert "'doing the thing' reached maximum tries" in str(error)
def test_no_raise(self):
with self.s_while(_raise=False, _sleeper=self.fake_sleep) as proceed:
while proceed():
pass
assert True
| mit | 6,726,763,939,141,251,000 | 28.25 | 77 | 0.536953 | false |
FrodeSolheim/fs-uae-launcher | launcher/extra/glowicon.py | 1 | 10243 | #!/usr/bin/env python3
import os
import sys
sys.path.insert(0, "../git/fs-uae-master/fs-uae-launcher")
from PIL import Image, ImageFilter
# from fsui.qt.qt import QApplication
from fsui.qt.qt import QImage, QPainter, QSvgRenderer
"""
Module for creating glow-icon-alike effects on icons.
Quick and dirty code. No effort has been spent on trying to make the code
efficient or anything like that. The intention is to use this as a
pre-processing step.
"""
# Glowicons are 46x46. The extra radius due to the glow effect is 4 pixels, so
# the actual icons are effectively 38x38. If we scale up the icons to 150%,
# we get 69x69 incl. glow and 57x57 excl. glow.
# Suggest standardizing on 76x76 (64x64 + 4 * 1.5 radius). Icons are encouraged
# to have some space around them, so that the effective bounding area for most
# icons are 56x56 og 58x58 or something like that.
SIZE = (76, 76)
# We need to render temporarily at a slightly bigger size, due to filters and
# edge conditions. Otherwise, the bigger icons will have cutoffs in the glow
# effect. After rendering to a temporarily bigger size, we just trim the image
# down to the desired size.
TEMPSIZE = (86, 86)
def darken(im, factor=0.67):
pixels = im.load()
for y in range(im.size[1]):
for x in range(im.size[0]):
r, g, b, a = pixels[x, y]
r = int(r * factor)
g = int(g * factor)
b = int(b * factor)
pixels[x, y] = r, g, b, a
def transparency_threshold(im):
pixels = im.load()
for y in range(im.size[1]):
for x in range(im.size[0]):
r, g, b, a = pixels[x, y]
if a > 128:
a = 255
else:
a = 0
pixels[x, y] = r, g, b, a
def dilate_colored(im, c=(0, 0, 0, 0)):
result = Image.new("RGBA", im.size)
src = im.load()
dst = result.load()
for y in range(1, im.size[1] - 1):
for x in range(1, im.size[0] - 1):
p = (c[0], c[1], c[2], 0)
for dy in [-1, 0, 1]:
for dx in [-1, 0, 1]:
p2 = src[x + dx, y + dy]
if p2[3] > 0:
p = (c[0], c[1], c[2], 255)
break
dst[x, y] = p
return result
def multiply_alpha(im, factor=0.5):
pixels = im.load()
for y in range(im.size[1]):
for x in range(im.size[0]):
r, g, b, a = pixels[x, y]
a = int(a * factor)
pixels[x, y] = r, g, b, a
# def add_glow(orig):
# base = Image.new("RGBA", TEMPSIZE, (0, 0, 0, 0))
# base.paste(
# orig,
# ((TEMPSIZE[0] - orig.size[0]) // 2, (TEMPSIZE[1] - orig.size[1]) // 2),
# )
# dark = base.copy()
# darken(dark)
# g1 = base.copy()
# transparency_threshold(g1)
# white = dilate_colored(g1, (255, 255, 255, 0))
# white2 = dilate_colored(white, (255, 255, 255, 0))
# yellow = dilate_colored(white2, (0xFF, 0xFF, 0x00))
# orange = dilate_colored(yellow, (0xFF, 0xBB, 0x00))
# orange2 = dilate_colored(orange, (0xFF, 0x99, 0x00))
# white = white.filter(ImageFilter.BLUR)
# white2 = white2.filter(ImageFilter.BLUR)
# yellow = yellow.filter(ImageFilter.BLUR)
# orange = orange.filter(ImageFilter.BLUR)
# orange2 = orange2.filter(ImageFilter.BLUR)
# multiply_alpha(yellow, 0.67)
# multiply_alpha(orange2, 0.67)
# multiply_alpha(orange2, 0.33)
# glow = Image.new("RGBA", TEMPSIZE, (0, 0, 0, 0))
# glow = Image.alpha_composite(glow, orange2)
# glow = Image.alpha_composite(glow, orange)
# glow = Image.alpha_composite(glow, yellow)
# glow = Image.alpha_composite(glow, white2)
# # Render the white twice here is on purpose, to make the thin inner white
# # area more distinct.
# glow = Image.alpha_composite(glow, white)
# glow = Image.alpha_composite(glow, white)
# glow = Image.alpha_composite(glow, dark)
# # Cut to final size.
# glow = glow.crop(
# (
# (TEMPSIZE[0] - SIZE[0]) // 2,
# (TEMPSIZE[1] - SIZE[1]) // 2,
# SIZE[0] + (TEMPSIZE[0] - SIZE[0]) // 2,
# SIZE[1] + (TEMPSIZE[1] - SIZE[1]) // 2,
# )
# )
# return glow
def add_glow(orig):
base = Image.new("RGBA", TEMPSIZE, (0, 0, 0, 0))
base.paste(
orig,
((TEMPSIZE[0] - orig.size[0]) // 2, (TEMPSIZE[1] - orig.size[1]) // 2),
)
dark = base.copy()
darken(dark)
g1 = base.copy()
transparency_threshold(g1)
white = dilate_colored(g1, (255, 255, 255, 0))
# white2 = dilate_colored(white, (255, 255, 255, 0))
yellow = dilate_colored(white, (0xFF, 0xFF, 0x00))
orange = dilate_colored(yellow, (0xFF, 0xBB, 0x00))
orange2 = dilate_colored(orange, (0xFF, 0x99, 0x00))
white = white.filter(ImageFilter.BLUR)
# white2 = white2.filter(ImageFilter.BLUR)
yellow = yellow.filter(ImageFilter.BLUR)
orange = orange.filter(ImageFilter.BLUR)
orange2 = orange2.filter(ImageFilter.BLUR)
multiply_alpha(yellow, 0.67)
multiply_alpha(orange, 0.50)
multiply_alpha(orange2, 0.25)
glow = Image.new("RGBA", TEMPSIZE, (0, 0, 0, 0))
glow = Image.alpha_composite(glow, orange2)
glow = Image.alpha_composite(glow, orange)
glow = Image.alpha_composite(glow, yellow)
# glow = Image.alpha_composite(glow, white2)
    # Rendering the white twice here is on purpose, to make the thin inner white
# area more distinct.
glow = Image.alpha_composite(glow, white)
glow = Image.alpha_composite(glow, white)
glow = Image.alpha_composite(glow, dark)
# Cut to final size.
glow = glow.crop(
(
(TEMPSIZE[0] - SIZE[0]) // 2,
(TEMPSIZE[1] - SIZE[1]) // 2,
SIZE[0] + (TEMPSIZE[0] - SIZE[0]) // 2,
SIZE[1] + (TEMPSIZE[1] - SIZE[1]) // 2,
)
)
return glow
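# Illustrative sketch of the intended pipeline (file names are hypothetical;
# load_icon_76x76() and save_icon() are defined further below):
#
#   im = load_icon_76x76("Tools.png")
#   save_icon(im, "Tools_Normal.png")
#   save_icon(add_glow(im), "Tools_Selected.png")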
# _application = None
def open_svg(path):
# global _application
# if _application is None:
# _application = QApplication(sys.argv)
renderer = QSvgRenderer(path)
image = QImage(64, 64, QImage.Format_RGBA8888)
image.fill(0x00000000)
painter = QPainter(image)
renderer.render(painter)
painter.end()
# del painter
return Image.frombytes(
"RGBA", (64, 64), image.bits().asstring(64 * 64 * 4)
)
def load_icon_76x76(path, overlay=None):
if path.endswith(".svg"):
orig = open_svg(path)
else:
orig = Image.open(path)
new = Image.new("RGBA", SIZE, (0, 0, 0, 0))
new.paste(
orig,
((SIZE[0] - orig.size[0]) // 2, (SIZE[1] - orig.size[1]) // 2),
)
if overlay:
# print("overlay", overlay)
# overlay_img = Image.new("RGBA", SIZE, (0, 0, 0, 0))
# return overlay_img
overlay_org = Image.open(overlay)
overlay_img = Image.new("RGBA", SIZE, (0, 0, 0, 0))
# return overlay_img
overlay_img.paste(
overlay_org,
(
(SIZE[0] - overlay_org.size[0]) // 2,
(SIZE[1] - overlay_org.size[1]) // 2,
),
)
new = Image.alpha_composite(new, overlay_img)
# overlay_path = os.path.dirname(path)
# if os.path.exists(overlay_path)
return new
def save_icon(im, name):
im.save(os.path.join(name))
def process_icon_in_directory(dirpath, overlay=None):
iconname = os.path.basename(dirpath)
print(iconname)
src1 = os.path.join(dirpath, iconname + ".svg")
if not os.path.exists(src1):
src1 = os.path.join(dirpath, iconname + ".png")
im1 = load_icon_76x76(src1, overlay=overlay)
src2 = os.path.join(dirpath, iconname + "_2.png")
save_icon(im1, os.path.splitext(dirpath)[0] + "_Normal.png")
if os.path.exists(src2):
im2 = load_icon_76x76(src2, overlay=overlay)
else:
im2 = im1
im2 = add_glow(im2)
save_icon(im2, os.path.splitext(dirpath)[0] + "_Selected.png")
def create_icon(output, sources, glow):
if len(sources) == 1:
src = sources[0]
overlay = None
else:
src, overlay = sources
# im = load_icon_76x76(src, overlay=overlay)
im = load_icon_76x76(src, overlay=False)
if glow:
im = add_glow(im)
if overlay:
# print("overlay", overlay)
# overlay_img = Image.new("RGBA", SIZE, (0, 0, 0, 0))
# return overlay_img
overlay_org = Image.open(overlay)
overlay_img = Image.new("RGBA", SIZE, (0, 0, 0, 0))
# return overlay_img
overlay_img.paste(
overlay_org,
(
(SIZE[0] - overlay_org.size[0]) // 2,
(SIZE[1] - overlay_org.size[1]) // 2,
),
)
# if glow:
# darken(overlay_img)
im = Image.alpha_composite(im, overlay_img)
save_icon(im, output)
# def add_to_json(argv, glow=False):
# import json
# path = argv[1]
# with open(path.split("/data")[0] + "/src/icons/icons.json") as f:
# doc = json.load(f)
# sources = []
# for arg in argv[2:]:
# p1 = os.path.join(os.getcwd(), arg)
# # print(p1)
# p2 = os.path.normpath(os.path.join(os.getcwd(), path.split("/data")[0]))
# # print(p2)
# p = p1[len(p2) + 1 + 10:]
# # print(p)
# # assert False
# sources.append(p)
# doc["data/" + path.split("/data/")[1]] = {
# "type": "glow" if "glow" else "normal",
# "sources": sources
# }
# with open(path.split("/data")[0] + "/src/icons/icons.json", "w") as f:
# json.dump(doc, f, sort_keys=True, indent=4)
def main():
if "--base" in sys.argv:
sys.argv.remove("--base")
# add_to_json(sys.argv)
return create_icon(sys.argv[1], sys.argv[2:], glow=False)
if "--glow" in sys.argv:
sys.argv.remove("--glow")
# add_to_json(sys.argv, glow=True)
return create_icon(sys.argv[1], sys.argv[2:], glow=True)
assert False
path = sys.argv[1]
if len(sys.argv) > 2:
overlay = sys.argv[2]
else:
overlay = None
if os.path.isdir(path):
process_icon_in_directory(path, overlay=overlay)
if __name__ == "__main__":
main()
| gpl-2.0 | -3,327,958,431,067,278,300 | 29.485119 | 82 | 0.559895 | false |
wawachoo/BDL | bdl/progress.py | 1 | 4630 | import threading
import copy
import time
from collections import namedtuple
ProgressState = namedtuple("ProgressState", ["count", "finished", "failed",
"percentage"])
class Progress:
def __init__(self, count=0, name=None):
"""Initializes object.
Arguments:
count (int, optional): Number of items to download. `< 1` means
that the number of items cannot be deduced.
name (str, optional): Name of the current operation.
"""
self.__lock = threading.Lock()
self.reset()
self.__count = count
self.__name = name
def reset(self):
"""Reset progress state.
"""
with self.__lock:
self.__entries = []
self.__currents = []
self.__finished = []
self.__failed = []
self.__count = 0
self.__name = None
@property
def count(self):
with self.__lock:
return copy.copy(self.__count)
@count.setter
def count(self, value):
with self.__lock:
self.__count = value
@property
def name(self):
with self.__lock:
return copy.copy(self.__name)
@name.setter
def name(self, value):
with self.__lock:
self.__name = value
def add(self, url, percentage=0):
"""Add an `url` to the progress list.
Arguments:
url (str): URL to add.
percentage (int, optional): URL download progress.
"""
with self.__lock:
            # Index that the new entry will occupy once appended below.
            curpos = len(self.__entries)
curtime = time.time()
self.__entries.append({
"url": url,
"begin": percentage > 0 and curtime or -1,
"end": percentage < 100 and curtime or -1,
"percentage": percentage})
self.__currents.append(curpos)
def __mark(self, url, new_container, **kwargs):
"""Add `url` in `new_container`.
Arguments:
url (str): Item to update.
new_container (object, None): Item new container.
**kwargs: Item values to update.
"""
with self.__lock:
            for pos, entry in enumerate(self.__entries):
                if entry["url"] == url:
# Removes from `current` and add to specified container.
if new_container is not None:
try:
self.__currents.remove(pos)
except ValueError:
pass
new_container.append(pos)
# Update values.
for key, value in kwargs.items():
entry[key] = value
def update(self, url, percentage=0):
"""Update an `url` progress percentage.
Arguments:
url (str): URL to update.
percentage (int, optional): Download progress.
"""
self.__mark(url, None, percentage=percentage)
def mark_finished(self, url):
"""Marks `url` as failed.
Arguments:
url (str): URL to mark as finished.
"""
self.__mark(url, self.__finished)
def mark_failed(self, url):
"""Marks `url` as failed.
Arguments:
url (str): URL to mark as failed.
"""
self.__mark(url, self.__failed)
@property
def total(self):
"""Returns global state.
"""
with self.__lock:
if self.__count > 0:
percentage = len(self.__entries) / self.__count * 100
else:
percentage = 0
return ProgressState(count=self.__count,
finished=len(self.__finished),
failed=len(self.__failed),
percentage=percentage)
def __get_container(self, container):
"""Returns state of selected `container`.
"""
with self.__lock:
items = []
for entry_pos in container:
items.append(copy.copy(self.__entries[entry_pos]))
return items
@property
def currents(self):
return self.__get_container(self.__currents)
@property
def finished(self):
return self.__get_container(self.__finished)
@property
def failed(self):
return self.__get_container(self.__failed)
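    # Illustrative usage sketch (URLs are hypothetical):
    #
    #   progress = Progress(count=2, name="download")
    #   progress.add("http://example.com/a.jpg")
    #   progress.add("http://example.com/b.jpg")
    #   progress.update("http://example.com/a.jpg", percentage=50)
    #   progress.mark_finished("http://example.com/a.jpg")
    #   progress.mark_failed("http://example.com/b.jpg")
    #   progress.total   # ProgressState(count=2, finished=1, failed=1,
    #                    #               percentage=100.0)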
| gpl-3.0 | -2,175,473,359,973,949,700 | 28.119497 | 76 | 0.484233 | false |
clinton-hall/nzbToMedia | libs/common/mutagen/_senf/__init__.py | 4 | 2775 | # -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
if os.name != "nt":
# make imports work
_winapi = object()
from ._fsnative import fsnative, path2fsn, fsn2text, fsn2bytes, \
bytes2fsn, uri2fsn, fsn2uri, text2fsn, fsn2norm
from ._print import print_, input_, supports_ansi_escape_codes
from ._stdlib import sep, pathsep, curdir, pardir, altsep, extsep, devnull, \
defpath, getcwd, expanduser, expandvars
from ._argv import argv
from ._environ import environ, getenv, unsetenv, putenv
from ._temp import mkstemp, gettempdir, gettempprefix, mkdtemp
fsnative, print_, getcwd, getenv, unsetenv, putenv, environ, expandvars, \
path2fsn, fsn2text, fsn2bytes, bytes2fsn, uri2fsn, fsn2uri, mkstemp, \
gettempdir, gettempprefix, mkdtemp, input_, expanduser, text2fsn, \
supports_ansi_escape_codes, fsn2norm
version = (1, 3, 4)
"""Tuple[`int`, `int`, `int`]: The version tuple (major, minor, micro)"""
version_string = ".".join(map(str, version))
"""`str`: A version string"""
argv = argv
"""List[`fsnative`]: Like `sys.argv` but contains unicode under
Windows + Python 2
"""
sep = sep
"""`fsnative`: Like `os.sep` but a `fsnative`"""
pathsep = pathsep
"""`fsnative`: Like `os.pathsep` but a `fsnative`"""
curdir = curdir
"""`fsnative`: Like `os.curdir` but a `fsnative`"""
pardir = pardir
"""`fsnative`: Like `os.pardir` but a fsnative"""
altsep = altsep
"""`fsnative` or `None`: Like `os.altsep` but a `fsnative` or `None`"""
extsep = extsep
"""`fsnative`: Like `os.extsep` but a `fsnative`"""
devnull = devnull
"""`fsnative`: Like `os.devnull` but a `fsnative`"""
defpath = defpath
"""`fsnative`: Like `os.defpath` but a `fsnative`"""
__all__ = []
| gpl-3.0 | 3,557,201,742,648,609,000 | 29.494505 | 77 | 0.713874 | false |
wtriplett/lonestar5_launch | launch_slurm_ls5.py | 1 | 5224 | #!/usr/bin/env python
# launch script for stampede
# deals with both command files for parametric launcher and with single commands
import argparse
import sys,os
from tempfile import *
import subprocess
import math
MAXCORES=4104
MAXNODES=171
# set up argument args
def launch_slurm_ls5 (serialcmd='', script_name='', runtime='01:00:00',
jobname='launch', projname='', queue='normal', email=False, qsubfile='',
keepqsubfile=False, ignoreuser=False, test=False, parser=[], c=[], max_cores_per_node=None,
verbose=0, hold=[], outfile=[], cwd=[], nodes=0, use_hyperthreading=True):
if use_hyperthreading:
ncores_per_node = 48
else:
ncores_per_node = 24
if max_cores_per_node is None:
max_cores_per_node = ncores_per_node
elif int(max_cores_per_node) > ncores_per_node:
print("Requested max cores per node (%s) exceeds available cores per node (%d)." \
% (max_cores_per_node, ncores_per_node))
if use_hyperthreading is False:
print("Enabling hyperthreading (--ht) would double the available cores per node.")
sys.exit()
max_cores_per_node = int(max_cores_per_node)
if len(serialcmd) > 0:
print('sorry, serial mode is not currently supported')
sys.exit(1)
#parametric = 0
#print('Running serial command: '+cmd)
#nnodes = 1
#parenv = '1way'
#queue = 'serial'
elif script_name:
parametric = 1
print('Submitting parametric job file: ' + script_name)
try:
f = open(script_name,'r')
except:
print('%s does not exist -e!' % script_name)
sys.exit(0)
script_cmds = f.readlines()
f.close()
ncmds = len(script_cmds)
print('found %d commands' % ncmds)
# need to check for empty lines
for s in script_cmds:
if s.strip() == '':
print('command file contains empty lines - please remove them first')
sys.exit()
if not nodes:
nodes = math.ceil(float(ncmds)/float(max_cores_per_node))
print('Number of compute nodes not specified - estimating as %d' % nodes)
if int(nodes) > MAXNODES:
print('Warning # of nodes exceeds max allowed (%d), reducing requested nodes to %d.' \
% (nodes, MAXNODES))
nodes=MAXNODES
else:
print('ERROR: you must either specify a script name (using -s) or a command to run\n\n')
sys.exit()
if not qsubfile:
qsubfile,qsubfilepath = mkstemp(prefix=jobname+"_",dir='.',suffix='.slurm',text=True)
os.close(qsubfile)
total_cores = max_cores_per_node*int(nodes)
print('Outputting qsub commands to %s' % qsubfilepath)
qsubfile = open(qsubfilepath,'w')
qsubfile.write('#!/bin/bash\n#\n')
qsubfile.write('# SLURM control file automatically created by launch\n')
if parametric == 1:
qsubfile.write('#SBATCH -N %d\n'%int(nodes))
else:
print('sorry - serial mode is not currently supported')
sys.exit(1)
#qsubfile.write('# Launching single command: %s\n#\n#\n'%cmd)
qsubfile.write('#SBATCH -J %s # Job Name\n'%jobname)
qsubfile.write('#SBATCH -o {0}.o%j # Name of the output file (eg. myMPI.oJobID)\n'.format(jobname))
qsubfile.write('#SBATCH -p %s\n' % queue)
qsubfile.write('#SBATCH -t %s\n' % runtime)
qsubfile.write('#SBATCH -n %d\n' % total_cores) #ncmds)
if type(hold) is str:
qsubfile.write("#SBATCH -d afterok")
qsubfile.write(":{0}".format(int(hold)))
qsubfile.write('\n')
if projname != "":
qsubfile.write("#SBATCH -A {0}\n".format(projname))
try:
waitfor
except:
waitfor = None
if waitfor:
qsubfile.write('#SBATCH -d %d\n' % waitfor)
qsubfile.write('#----------------\n# Job Submission\n#----------------\n')
#qsubfile.write('umask 2\n\n')
if not parametric:
# currently not supported...
qsubfile.write('\n\nset -x # Echo commands, use "set echo" with csh\n')
qsubfile.write(cmd+'\n')
else:
#qsubfile.write('module load launcher\n')
qsubfile.write('export LAUNCHER_PLUGIN_DIR=$LAUNCHER_DIR/plugins\n')
qsubfile.write('export LAUNCHER_RMI=SLURM\n')
qsubfile.write('export LAUNCHER_JOB_FILE=%s\n'%script_name)
#qsubfile.write('cd $WORKDIR\n')
#qsubfile.write('echo " WORKING DIR: $WORKDIR/"\n')
qsubfile.write('$LAUNCHER_DIR/paramrun\n')
qsubfile.write('echo " "\necho " Parameteric Job Complete"\necho " "\n')
qsubfile.close()
jobid = None
if not test:
process = subprocess.Popen('sbatch %s' % qsubfilepath, shell=True, stdout=subprocess.PIPE)
for line in process.stdout:
print(line.strip())
if line.find('Submitted batch job') == 0:
jobid=int(line.strip().split(' ')[3])
process.wait()
if not keepqsubfile:
print('Deleting qsubfile: %s'%qsubfilepath)
os.remove(qsubfilepath)
return jobid
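# Illustrative call (the command file, job name and allocation are hypothetical):
#
#   jobid = launch_slurm_ls5(script_name='commands.txt', runtime='02:00:00',
#                            jobname='myjob', projname='A-ccsc42', queue='normal')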
| mit | 4,205,118,558,536,959,500 | 33.368421 | 103 | 0.586332 | false |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/toolz/tests/test_signatures.py | 8 | 2927 | import functools
import toolz._signatures as _sigs
from toolz._signatures import builtins, _is_valid_args, _is_partial_args
from toolz.compatibility import PY3
def test_is_valid(check_valid=_is_valid_args, incomplete=False):
orig_check_valid = check_valid
check_valid = lambda func, *args, **kwargs: orig_check_valid(func, args, kwargs)
assert check_valid(lambda x: None) is None
f = builtins.abs
assert check_valid(f) is incomplete
assert check_valid(f, 1)
assert check_valid(f, x=1) is False
assert check_valid(f, 1, 2) is False
f = builtins.complex
assert check_valid(f)
assert check_valid(f, 1)
assert check_valid(f, real=1)
assert check_valid(f, 1, 2)
assert check_valid(f, 1, imag=2)
assert check_valid(f, 1, real=2) is False
assert check_valid(f, 1, 2, 3) is False
assert check_valid(f, 1, 2, imag=3) is False
f = builtins.int
assert check_valid(f)
assert check_valid(f, 1)
assert check_valid(f, x=1)
assert check_valid(f, 1, 2)
assert check_valid(f, 1, base=2)
assert check_valid(f, x=1, base=2)
assert check_valid(f, base=2) is incomplete
assert check_valid(f, 1, 2, 3) is False
f = builtins.map
assert check_valid(f) is incomplete
assert check_valid(f, 1) is incomplete
assert check_valid(f, 1, 2)
assert check_valid(f, 1, 2, 3)
assert check_valid(f, 1, 2, 3, 4)
f = builtins.min
assert check_valid(f) is incomplete
assert check_valid(f, 1)
assert check_valid(f, iterable=1) is False
assert check_valid(f, 1, 2)
assert check_valid(f, 1, 2, 3)
assert check_valid(f, key=None) is incomplete
assert check_valid(f, 1, key=None)
assert check_valid(f, 1, 2, key=None)
assert check_valid(f, 1, 2, 3, key=None)
assert check_valid(f, key=None, default=None) is (PY3 and incomplete)
assert check_valid(f, 1, key=None, default=None) is PY3
assert check_valid(f, 1, 2, key=None, default=None) is False
assert check_valid(f, 1, 2, 3, key=None, default=None) is False
f = builtins.range
assert check_valid(f) is incomplete
assert check_valid(f, 1)
assert check_valid(f, 1, 2)
assert check_valid(f, 1, 2, 3)
assert check_valid(f, 1, 2, step=3) is False
assert check_valid(f, 1, 2, 3, 4) is False
f = functools.partial
assert orig_check_valid(f, (), {}) is incomplete
assert orig_check_valid(f, (), {'func': 1}) is incomplete
assert orig_check_valid(f, (1,), {})
assert orig_check_valid(f, (1,), {'func': 1})
assert orig_check_valid(f, (1, 2), {})
def test_is_partial():
test_is_valid(check_valid=_is_partial_args, incomplete=True)
def test_for_coverage(): # :)
assert _sigs._is_arity(1, 1) is None
assert _sigs._is_arity(1, all)
assert _sigs._has_varargs(None) is None
assert _sigs._has_keywords(None) is None
assert _sigs._num_required_args(None) is None
| gpl-3.0 | -5,542,759,133,621,346,000 | 32.643678 | 84 | 0.650495 | false |
olebole/astrometry.net | sdss/yanny.py | 2 | 36973 | #
# yanny.py
#
# Python library for reading & writing yanny files.
#
# B. A. Weaver, NYU, 2008-10-20
#
# $Id: yanny.py 141452 2012-12-18 00:12:50Z weaver $
#
"""Python library for reading & writing yanny files.
yanny is an object-oriented interface to FTCL/yanny data files following
these specifications_.
The format of the returned object is similar to that returned by
``read_yanny()`` in the efftickle perl package (in the yannytools product).
Currently multidimensional arrays are only supported for type ``char``, but a
close reading of the specifications indicates that multidimensional arrays
were only ever intended to be supported for type ``char``.
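A quick usage sketch (the file and column names here are purely illustrative)::
    import yanny
    par = yanny.yanny('photoPlate.par')   # parse an existing par file
    cols = par.columns('MYSTRUCT')        # column names of one table
    ra = par['MYSTRUCT']['ra']            # values of one column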
.. _specifications: http://www.sdss3.org/dr8/software/par.php
"""
__author__ = 'Benjamin Weaver <[email protected]>'
__version__ = '$Revision: 141452 $'.split(': ')[1].split()[0]
__all__ = [ 'yanny', 'read_yanny', 'write_yanny', 'write_yanny_append' ]
__docformat__ = "restructuredtext en"
#
# Modules
#
import re
import os
import os.path
import datetime
import numpy
#
# Classes
#
class yanny(dict):
"""An object interface to a yanny file.
Most users will use the convenience functions defined in this package, but
this object provides a somewhat more powerful way of reading &
writing the data in a yanny file.
Attributes
----------
np : bool
If True, data in a yanny file will be converted into a NumPy record
array.
debug : bool
If True, some simple debugging statements will be turned on.
_filename : str
The name of a yanny parameter file.
_contents : str
The complete contents of a yanny parameter file.
_struct_type_caches : dict
A dictionary of dictionaries, one dictionary for every structure
definition in a yanny parameter file. Contains the types of
each column
_struct_isarray_caches : dict
A dictionary of dictionaries, one dictionary for every structure
definition in a yanny parameter file. Contains a boolean value
for every column.
_enum_cache : dict
Initially ``None``, this attribute is initialized the first time
the ``isenum()`` method is called. The keyword is the name of the
enum type, the value is a list of the possible values of that type.
Parameters
----------
filename : str
The name of a yanny file.
np : bool, optional
If True, data in a yanny file will be converted into a NumPy record
array. Default is False
debug : bool, optional
If True, some simple debugging statements will be turned on. Default
is False.
"""
#
#
#
@staticmethod
def get_token(string):
"""Removes the first 'word' from string.
If the 'word' is enclosed in double quotes, it returns the
contents of the double quotes. If the 'word' is enclosed in
braces, it returns the contents of the braces, but does not
attempt to split the array. If the 'word' is the last word of the
string, remainder is set equal to the empty string. This is
basically a wrapper on some convenient regular expressions.
Parameters
----------
string : str
A string containing words.
Returns
-------
get_token : tuple
A tuple containing the first word and the remainder of the string.
Examples
--------
>>> yanny.yanny.get_token("The quick brown fox")
        ('The', 'quick brown fox')
"""
if string[0] == '"':
(word, remainder) = re.search(r'^"([^"]*)"\s*(.*)',
string).groups()
elif string[0] == '{':
(word, remainder) = re.search(r'^\{\s*([^}]*)\s*\}\s*(.*)',
string).groups()
else:
try:
(word, remainder) = re.split(r'\s+',string,1)
except ValueError:
(word, remainder) = (string, '')
if remainder is None:
remainder = ''
return (word,remainder)
#
#
#
@staticmethod
def protect(x):
"""Used to appropriately quote string that might contain whitespace.
This method is mostly for internal use by the yanny object.
Parameters
----------
x : str
The data to protect.
Returns
-------
protect : str
The data with white space protected by quotes.
Examples
--------
>>> yanny.yanny.protect('This string contains whitespace.')
'"This string contains whitespace."'
"""
s = str(x)
if len(s) == 0 or re.search(r'\s+',s) is not None:
return '"' + s + '"'
else:
return s
#
#
#
@staticmethod
def dtype_to_struct(dt,structname='mystruct',enums=dict()):
"""Convert a NumPy dtype object describing a record array to
a typedef struct statement.
The second argument is the name of the structure.
If any of the columns are enum types, enums must
be a dictionary with the keys the column names, and the values
are a tuple containing the name of the enum type as the first item
and a tuple or list of possible values as the second item.
Parameters
----------
dt : numpy.dtype
The dtype of a NumPy record array.
structname : str, optional
The name to give the structure in the yanny file. Defaults to 'MYSTRUCT'.
enums : dict, optional
A dictionary containing enum information. See details above.
Returns
-------
dtype_to_struct : dict
A dictionary suitable for setting the 'symbols' dictionary of a new
yanny object.
Examples
--------
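        An illustrative sketch (not a strict doctest): given a small record
        dtype, the generated struct definition looks like::
            dt = numpy.dtype([('mag', 'f4', (5,)), ('b', 'S10')])
            yanny.yanny.dtype_to_struct(dt, structname='MAGNITUDES')['struct'][0]
            # 'typedef struct {\n float mag[5];\n char b[10];\n} MAGNITUDES;'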
"""
dtmap = {'i2':'short','i4':'int','i8':'long','f4':'float',
'f8':'double'}
returnenums = list()
for e in enums:
lines = list()
lines.append('typedef enum {')
for n in enums[e][1]:
lines.append(" {0},".format(n))
lines[-1] = lines[-1].strip(',')
lines.append('}} {0};'.format(enums[e][0].upper()))
returnenums.append("\n".join(lines))
#lines.append('')
lines = list()
lines.append('typedef struct {')
for c in dt.names:
if dt[c].kind == 'V':
t = dt[c].subdtype[0].str[1:]
l = dt[c].subdtype[1][0]
s = dt[c].subdtype[0].itemsize
else:
t = dt[c].str[1:]
l = 0
s = dt[c].itemsize
line = ' '
if t[0] == 'S':
if c in enums:
line += enums[c][0].upper()
else:
line += 'char'
else:
line += dtmap[t]
line += ' {0}'.format(c)
if l > 0:
line += "[{0:d}]".format(l)
if t[0] == 'S' and c not in enums:
line += "[{0:d}]".format(s)
line += ';'
lines.append(line)
lines.append('}} {0};'.format(structname.upper()))
return {structname.upper():list(dt.names),'enum':returnenums,'struct':["\n".join(lines)]}
#
#
#
def __init__(self,filename=None,np=False,debug=False):
"""Create a yanny object using a yanny file.
Create a yanny object using a yanny file, filename. If the file exists,
it is read, & the dict structure of the object will be basically the
same as that returned by ``read_yanny()`` in the efftickle package.
If the file does not exist, or if no filename is given, a blank
structure is returned. Other methods allow for subsequent writing
to the file.
"""
#
# The symbol hash is inherited from the old read_yanny
#
self['symbols'] = dict()
#
# Create special attributes that contain the internal status of the object
# this should prevent overlap with keywords in the data files
#
self._filename = ''
self._contents = ''
#
# Since the re is expensive, cache the structure types keyed by the field.
# Create a dictionary for each structure found.
#
self._struct_type_caches = dict()
self._struct_isarray_caches = dict()
self._enum_cache = None
#
# Optionally convert numeric data into NumPy arrays
#
self.np = np
#
# Turn on simple debugging
#
self.debug = debug
#
# If the file exists, read it
#
if filename is not None:
if os.access(filename,os.R_OK):
self._filename = filename
with open(filename,'r') as f:
self._contents = f.read()
self._parse()
return
#
#
#
def __str__(self):
"""Implement the ``str()`` function for yanny objects.
Simply prints the current contents of the yanny file.
"""
return self._contents
#
#
#
def __eq__(self,other):
"""Test two yanny objects for equality.
Two yanny objects are assumed to be equal if their contents are equal.
"""
if isinstance(other,yanny):
return str(other) == str(self)
return NotImplemented
#
#
#
def __ne__(self,other):
"""Test two yanny objects for inequality.
Two yanny objects are assumed to be unequal if their contents are unequal.
"""
if isinstance(other,yanny):
return str(other) != str(self)
return NotImplemented
#
#
#
def __nonzero__(self):
"""Give a yanny object a definite truth value.
A yanny object is considered ``True`` if its contents are non-zero.
"""
return len(self._contents) > 0
#
#
#
def type(self,structure,variable):
"""Returns the type of a variable defined in a structure.
Returns ``None`` if the structure or the variable is undefined.
"""
if structure not in self:
return None
if variable not in self.columns(structure):
return None
defl = list(filter(lambda x: x.find(structure.lower()) > 0,
self['symbols']['struct']))
defu = list(filter(lambda x: x.find(structure.upper()) > 0,
self['symbols']['struct']))
if len(defl) != 1 and len(defu) != 1:
return None
elif len(defl) == 1:
definition = defl
else:
definition = defu
#
# Added code to cache values to speed up parsing large files.
# 2009.05.11 / Demitri Muna, NYU
# Find (or create) the cache for this structure.
#
try:
cache = self._struct_type_caches[structure]
except KeyError:
self._struct_type_caches[structure] = dict()
cache = self._struct_type_caches[structure] # cache for one struct type
#
# Lookup (or create) the value for this variable
#
try:
var_type = cache[variable]
except KeyError:
if self.debug:
print(variable)
typere = re.compile(r'(\S+)\s+{0}([[<].*[]>]|);'.format(variable))
(typ,array) = typere.search(definition[0]).groups()
var_type = typ + array.replace('<','[').replace('>',']')
cache[variable] = var_type
return var_type
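    #
    # Illustrative example (structure and column names are hypothetical): for a
    # structure definition containing "float mag[5];",
    #
    #   >>> par.type('MYSTRUCT', 'mag')
    #   'float[5]'
    #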
#
#
#
def basetype(self,structure,variable):
"""Returns the bare type of a variable, stripping off any array
information."""
typ = self.type(structure,variable)
if self.debug:
print(variable, typ)
try:
return typ[0:typ.index('[')]
except ValueError:
return typ
#
#
#
def isarray(self,structure,variable):
"""Returns True if the variable is an array type.
For character types, this means a two-dimensional array,
*e.g.*: ``char[5][20]``.
"""
try:
cache = self._struct_isarray_caches[structure]
except KeyError:
self._struct_isarray_caches[structure] = dict()
cache = self._struct_isarray_caches[structure]
try:
result = cache[variable]
except KeyError:
typ = self.type(structure,variable)
character_array = re.compile(r'char[[<]\d*[]>][[<]\d*[]>]')
if ((character_array.search(typ) is not None) or
(typ.find('char') < 0 and (typ.find('[') >= 0
or typ.find('<') >= 0))):
cache[variable] = True
else:
cache[variable] = False
result = cache[variable]
return result
#
#
#
def isenum(self,structure,variable):
"""Returns true if a variable is an enum type.
"""
if self._enum_cache is None:
self._enum_cache = dict()
if 'enum' in self['symbols']:
for e in self['symbols']['enum']:
m = re.search(r'typedef\s+enum\s*\{([^}]+)\}\s*(\w+)\s*;',e).groups()
self._enum_cache[m[1]] = re.split(r',\s*',m[0].strip())
else:
return False
return self.basetype(structure,variable) in self._enum_cache
#
#
#
def array_length(self,structure,variable):
"""Returns the length of an array type or 1 if the variable is not
an array.
For character types, this is the length of a two-dimensional
array, *e.g.*, ``char[5][20]`` has length 5.
"""
if self.isarray(structure,variable):
typ = self.type(structure,variable)
return int(typ[typ.index('[')+1:typ.index(']')])
else:
return 1
#
#
#
def char_length(self,structure,variable):
"""Returns the length of a character field.
*e.g.* ``char[5][20]`` is an array of 5 strings of length 20.
Returns ``None`` if the variable is not a character type. If the
length is not specified, *i.e.* ``char[]``, it returns the length of
the largest string.
"""
typ = self.type(structure,variable)
if typ.find('char') < 0:
return None
try:
return int(typ[typ.rfind('[')+1:typ.rfind(']')])
except ValueError:
if self.isarray(structure,variable):
return max([max(map(len,r)) for r in self[structure][variable]])
else:
return max(map(len,self[structure][variable]))
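    #
    # Illustrative example (hypothetical column declared as "char name[5][20];"):
    #
    #   >>> par.array_length('MYSTRUCT', 'name')   # 5 strings
    #   5
    #   >>> par.char_length('MYSTRUCT', 'name')    # each up to 20 characters
    #   20
    #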
#
#
#
def dtype(self,structure):
"""Returns a NumPy dtype object suitable for describing a table as
a record array.
Treats enums as string, which is what the IDL reader does.
"""
dt = list()
dtmap = {'short':'i2', 'int':'i4', 'long':'i8', 'float':'f',
'double':'d' }
for c in self.columns(structure):
typ = self.basetype(structure,c)
if typ == 'char':
d = "S{0:d}".format(self.char_length(structure,c))
elif self.isenum(structure,c):
d = "S{0:d}".format(max(map(len,self._enum_cache[typ])))
else:
d = dtmap[typ]
if self.isarray(structure,c):
dt.append((c,d,(self.array_length(structure,c),)))
else:
dt.append((c,d))
dt = numpy.dtype(dt)
return dt
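    #
    # Sketch of the result (hypothetical structure containing "int id;" and
    # "char name[20];"); the exact repr depends on the NumPy version:
    #
    #   >>> par.dtype('MYSTRUCT')
    #   dtype([('id', '<i4'), ('name', 'S20')])
    #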
#
#
#
def convert(self,structure,variable,value):
"""Converts value into the appropriate (Python) type.
* ``short`` & ``int`` are converted to Python ``int``.
* ``long`` is converted to Python ``long``.
* ``float`` & ``double`` are converted to Python ``float``.
* Other types are not altered.
There may be further conversions into NumPy types, but this is the
first stage.
"""
typ = self.basetype(structure,variable)
if (typ == 'short' or typ == 'int'):
if self.isarray(structure,variable):
return map(int, value)
else:
return int(value)
if typ == 'long':
if self.isarray(structure,variable):
return map(long, value)
else:
return long(value)
if (typ == 'float' or typ == 'double'):
if self.isarray(structure,variable):
return map(float, value)
else:
return float(value)
return value
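    #
    # Illustrative example (hypothetical column of type "int"):
    #
    #   >>> par.convert('MYSTRUCT', 'id', '12')
    #   12
    #
    # Array columns are converted element by element via map(), and "long"
    # columns use the Python 2 long() builtin.
    #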
#
#
#
def tables(self):
"""Returns a list of all the defined structures.
This is just the list of keys of the object with the 'internal'
keys removed.
"""
foo = self['symbols'].keys()
foo.remove('struct')
foo.remove('enum')
return foo
#
#
#
def columns(self,table):
"""Returns an ordered list of column names associated with a particular
table.
The order is the same order as they are defined in the yanny file.
"""
foo = list()
if table in self['symbols']:
return self['symbols'][table]
return foo
#
#
#
def size(self,table):
"""Returns the number of rows in a table.
"""
foo = self.columns(table)
return len(self[table][foo[0]])
#
#
#
def pairs(self):
"""Returns a list of keys to keyword/value pairs.
Equivalent to doing ``self.keys()``, but with all the data tables &
other control structures stripped out.
"""
p = list()
foo = self.tables()
for k in self.keys():
if k == 'symbols' or k in foo:
continue
p.append(k)
return p
#
#
#
def row(self,table,index):
"""Returns a list containing a single row from a specified table in column order
If index is out of range, it returns an empty list.
If the yanny object instance is set up for NumPy record arrays, then
a single row can be obtained with::
>>> row0 = par['TABLE'][0]
"""
datarow = list()
if table in self and index >= 0 and index < self.size(table):
for c in self.columns(table):
datarow.append(self[table][c][index])
return datarow
#
#
#
def set_filename(self,newfile):
"""Updates the filename associated with the yanny object.
Use this if the object was created with no filename.
"""
self._filename = newfile
return
#
#
#
def list_of_dicts(self, table):
"""Construct a list of dictionaries.
Takes a table from the yanny object and constructs a list object
containing one row per entry. Each item in the list is a dictionary
keyed by the struct value names.
If the yanny object instance is set up for NumPy record arrays, then
the same functionality can be obtained with::
>>> foo = par['TABLE'][0]['column']
"""
return_list = list()
d = dict()
struct_fields = self.columns(table) # I'm assuming these are in order...
for i in range(self.size(table)):
one_row = self.row(table, i) # one row as a list
j = 0
for key in struct_fields:
d[key] = one_row[j]
j = j + 1
return_list.append(dict(d)) # append a new dict (copy of d)
return return_list
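    #
    # Illustrative example (hypothetical table 'MYSTRUCT' with columns 'id'
    # and 'name'):
    #
    #   >>> par.list_of_dicts('MYSTRUCT')[0]
    #   {'id': 1, 'name': 'first'}
    #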
#
#
#
def new_dict_from_pairs(self):
"""Returns a new dictionary of keyword/value pairs.
The new dictionary (*i.e.*, not a yanny object) contains the keys
that ``self.pairs()`` returns. There are two reasons this is convenient:
* the key 'symbols' that is part of the yanny object will not be present
* a simple yanny file can be read with no further processing
Example
-------
Read a yanny file and return only the pairs::
>>> new_dict = yanny.yanny(file).new_dict_from_pairs()
added: Demitri Muna, NYU 2009-04-28
"""
new_dictionary = dict()
for key in self.pairs():
new_dictionary[key] = self[key]
return new_dictionary
#
#
#
def write(self,*args):
"""Write a yanny object to a file.
This assumes that the filename used to create the object was not that
of a pre-existing file. If a file of the same name is detected,
this method will *not* attempt to overwrite it, but will print a warning.
This also assumes that the special 'symbols' key has been properly
created. This will not necessarily make the file very human-readable,
especially if the data lines are long. If the name of a new file is
given, it will write to the new file (assuming it doesn't exist).
If the writing is successful, the data in the object will be updated.
"""
if len(args) > 0:
newfile = args[0]
else:
if len(self._filename) > 0:
newfile = self._filename
else:
raise ValueError("No filename specified!")
basefile = os.path.basename(newfile)
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')
contents = "#\n# {0}\n#\n# Created by yanny.py\n#\n# {1}\n#\n".format(basefile,timestamp)
#
# Print any key/value pairs
#
for key in self.pairs():
contents += "{0} {1}\n".format(key,self[key])
#
# Print out enum definitions
#
if len(self['symbols']['enum']) > 0:
contents += "\n" + "\n\n".join(self['symbols']['enum']) + "\n"
#
# Print out structure definitions
#
if len(self['symbols']['struct']) > 0:
contents += "\n" + "\n\n".join(self['symbols']['struct']) + "\n"
contents += "\n"
#
# Print out the data tables
#
for sym in self.tables():
columns = self.columns(sym)
for k in range(self.size(sym)):
line = list()
line.append(sym)
for col in columns:
if self.isarray(sym,col):
datum = '{' + ' '.join(map(self.protect,self[sym][col][k])) + '}'
else:
datum = self.protect(self[sym][col][k])
line.append(datum)
contents += "{0}\n".format(' '.join(line))
#
# Actually write the data to file
#
if os.access(newfile,os.F_OK):
print("{0} exists, aborting write!".format(newfile))
print("For reference, here's what would have been written:")
print(contents)
else:
with open(newfile,'w') as f:
f.write(contents)
self._contents = contents
self._filename = newfile
self._parse()
return
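    #
    # Typical use (hypothetical file names), assuming 'old.par' exists and
    # 'new.par' does not:
    #
    #   >>> par = yanny('old.par')
    #   >>> par.write('new.par')
    #
    # If 'new.par' already exists, nothing is written and the would-be
    # contents are printed instead.
    #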
#
#
#
def append(self,datatable):
"""Appends data to an existing FTCL/yanny file.
Tries as much as possible to preserve the ordering & format of the
original file. The datatable should adhere to the format of the
yanny object, but it is not necessary to reproduce the 'symbols'
dictionary. It will not try to append data to a file that does not
exist. If the append is successful, the data in the object will be updated.
"""
if len(self._filename) == 0:
raise ValueError("No filename is set for this object. Use the set_filename method to set the filename!")
if type(datatable) != dict:
raise ValueError("Data to append is not of the correct type. Use a dict!")
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')
contents = ''
#
# Print any key/value pairs
#
for key in datatable.keys():
if key.upper() in self.tables() or key == 'symbols':
continue
contents += "{0} {1}\n".format(key, datatable[key])
#
# Print out the data tables
#
for sym in self.tables():
if sym.lower() in datatable:
datasym = sym.lower()
else:
datasym = sym
if datasym in datatable:
columns = self.columns(sym)
for k in range(len(datatable[datasym][columns[0]])):
line = list()
line.append(sym)
for col in columns:
if self.isarray(sym,col):
datum = '{' + ' '.join(map(self.protect,datatable[datasym][col][k])) + '}'
else:
datum = self.protect(datatable[datasym][col][k])
line.append(datum)
contents += "{0}\n".format(' '.join(line))
#
# Actually write the data to file
#
if len(contents) > 0:
contents = ("# Appended by yanny.py at {0}.\n".format(timestamp)) + contents
if os.access(self._filename,os.W_OK):
with open(self._filename,'a') as f:
f.write(contents)
self._contents += contents
self._parse()
else:
print("{0} does not exist, aborting append!".format(self._filename))
print("For reference, here's what would have been written:")
print(contents)
else:
print("Nothing to be appended!")
return
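    #
    # Illustrative append (hypothetical table and column names); the keys of
    # the data table should match an existing structure in the file:
    #
    #   >>> par.append({'MYSTRUCT': {'id': [3], 'name': ['third']}})
    #
    # The file named by self._filename must already exist and be writable,
    # otherwise the would-be contents are only printed.
    #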
#
#
#
def _parse(self):
"""Converts text into tables that users can use.
This method is for use internally by the yanny object. It is not
meant to be called by users.
Parsing proceeds in this order:
#. Lines that end with a backslash character ``\`` are reattached
to following lines.
#. Structure & enum definitions are identified, saved into the
'symbols' dictionary & stripped from the contents.
#. Structure definitions are interpreted.
#. At this point, the remaining lines of the original file can only
contain these things:
* 'blank' lines, including lines that only contain comments
* keyword/value pairs
* structure rows
#. The remaining lines are scanned sequentially.
#. 'Blank' lines are identified & ignored.
#. Whitespace & comments are stripped from non-blank lines.
#. Empty double braces ``{{}}`` are converted into empty double
quotes ``""``.
#. If the first word on a line matches the name of a structure,
the line is broken up into tokens & each token or set of tokens
(for arrays) is converted to the appropriate Python type.
#. If the first word on a line does not match the name of a
structure, it must be a keyword, so this line is interpreted
as a keyword/value pair. No further processing is done to
the value.
#. At the conclusion of parsing, if ``self.np`` is ``True``, the
structures are converted into NumPy record arrays.
"""
#
# there are five things we might find
# 1. 'blank' lines including comments
# 2. keyword/value pairs (which may have trailing comments)
# 3. enumeration definitions
# 4. structure definitions
# 5. data
#
lines = self._contents
#
# Reattach lines ending with \
#
lines = re.sub(r'\\\s*\n',' ',lines)
#
# Find structure & enumeration definitions & strip them out
#
self['symbols']['struct'] = re.findall(r'typedef\s+struct\s*\{[^}]+\}\s*\w+\s*;',lines)
self['symbols']['enum'] = re.findall(r'typedef\s+enum\s*\{[^}]+\}\s*\w+\s*;',lines)
lines = re.sub(r'typedef\s+struct\s*\{[^}]+\}\s*\w+\s*;','',lines)
lines = re.sub(r'typedef\s+enum\s*\{[^}]+\}\s*\w+\s*;','',lines)
#
# Interpret the structure definitions
#
typedefre = re.compile(r'typedef\s+struct\s*\{([^}]+)\}\s*(\w*)\s*;')
for typedef in self['symbols']['struct']:
typedefm = typedefre.search(typedef)
(definition,name) = typedefm.groups()
self[name.upper()] = dict()
self['symbols'][name.upper()] = list()
definitions = re.findall(r'\S+\s+\S+;',definition)
for d in definitions:
d = d.replace(';','')
(datatype,column) = re.split(r'\s+',d)
column = re.sub(r'[[<].*[]>]$','',column)
self['symbols'][name.upper()].append(column)
self[name.upper()][column] = list()
comments = re.compile(r'^\s*#') # Remove lines containing only comments
blanks = re.compile(r'^\s*$') # Remove lines containing only whitespace
trailing_comments = re.compile(r'\s*\#.*$') # Remove trailing comments
double_braces = re.compile(r'\{\s*\{\s*\}\s*\}') # Double empty braces get replaced with empty quotes
if len(lines) > 0:
for line in lines.split('\n'):
if self.debug:
print(line)
if len(line) == 0:
continue
if comments.search(line) is not None:
continue
if blanks.search(line) is not None:
continue
#
# Remove leading & trailing blanks & comments
#
line = line.strip()
line = trailing_comments.sub('',line)
line = double_braces.sub('""',line)
#
# Now if the first word on the line does not match a
# structure definition it is a keyword/value pair
#
(key, value) = self.get_token(line)
uckey = key.upper()
if uckey in self['symbols'].keys():
#
# Structure data
#
for column in self['symbols'][uckey]:
if len(value) > 0 and blanks.search(value) is None:
(data,value) = self.get_token(value)
if self.isarray(uckey,column):
#
# An array value
# if it's character data, it won't be
# delimited by {} unless it is a multidimensional
# string array. It may or may not be delimited
# by double quotes
#
# Note, we're assuming here that the only
# multidimensional arrays are string arrays
#
arraydata = list()
while len(data) > 0:
(token, data) = self.get_token(data)
arraydata.append(token)
self[uckey][column].append(
self.convert(uckey,column,arraydata))
else:
#
# A single value
#
self[uckey][column].append(
self.convert(uckey,column,data))
else:
break
else:
#
# Keyword/value pair
#
self[key] = value
#
# If self.np is True, convert tables into NumPy record arrays
#
if self.np:
for t in self.tables():
record = numpy.zeros((self.size(t),),dtype=self.dtype(t))
for c in self.columns(t):
record[c] = self[t][c]
self[t] = record
return
#
# Functions
#
def read_yanny(filename):
"""Reads the contents of an FTCL/yanny file & returns the data in a dictionary.
This is just a convenience wrapper on a yanny object, for use when a
user is not interested in changing the contents of a yanny object.
Parameters
----------
filename : str
The name of a parameter file.
Returns
-------
par : dict
A copy of the yanny object.
Examples
--------
"""
par = yanny(filename)
return par.copy()
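#
# Illustrative example (hypothetical file name):
#
#   >>> data = read_yanny('test.par')
#   >>> data.keys()
#
# The return value is a plain dict copy, so changes to it do not affect any
# file on disk.
#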
#
#
#
def write_yanny(filename,datatable):
"""Writes the contents of a dictionary to an FTCL/yanny file.
Ideally used in conjunction with read_yanny() to create an initial
dictionary of the appropriate format.
Parameters
----------
filename : str
The name of a parameter file.
datatable : dict
A dictionary containing data that can be copied into a yanny object.
Returns
-------
par : yanny.yanny
The yanny object resulting from writing the file.
Examples
--------
"""
par = yanny(filename)
for key in datatable:
par[key] = datatable[key]
par.write(filename)
return par
#
#
#
def write_yanny_append(filename,datatable):
"""Appends the contents of a dictionary to an existing FTCL/yanny file.
Ideally used in conjunction with read_yanny() to create an initial
dictionary of the appropriate format.
Parameters
----------
filename : str
The name of a parameter file.
datatable : dict
A dictionary containing data that can be copied into a yanny object.
Returns
-------
par : yanny.yanny
The yanny object resulting from appending the file.
Examples
--------
"""
par = yanny(filename)
par.append(datatable)
return par
#
#
#
def write_ndarray_to_yanny(filename,datatable,structname='mystruct',enums=dict(),hdr=dict()):
"""Converts a NumPy record array into a new FTCL/yanny file.
Returns a new yanny object corresponding to the file.
Parameters
----------
filename : str
The name of a parameter file.
datatable : numpy.ndarray
A NumPy record array containing data that can be copied into a yanny object.
structname : str, optional
The name to give the structure in the yanny file. Defaults to 'MYSTRUCT'.
enums : dict, optional
A dictionary containing enum information. See details above.
hdr : dict, optional
A dictionary containing keyword/value pairs for the 'header' of the yanny file.
Returns
-------
par : yanny.yanny
The yanny object resulting from writing the file.
Examples
--------
"""
par = yanny(filename,np=True,debug=True)
par['symbols'] = par.dtype_to_struct(datatable.dtype,structname=structname,enums=enums)
par[structname.upper()] = datatable
for key in hdr:
par[key] = hdr[key]
par.write(filename)
return par
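#
# Rough sketch (hypothetical file and column names):
#
#   >>> import numpy
#   >>> rec = numpy.zeros((2,), dtype=[('id', 'i4'), ('name', 'S10')])
#   >>> par = write_ndarray_to_yanny('new.par', rec, structname='mytable')
#
# The target file must not already exist, otherwise write() only prints the
# would-be contents.
#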
#
#
#
def main():
"""Used to test the yanny class.
"""
par = yanny(os.path.join(os.getenv('YANNYTOOLS_DIR'),'data','test.par'),
np=True,debug=True)
print(par.pairs())
for p in par.pairs():
print("{0} => {1}".format(p, par[p]))
print(par.keys())
print(par['symbols'].keys())
print(par['symbols']['struct'])
print(par['symbols']['enum'])
print(par.tables())
for t in par.tables():
print(par.dtype(t))
print("{0}: {1:d} entries".format(t,par.size(t)))
print(par.columns(t))
for c in par.columns(t):
print("{0}: type {1}".format(c,par.type(t,c)))
print(par[t][c])
if par.isenum('MYSTRUCT','new_flag'):
print(par._enum_cache)
par.write() # This should fail, since test.par already exists.
datatable = {'status_update': {'state':['SUCCESS', 'SUCCESS'],
'timestamp':['2008-06-22 01:27:33','2008-06-22 01:27:36']},
'new_keyword':'new_value'}
par.set_filename(os.path.join(os.getenv('YANNYTOOLS_DIR'),'data','test_append.par'))
par.append(datatable) # This should also fail, because test_append.par does not exist
return
#
# Testing purposes
#
if __name__ == '__main__':
main()
| bsd-3-clause | -4,673,871,006,453,126,000 | 33.393488 | 116 | 0.531117 | false |
ebagdasa/tempest | tempest/api/object_storage/test_healthcheck.py | 5 | 1669 | # Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Joe H. Rahme <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.object_storage import base
from tempest.common import custom_matchers
from tempest import test
class HealthcheckTest(base.BaseObjectTest):
@classmethod
def resource_setup(cls):
super(HealthcheckTest, cls).resource_setup()
def setUp(self):
super(HealthcheckTest, self).setUp()
# Turning http://.../v1/foobar into http://.../
self.account_client.skip_path()
@test.attr('gate')
def test_get_healthcheck(self):
resp, _ = self.account_client.get("healthcheck", {})
# The target of the request is not any Swift resource. Therefore, the
# existence of response header is checked without a custom matcher.
self.assertIn('content-length', resp)
self.assertIn('content-type', resp)
self.assertIn('x-trans-id', resp)
self.assertIn('date', resp)
# Check only the format of common headers with custom matcher
self.assertThat(resp, custom_matchers.AreAllWellFormatted())
| apache-2.0 | -6,382,049,158,230,826,000 | 35.282609 | 77 | 0.703415 | false |
repotvsupertuga/repo | plugin.video.projectxwizard/parameters.py | 4 | 3239 | import xbmc, xbmcaddon, xbmcgui, xbmcplugin,os,base64,sys,xbmcvfs
import urllib2,urllib
############################
###GET PARAMS###############
############################
def get_params():
url=None
name=None
buildname=None
updated=None
author=None
version=None
mode=None
iconimage=None
description=None
video=None
link=None
skins=None
videoaddons=None
audioaddons=None
programaddons=None
audioaddons=None
sources=None
local=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
guisettingslink=urllib.unquote_plus(params["guisettingslink"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
iconimage=urllib.unquote_plus(params["iconimage"])
except:
pass
try:
fanart=urllib.unquote_plus(params["fanart"])
except:
pass
try:
mode=str(params["mode"])
except:
pass
try:
link=urllib.unquote_plus(params["link"])
except:
pass
try:
skins=urllib.unquote_plus(params["skins"])
except:
pass
try:
videoaddons=urllib.unquote_plus(params["videoaddons"])
except:
pass
try:
audioaddons=urllib.unquote_plus(params["audioaddons"])
except:
pass
try:
programaddons=urllib.unquote_plus(params["programaddons"])
except:
pass
try:
pictureaddons=urllib.unquote_plus(params["pictureaddons"])
except:
pass
try:
local=urllib.unquote_plus(params["local"])
except:
pass
try:
sources=urllib.unquote_plus(params["sources"])
except:
pass
try:
adult=urllib.unquote_plus(params["adult"])
except:
pass
try:
buildname=urllib.unquote_plus(params["buildname"])
except:
pass
try:
updated=urllib.unquote_plus(params["updated"])
except:
pass
try:
version=urllib.unquote_plus(params["version"])
except:
pass
try:
author=urllib.unquote_plus(params["author"])
except:
pass
try:
description=urllib.unquote_plus(params["description"])
except:
pass
try:
video=urllib.unquote_plus(params["video"])
except:
pass
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
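#
# Illustrative example of what the final loop produces (hypothetical
# invocation): if sys.argv[2] is "?mode=1&name=Foo", the returned dict is
# {'mode': '1', 'name': 'Foo'}. Values are only split on '='; any URL
# unquoting is left to the caller.
#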
def params_end():
url=None
name=None
mode=None
iconimage=None
fanart=None
description=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
iconimage=urllib.unquote_plus(params["iconimage"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
try:
fanart=urllib.unquote_plus(params["fanart"])
except:
pass
try:
description=urllib.unquote_plus(params["description"])
except:
pass | gpl-2.0 | -3,997,719,982,741,725,000 | 19 | 68 | 0.615005 | false |
centaurialpha/ninja-ide | ninja_ide/__init__.py | 1 | 2597 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
###############################################################################
# METADATA
###############################################################################
__prj__ = "NINJA-IDE"
__author__ = "The NINJA-IDE Team"
__mail__ = "ninja-ide at googlegroups dot com"
__url__ = "http://www.ninja-ide.org"
__source__ = "https://github.com/ninja-ide/ninja-ide"
__version__ = "3.0-alpha"
__license__ = "GPL3"
###############################################################################
# DOC
###############################################################################
"""NINJA-IDE is a cross-platform integrated development environment (IDE).
NINJA-IDE runs on Linux/X11, Mac OS X and Windows desktop operating systems,
and allows developers to create applications for several purposes using all the
tools and utilities of NINJA-IDE, making the task of writing software easier
and more enjoyable.
"""
###############################################################################
# SET PYQT API 2
###############################################################################
# import sip
# API_NAMES = ["QDate", "QDateTime", "QString", "QTime", "QUrl", "QTextStream",
# "QVariant"]
# API_VERSION = 2
# for name in API_NAMES:
# sip.setapi(name, API_VERSION)
###############################################################################
# START
###############################################################################
def setup_and_run():
"""Load the Core module and trigger the execution."""
# import only on run
# Dont import always this, setup.py will fail
from ninja_ide import core
from ninja_ide import nresources # lint:ok
from multiprocessing import freeze_support
# Used to support multiprocessing on windows packages
freeze_support()
# Run NINJA-IDE
core.run_ninja()
| gpl-3.0 | -133,706,708,978,192,640 | 35.069444 | 79 | 0.530997 | false |
stephen144/odoo | addons/gamification/wizard/grant_badge.py | 47 | 1391 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.exceptions import UserError
class grant_badge_wizard(osv.TransientModel):
""" Wizard allowing to grant a badge to a user"""
_name = 'gamification.badge.user.wizard'
_columns = {
'user_id': fields.many2one("res.users", string='User', required=True),
'badge_id': fields.many2one("gamification.badge", string='Badge', required=True),
'comment': fields.text('Comment'),
}
def action_grant_badge(self, cr, uid, ids, context=None):
"""Wizard action for sending a badge to a chosen user"""
badge_user_obj = self.pool.get('gamification.badge.user')
for wiz in self.browse(cr, uid, ids, context=context):
if uid == wiz.user_id.id:
raise UserError(_('You can not grant a badge to yourself'))
#create the badge
values = {
'user_id': wiz.user_id.id,
'sender_id': uid,
'badge_id': wiz.badge_id.id,
'comment': wiz.comment,
}
badge_user = badge_user_obj.create(cr, uid, values, context=context)
result = badge_user_obj._send_badge(cr, uid, badge_user, context=context)
return result
| agpl-3.0 | 8,782,658,006,548,667,000 | 35.605263 | 89 | 0.603882 | false |
initOS/odoo-addons | account_product_gross_net/models/product.py | 1 | 4387 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp import models, fields, api
from openerp.tools.float_utils import float_round
import addons.decimal_precision as dp
import logging
_logger = logging.getLogger(__name__)
class product_category(models.Model):
_inherit = "product.category"
brut_net_factor = fields.Float(string='Gross/Net Ratio', default=1)
class product_template(models.Model):
_inherit = 'product.template'
list_price = fields.Float(
compute='_compute_net_price',
)
lst_price_brut = fields.Float(
string='Gross selling price',
digits_compute=dp.get_precision('Product Price'),
)
brut_net_factor = fields.Float(string='Gross/Net Ratio', default=1)
def get_list_price_factor(self, product, request):
cr, uid, context, registry = request.cr, request.uid, request.context, request.registry
orm_user = registry.get('res.users')
partner = orm_user.browse(cr, SUPERUSER_ID, request.uid, context).partner_id
factor = 1
if hasattr(partner.property_account_position, 'b2c_fiscal_position') and partner.property_account_position.b2c_fiscal_position:
if product.brut_net_factor > 0:
factor = product.brut_net_factor
return factor
@api.onchange('categ_id')
def onchange_product_category(self):
if self.categ_id.brut_net_factor > 0:
self.brut_net_factor = self.categ_id.brut_net_factor
@api.one
@api.depends('lst_price_brut', 'brut_net_factor')
def _compute_net_price(self):
if self.brut_net_factor:
self.list_price = self.lst_price_brut / self.brut_net_factor
if 'request' not in self.env.context and 'uid' in self.env.context:
variants_attribute_prices = self.env['product.attribute.price'].search(
[('product_tmpl_id', '=', self.id)]
)
for attribute_price in variants_attribute_prices:
value = attribute_price.value_id
_logger.debug("Variant: %s", value.name)
if value:
price_extra = value.with_context(active_id=self.id).price_extra
value.with_context(source='template').sudo().write({
'lst_price_brut': (
self.list_price + price_extra
) * self.brut_net_factor
})
class product_product(models.Model):
_inherit = 'product.product'
@api.onchange('categ_id')
def onchange_product_category(self):
if self.categ_id.brut_net_factor > 0:
self.product_tmpl_id.write({'brut_net_factor': self.categ_id.brut_net_factor})
self.product_tmpl_id._compute_net_price()
self.lst_price = self.lst_price_brut / self.categ_id.brut_net_factor
class product_attribute_value(models.Model):
_inherit = "product.attribute.value"
lst_price_brut = fields.Float(
string='Gross selling price',
digits_compute=dp.get_precision('Product Price'),
)
@api.multi
def write(self, vals):
if self.env.context.get('active_id', False):
price_att = self.env['product.attribute.price'].search([('product_tmpl_id', '=', self.env.context['active_id']), ('value_id', '=', self.id)])
if price_att:
template = price_att.product_tmpl_id
if 'price_extra' in vals:
vals['lst_price_brut'] = template.lst_price_brut + vals['price_extra'] * template.brut_net_factor
elif 'lst_price_brut' in vals:
vals['price_extra'] = (vals['lst_price_brut'] - template.lst_price_brut) / template.brut_net_factor
elif 'lst_price_brut' in vals:
del vals['lst_price_brut']
return super(product_attribute_value, self).write(vals)
class product_attribute_price(models.Model):
_inherit = 'product.attribute.price'
lst_price_brut = fields.Float(
string='Gross selling price',
related='value_id.lst_price_brut'
)
@api.model
def create(self, vals):
template = self.product_tmpl_id.browse(vals['product_tmpl_id'])
vals['lst_price_brut'] = template.lst_price_brut + float(vals['price_extra']) * template.brut_net_factor
return super(product_attribute_price, self).create(vals)
| agpl-3.0 | 6,176,068,589,276,220,000 | 37.147826 | 153 | 0.615911 | false |
JonathanSwaim/Tjagonaj_Translator | en/_en-test.py | 9 | 13466 | # Put the en library in the same folder as your script so NodeBox can find
# the library. It takes some time to load all the data the first time.
try:
# This is the statement you normally use.
en = ximport("en")
except:
# But since these examples are "inside" the library
# we may need to try something different when
# the library is not located in /Application Support
en = ximport("__init__")
# LEXICAL CATEGORIZATION ############################################################
# Returns True when the given value is a number.
print 1, en.is_number(12)
print 2, en.is_number("twelve")
# Returns True when the given string is a noun.
# You can also check for is_verb(), is_adjective() and is_adverb().
print 3, en.is_noun("banana")
# Returns True when the given string is a tag,
# for example HTML or XML.
print 4, en.is_tag("</a>")
# Return True when the string is a HTML tag,
# for example <a> or <body>.
print 5, en.is_html_tag("</person>")
# COMMONSENSE #######################################################################
# Returns True if the given word expresses a basic emotion:
# anger, disgust, fear, joy, sadness, surprise.
print 6, en.is_basic_emotion("cheerful")
# Returns True if the given word is a magic word:
# you, money, save, new, results, health, easy, ...
print 7, en.is_persuasive("money")
# Returns True if the word is a connective:
# nevertheless, whatever, secondly, ...
# and words like I, the, own, him which have little semantical value.
print 8, en.is_connective("but")
# NUMBERS ###########################################################################
# Returns the ordinal of the given number,
# 100 -> 100th, 3 -> 3rd
# twenty-one -> twenty-first
print 9, en.number.ordinal(100)
print 10, en.number.ordinal("twenty-one")
# Writes out the given number:
# 25 -> twenty-five
print 11, en.number.spoken(25)
# QUANTIFICATION ####################################################################
# Quantifies the given word:
# 10 and chickens -> a number of chickens
# 300 and chickens -> hundreds of chickens
print 12, en.number.quantify(800, "chicken")
# Quantifies a list of words:
# several chickens, a pair of geese and a duck
# Notice how goose is correctly pluralized and duck has the right article.
print 13, en.list.conjunction(["goose", "goose", "duck", "chicken", "chicken", "chicken"])
# Quantifies the types of things in the given list:
# several integers
print 14, en.list.conjunction((1,2,3,4,5), generalize=True)
# You can also quantify a library:
# en.list.conjunction(en, generalize=True) ->
# a number of modules, a number of functions, a number of strings,
# a pair of lists, a pair of dictionaries, an en verb, an en sentence,
# an en number, an en noun, an en list, an en content, an en adverb,
# an en adjective, a None type and a DrawingPrimitives Context
# INDEFINITE ARTICLE ################################################################
# Returns the noun with its indefinite article
# university -> a university
# owl -> an owl
# hour -> an hour
print 15, en.noun.article("university")
# PLURALIZATION #####################################################################
# Pluralizes the given noun:
# kitchen knife -> kitchen knives
# part-of-speech -> parts-of-speech
# wolf -> wolves
# child -> children
# You can also do en.adjective.plural().
print 16, en.noun.plural("dog")
# EMOTIONAL VALUE ###################################################################
# Guesses whether the given noun expresses an emotion,
# by checking if there are synonyms of the word that
# are basic emotions.
# Return True or False by default.
print 17, en.noun.is_emotion("anger")
# Or you can return a string which provides some information
# anxious -> fear
# An additional optional parameter shallow=True
# speeds up the lookup process but doesn't check as many synonyms.
# You can also use verb.is_emotion(), adjective.is_emotion() and adverb.is_emotion()
print 18, en.adjective.is_emotion("anxious", boolean=False)
# WORDNET ###########################################################################
# WordNet describes semantic relations between synonym sets.
# Returns the dictionary description:
print 19, en.noun.gloss("book")
# A word can have multiple senses,
# for example "tree" can mean a tree in a forest but also a tree diagram,
# or a person named Sir Herbert Beerbohm Tree:
print 20, en.noun.senses("tree")
# Return the dictionary entry for tree as in tree diagram:
print 21, en.noun.gloss("tree", sense=1)
# Return a categorization for the given word:
# book -> communication
print 22, en.noun.lexname("book")
# Return examples of the given word:
# vehicle -> bumper car, craft, rocket, skibob, sled, steamroller, ...
print 23, en.noun.hyponym("vehicle")
print 24, en.noun.hyponym("tree", sense=1)
# Return abstractions of the given word:
# earth -> terrestrial planet
# earth as in dirt -> material
print 25, en.noun.hypernym("earth")
print 26, en.noun.hypernym("earth", sense=1)
# You can also execute a deep query on hypernyms and hyponyms.
# Notice how returned values become more and more abstract:
# vehicle -> transport -> intrumentation -> artifact -> unit -> physical object -> entity
print 27, en.noun.hypernyms("vehicle", sense=0)
# Return components of the given word:
# computer -> chip, diskette, keyboard, monitor, ...
print 28, en.noun.holonym("computer")
# Return the collection in which the given word can be found:
# tree -> forest
print 29, en.noun.meronym("tree")
# Return the semantic opposite of the word:
# black -> white
print 30, en.noun.antonym("black")
# Find out what two words have in common:
# cat and dog -> carnivore
print 31, en.noun.meet("cat", "dog", sense1=0, sense2=0)
# Return an absurd description for the word:
# typography -> a business deal on a trivial scale
print 32, en.noun.absurd_gloss("typography")
# The return value of a WordNet command is usually a list
# containing other lists of related words, for example:
# [['tree'], ['tree', 'tree diagram'], ['Tree', 'Sir Herbert Beerbohm Tree']]
# You can use the en.list.flatten() command to flatten the list:
print 33, en.list.flatten(en.noun.senses("tree"))
# -> ['tree', 'tree', 'tree diagram', 'Tree', 'Sir Herbert Beerbohm Tree']
# If you want a list of all nouns/verbs/adjectives/adverbs there's the
# en.wordnet.all_nouns(), en.wordnet.all_verbs() ... commands:
print 34, len(en.wordnet.all_nouns())
# All of the commands shown here for nouns are also available for verbs, adjectives and adverbs,
# en.verbs.hypernyms("run"), en.adjective.gloss("beautiful") etc. are valid commands.
# VERB CONJUGATION ##################################################################
# NodeBox English Linguistics knows the verb tenses for about 10000 English verbs.
# Return the infinitive:
# swimming -> swim
print 35, en.verb.infinitive("swimming")
# Return the present tense, for the given person:
# gave -> give
# gave -> he gives
print 36, en.verb.present("gave")
print 37, en.verb.present("gave", person=3, negate=False)
# Known values for person are 1, 2, 3, "1st", "2nd", "3rd", "plural", "*".
# Just use the one you like most.
# Return the present participle tense
# be -> being
print 38, en.verb.present_participle("be")
# Return the past tense:
# give -> gave
# be -> I wasn't
print 39, en.verb.past("give")
print 40, en.verb.past("be", person=1, negate=True)
# Return the past participle tense:
# be -> been
print 41, en.verb.past_participle("be")
# a list of all possible tenses:
print 42, en.verb.tenses()
# Returns the tense of the given verb:
# was -> 1st singular past
print 43, en.verb.tense("was")
# Returns True if the given verb is in the given tense:
print 44, en.verb.is_tense("wasn't", "1st singular past", negated=True)
print 45, en.verb.is_present("does", person=1)
print 46, en.verb.is_present_participle("doing")
print 47, en.verb.is_past_participle("done")
# SHALLOW PARSING ###################################################################
# NodeBox English Linguistics is able to do sentence structure analysis using a
# combination of Jason Wiener's tagger and NLTK's chunker.
# The tagger assigns a part-of-speech tag to each word in the sentence using Brill's
# lexicon. A "postag" is something like NN or VBP marking words as nouns, verbs,
# determiners, pronouns, etc. The chunker is then able to group syntactic units
# in the sentence. A syntactic unit is a determiner followed by adjectives followed
# by a noun, for example, "the tasty little chicken" is a syntactic unit.
# Tag the given sentence.
# The return value is a list of (word, tag) tuples.
print 48, en.sentence.tag("this is so cool")
# -> this/DT is/VBZ so/RB cool/JJ
# There are lots of part-of-speech tags and it takes some time getting to know them.
# This function returns a (description, examples) tuple for a given tag:
# NN -> ('noun, singular or mass', 'tiger, chair, laughter')
print 49, en.sentence.tag_description("NN")
# Returns the chunked sentence:
# For example:
# we are going to school ->
# [['SP',
# ['NP', ('we', 'PRP')],
# ['AP',
# ['VP', ('are', 'VBP'), ('going', 'VBG'), ('to', 'TO')],
# ['NP', ('school', 'NN')]]]]
# Now what does all this mean?
# NP are noun phrases, syntactic units describing a noun, for example: a big fish.
# VP are verb phrases, units of verbs and auxillaries, for example: are going to.
# AP is a verb/argument structure, a verb phrase and a noun phrase being influenced.
# SP is a subject structure: a noun phrase which is the executor of a verb phrase
# or verb/argument structure.
from pprint import pprint
print 50
pprint( en.sentence.chunk("he is always trying to feed her with lies") )
# A handy traverse(sentence, cmd) command lets you feed a chunked sentence
# to your own command chunk by chunk:
print 51
s = "we are going to school"
def callback(chunk, token, tag):
if chunk != None : print en.sentence.tag_description(chunk)[0].upper()
if chunk == None : print token, "("+en.sentence.tag_description(tag)[0]+")"
print ""
en.sentence.traverse(s, callback)
print ""
# Find tag patterns in sentences.
print 52, en.sentence.find("The quick brown fox jumped over the lazy dog?", "(JJ) JJ NN")
print 53, en.sentence.find("The hairy hamsters visited the cruel dentist.", "JJ NN", chunked=False)
print 54, en.sentence.find("All sorts of strange and weird and mysterious things happened.", "JJ and JJ NN")
print 55, en.sentence.find("All sorts of strange and weird and mysterious things happened.", "JJ and JJ (NN)")
print 56, en.sentence.find("Hairy hamsters are animals, mammals, funny creatures, or just very cool things.", "(very) (JJ) NN", chunked=False)
print 57, en.sentence.find("Wildcards are pretty wild.", "wild*", chunked=False)
print 58, en.sentence.find("Hamsters, hairy hamsters, funny hairy hamsters!", "(JJ) (JJ) NN", chunked=False)
# If you want you could feed this command with a list of your own
# regular expression units to chunk, mine are pretty basic as I'm not a linguist.
print 59, en.sentence.chunk_rules()
# SUMMARISATION #####################################################################
# NodeBox English Linguistics is able to strip keywords from a given text.
txt = """
Art can describe several kinds of things: a study of creative skill, a process of
using the creative skill, a product of the creative skill, or the audience’s
experiencing of the creative skill. The creative arts (“art”’ as discipline) are
a collection of disciplines (“arts”) which produce artworks (“art” as objects) that
is compelled by a personal drive (“art” as activity) and echoes or reflects a message,
mood, or symbolism for the viewer to interpret (“art” as experience). Artworks can
be defined by purposeful, creative interpretations of limitless concepts or ideas in
order to communicate something to another person. Artworks can be explicitly made for
this purpose or interpreted based on images or objects.
Art is something that visually stimulates an individual's thoughts, emotions, beliefs
or ideas. Art is a realized expression of an idea-it can take many different forms
and serve many different purposes.
"""
print 60, en.content.keywords(txt, top=10, nouns=True, singularize=True, filters=[])
# Guesses a list of words that frequently occur in the given text.
# The return value is a list (length defined by top) of (count, word) tuples.
# When nouns is True, returns only nouns. The command also ignores connectives,
# numbers and tags.
# When singularize is True, attempts to singularize nouns in the text.
# The optional filters parameter is a list of words which the command should ignore.
# Assuming you would want to summarise web content you can use en.content.strip_tags()
# to strip out HTML and keep only textual content:
print 61, en.content.strip_tags("<a href='http://nodebox.net'>NodeBox</a>")
# For example:
# from urllib import urlopen
# html = urlopen("http://news.bbc.co.uk/").read()
# meta = ["news", "health", "uk", "version", "weather", "video", "sport", "return", "read", "help"]
# print sentence_keywords(html, filters=meta)
# -> [(6, 'funeral'), (5, 'beirut'), (3, 'war'), (3, 'service'), (3, 'radio'), (3, 'mull'),
# (3, 'lebanon'), (3, 'islamist'), (3, 'function'), (3, 'female')]
# SPELLING CORRECTION ###############################################################
print 62, en.spelling.suggest("elehpant")
print 63, en.spelling.correct("kebyoard")
| gpl-2.0 | 8,471,973,854,487,513,000 | 39.981707 | 142 | 0.675867 | false |
nacc/autotest | client/tests/disktest/disktest.py | 2 | 3171 | import os, sys, subprocess, logging
from autotest.client import test, utils
from autotest.client.shared import error
class disktest(test.test):
"""
Autotest module for disktest.
Pattern test of the disk, using unique signatures for each block and each
iteration of the test. Designed to check for data corruption issues in the
disk and disk controller.
It writes 50MB/s of 500KB size ops.
@author: Martin Bligh ([email protected])
"""
version = 2
preserve_srcdir = True
def setup(self):
"""
Compiles disktest.
"""
os.chdir(self.srcdir)
utils.make('clean')
utils.make()
def initialize(self):
"""
Verifies if we have gcc to compile disktest.
"""
self.job.require_gcc()
def test_one_disk_chunk(self, disk, chunk):
"""
Tests one part of the disk by spawning a disktest instance.
@param disk: Directory (usually a mountpoint).
@param chunk: Portion of the disk used.
"""
logging.info("Testing %d MB files on %s in %d MB memory, chunk %s",
self.chunk_mb, disk, self.memory_mb, chunk)
cmd = ("%s/disktest -m %d -f %s/testfile.%d -i -S" %
(self.srcdir, self.chunk_mb, disk, chunk))
logging.debug("Running '%s'", cmd)
p = subprocess.Popen(cmd, shell=True)
return(p.pid)
def run_once(self, disks=None, gigabytes=None, chunk_mb=None):
"""
Runs one iteration of disktest.
@param disks: List of directories (usually mountpoints) to be passed
to the test.
@param gigabytes: Disk space that will be used for the test to run.
@param chunk_mb: Size of the portion of the disk used to run the test.
Cannot be larger than the total amount of free RAM.
"""
os.chdir(self.srcdir)
if chunk_mb is None:
chunk_mb = utils.memtotal() / 1024
if disks is None:
disks = [self.tmpdir]
if gigabytes is None:
free = 100 # cap it at 100GB by default
for disk in disks:
free = min(utils.freespace(disk) / 1024**3, free)
gigabytes = free
logging.info("Resizing to %s GB", gigabytes)
sys.stdout.flush()
self.chunk_mb = chunk_mb
self.memory_mb = utils.memtotal()/1024
if self.memory_mb > chunk_mb:
raise error.TestError("Too much RAM (%dMB) for this test to work" %
self.memory_mb)
chunks = (1024 * gigabytes) / chunk_mb
logging.info("Total of disk chunks that will be used: %s", chunks)
for i in range(chunks):
pids = []
for disk in disks:
pid = self.test_one_disk_chunk(disk, i)
pids.append(pid)
errors = []
for pid in pids:
(junk, retval) = os.waitpid(pid, 0)
if (retval != 0):
errors.append(retval)
if errors:
raise error.TestError("Errors from children: %s" % errors)
| gpl-2.0 | 5,716,390,651,195,058,000 | 32.03125 | 79 | 0.557868 | false |
gentlecolts/booru-browse | DynamicMedia.py | 1 | 7467 | #!/usr/bin/python3
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject, GdkPixbuf, Gdk
import tempfile
import cgi,posixpath
import time
import urllib.request,urllib.parse
import os
from shutil import copyfile
from threading import Thread
try:
import math
inf=math.inf
except:
inf=float("inf")
scale_method=GdkPixbuf.InterpType.BILINEAR
(TARGET_ENTRY_TEXT, TARGET_ENTRY_PIXBUF) = range(2)
(COLUMN_TEXT, COLUMN_PIXBUF) = range(2)
DRAG_ACTION = Gdk.DragAction.COPY
tempdirobj=tempfile.TemporaryDirectory(prefix="booru-browse-")
tempdir=tempdirobj.name
print("using tempdir:",tempdir)
def getName(url,content):
domain=urllib.parse.urlsplit(url).netloc
disposition=content.getheader('content-disposition')
if disposition:
_,params=cgi.parse_header(disposition)
return domain,params['filename']
else:
return domain,posixpath.basename(urllib.parse.urlsplit(url).path)
imgcache={}
def loadWithProgress(url, progress):
request=urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
content=urllib.request.urlopen(request)
buff=bytes()
length=content.getheader('content-length')
domain,name=getName(url,content)
#print(domain,name)
if length:
length=int(length)
blocksize=max(4096, length//100)
else:
"set up pulsing progress bar"
def progUpdate():
have=len(buff)
if have<length:
progress.set_fraction(have/length)
return True
return False
GObject.idle_add(progUpdate)
timer=time.time()
while True:
read=content.read(blocksize)
if read:
buff+=read
else:
break
timer=time.time()-timer
print("{}\n\ttook {:.2f} seconds, speed was {:.2f} KB/s".format(url, timer, len(buff)/(timer*1024)))
#cache the image
path=os.path.join(tempdir, domain)
if not os.path.exists(path):
os.mkdir(path)
path="{}/{}".format(path,name)
return path, name,buff
class DynamicMedia(Gtk.EventBox):
def __init__(self, path=None, url=None):
super(DynamicMedia, self).__init__()
#some properties
self.media=Gtk.Image()
self.name=""
self.buf=None
self.path=None
self.fit=True
self.allowUpscale=True
self.draggable=False
self.lastPath=os.path.expanduser('~/Downloads')
def toggle(w, e):
self.fit=not self.fit
self.connect("button_release_event", toggle)
#actually send the data
def data_get(widget,context,selection,info,evttime):
print("drag dropped")
#print(type(selection))
#print(widget,context,selection,info,evttime)
selection.set_uris(["file://"+self.path])
self.connect('drag_data_get',data_get)
#assemble everything
overlay=Gtk.Overlay()
overlay.add(self.media)
self.progressbox=Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
overlay.add_overlay(self.progressbox)
self.add(overlay)
GObject.idle_add(self.resizeSelf)
self.load(path, url)
def enableDrag(self):
if os.name=='nt':
print("Drag n Drop not supported on windows")
return
targets=[
#Gtk.TargetEntry.new('image/x-xpixmap',0,TARGET_ENTRY_PIXBUF),
Gtk.TargetEntry.new('text/uri-list',0,TARGET_ENTRY_PIXBUF),
#Gtk.TargetEntry.new('text/plain',0,TARGET_ENTRY_TEXT),
]
self.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,targets,DRAG_ACTION)
self.draggable=True
def disableDrag(self):
self.drag_source_unset()
self.draggable=False
def generateDrag(self):
if self.draggable and self.buf:
pbuf=self.buf.get_static_image()
(x,y)=(pbuf.get_width(),pbuf.get_height())
scale=128/max(x,y)
self.drag_source_set_icon_pixbuf(pbuf.scale_simple(scale*x,scale*y,scale_method))
def load(self, path=None, url=None):
if path:
self.name=os.path.basename(path)
with open(path,'rb') as f:
#TODO: make copy in temp dir?
self.path=path
loader=GdkPixbuf.PixbufLoader()
loader.write(f.read())
loader.close()
#self.buf=GdkPixbuf.PixbufAnimation.new_from_file(path)
self.buf=loader.get_animation()
self.iter=self.buf.get_iter()
self.media.set_from_animation(self.buf)
self.enableDrag()
self.generateDrag()
elif url:
#if cached, use cached image
if url in imgcache:
self.load(path=imgcache[url])
return
loadbar=Gtk.ProgressBar()
#if this is unset, then the displayed text will be the load percent
#that said,
#loadbar.set_text(url)
loadbar.show()
self.progressbox.add(loadbar)
def asyncload():
loader=GdkPixbuf.PixbufLoader()
				#these need to be stored separately from the self versions to prevent race conditions in the cache
path, name,buff=loadWithProgress(url, loadbar)
(self.path,self.name)=(path, name)
loader.write(buff)
loader.close()
#self.name=source.info().get_filename()
#print("got filename: ", self.name)
self.buf=loader.get_animation()
self.iter=self.buf.get_iter()
def finish():
self.media.set_from_animation(self.buf)
self.progressbox.remove(loadbar)
self.enableDrag()
self.generateDrag()
return False
GObject.idle_add(finish)
#flush to disk in background
with open(path,'wb+') as f:
f.write(buff)
imgcache[url]=path
t=Thread(target=asyncload, daemon=True)
t.start()
else:
#TODO: in the future, should empty current content
self.disableDrag()
return
def resizeSelf(self):
if not self.buf:
return True
container=self.get_parent().get_allocation()
(x, y)=(container.width, container.height)
(realx, realy)=(self.buf.get_width(), self.buf.get_height())
scale=min(x/realx, y/realy, inf if self.allowUpscale else 1) if self.fit else 1
(x, y)=(scale*realx, scale*realy)
if self.buf.is_static_image():
self.media.set_from_pixbuf(
self.buf.get_static_image().scale_simple(x,y,scale_method)
)
elif hasattr(self, 'iter') and self.iter.advance():
self.media.set_from_pixbuf(
self.iter.get_pixbuf().scale_simple(x,y,scale_method)
)
#TODO: the best approach here might just be doing the animation stepping myself, for both static and not
#self.media.set_from_animation(pixbuf_anim_copy_resize(self.buf, x, y))
return True
def saveDialog(self, rootwin=None):
#TODO: wait (in bg thread) to ensure disk file is fully written before opening save dialog
if not self.path:
print("no image loaded, cant save")
return
print("saving media!")
dialog=Gtk.FileChooserDialog(
"Save image", rootwin,
Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK)
)
print("default name should be:", self.name)
dialog.set_current_folder(self.lastPath)
dialog.set_current_name(self.name)
dialog.set_do_overwrite_confirmation(True)
response=dialog.run()
if response==Gtk.ResponseType.OK:
saveto=dialog.get_filename()
self.lastPath=os.path.dirname(saveto)
print("saving to:", saveto)
copyfile(self.path, saveto)
elif response==Gtk.ResponseType.CANCEL:
print("save canceled")
dialog.destroy()
if __name__=="__main__":
win=Gtk.Window()
win.connect("delete-event", lambda wid, event:Gtk.main_quit())
win.set_size_request(320, 240)
win.set_title("Title")
#img=DynamicMedia('8db.jpg')
#img=DynamicMedia('54a.gif')
#img=DynamicMedia('Red-Big-Frog-Wallpaper-Photos-202.jpg')
img=DynamicMedia(url='http://i0.kym-cdn.com/photos/images/newsfeed/001/256/886/074.gif')
sw=Gtk.ScrolledWindow()
sw.add(img)
win.add(sw)
win.show_all()
GObject.threads_init()
Gtk.main()
| mit | -8,868,115,301,672,362,000 | 24.748276 | 107 | 0.697603 | false |
guillermobox/mvc | pyrest.py | 1 | 5115 | import urlparse
import json
import traceback
import os
PROTOCOL_READ = 'GET'
PROTOCOL_CREATE = 'POST'
PROTOCOL_UPDATE = 'PUT'
PROTOCOL_DELETE = 'DELETE'
CRUD_MAPPING = {
PROTOCOL_READ: 'read',
PROTOCOL_CREATE: 'create',
PROTOCOL_UPDATE: 'update',
PROTOCOL_DELETE: 'delete' }
class Resource(object):
def __init__(self, api):
self.makeURI = api.makeURI
self.default_headers = [('Content-Type', api.version)]
def payload(self, code, **kwargs):
return code, [] + self.default_headers, json.dumps(kwargs)
def mapping(self, protocol):
funcname = CRUD_MAPPING[protocol]
if hasattr(self, funcname):
return getattr(self, funcname)
else:
return None
def handle(self, protocol, restcall, data):
method = self.mapping(protocol)
if method == None:
return self.payload(403,
errormsg="You can't %s in this resource"%protocol)
if protocol in [PROTOCOL_CREATE, PROTOCOL_UPDATE]:
try:
data = json.loads(data)
except:
return self.payload(400,
errormsg='Wrong data in payload, JSON not found')
return method(restcall, data)
class API(object):
def __init__(self):
self.resource_dictionary = {}
for resource in self.resources:
resobj = resource(self)
self.resource_dictionary[ resource.name ] = resobj
def makeURI(self, *args, **kwargs):
uri = os.path.join(self.urlbase, self.username, *map(str, args))
if 'query' in kwargs:
uri += '?' + kwargs['query']
return uri
def parseREST(self, apistr):
dictionary = {}
(scheme, netloc, path, params, query, _) = urlparse.urlparse(apistr)
if query:
query = urlparse.parse_qs(query)
path = path[len(self.urlbase):]
if path.endswith('/'): path = path[:-1]
fields = path.split('/')
dictionary = {
'base': self.urlbase,
'user': None,
'baseresource': [],
'resource': '',
'id': None,
'query': query or {},
}
if fields:
dictionary['user'] = fields.pop(0)
if fields:
oddity = len(fields)%2
if oddity==0:
dictionary['id'] = fields.pop()
dictionary['prepath'] = '/'.join(fields)
dictionary['resource'] = fields.pop()
dictionary['baseresource'] = zip( fields[0::2], fields[1::2])
return dictionary
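    #
    # Illustrative example (assuming self.urlbase is '/api/'): parsing
    # '/api/alice/books/12?sort=title' would yield roughly
    #
    #   {'base': '/api/', 'user': 'alice', 'baseresource': [],
    #    'prepath': 'books', 'resource': 'books', 'id': '12',
    #    'query': {'sort': ['title']}}
    #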
def resourceURI(user, resource, identification=None, action=None, query=None):
uri = 'http://' + host + '/api/' + user + '/' + resource + '/'
if identification:
uri += str(identification) + '/'
if action:
uri += action + '/'
if query:
uri += '?' + query
return uri
def get_handler(self, restcall):
if restcall['resource'] not in self.resource_dictionary:
return None
return self.resource_dictionary[restcall['resource']]
def call(self, apistr, data, protocol, headers):
try:
return self.RESTCall(apistr, data, protocol, headers)
except Exception as e:
return 500, [], json.dumps({"errormsg":'Exception found',
'exception':str(e), 'traceback':traceback.format_exc()})
def check_authorization(self, restcall, protocol, headers, data):
if not 'user' in restcall:
return 403, [], json.dumps({'errormsg':'Bad request, use /api/auth to login'})
if restcall['user'] == 'auth' and protocol == PROTOCOL_CREATE:
try:
data = json.loads(data)
except:
return 403, [], json.dumps({'errormsg':'Wrong data in payload, JSON not found'})
login_headers = self.login(data, headers)
if login_headers:
return 200, login_headers, ''
else:
return 401, [], json.dumps({'errormsg':'You are not authorized'})
if restcall['user'] == 'logout' and protocol == PROTOCOL_CREATE:
logout_headers = self.logout()
return 204, logout_headers, ''
if not self.auth(restcall, headers):
return 401, [], json.dumps({'errormsg':'You are not authorized'})
self.username = restcall['user']
return None
def RESTCall(self, apistr, data, protocol, headers):
self.host = headers['host']
restcall = self.parseREST(apistr)
if headers['accept'] != self.version:
return 403, [], json.dumps({'errormsg':'API version not available'})
error = self.check_authorization(restcall, protocol, headers, data)
if error:
return error
handler = self.get_handler(restcall)
if handler==None:
return 404, [], json.dumps({'errormsg':'Unknown resource'})
return handler.handle(protocol, restcall, data)
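# A minimal wiring sketch (assumed, not from the original project): the API
# base class expects subclasses to provide 'urlbase', 'version', 'resources'
# and the auth/login/logout hooks used by check_authorization(). NoteResource
# is the illustrative resource sketched above; the stub hooks below accept
# everything and return dummy cookie headers.
class NoteAPI(API):
    urlbase = '/api/'
    version = 'application/vnd.note.v1+json'
    resources = [NoteResource]

    def auth(self, restcall, headers):
        # A real implementation would validate a session cookie or token.
        return True

    def login(self, data, headers):
        return [('Set-Cookie', 'session=dummy')]

    def logout(self):
        return [('Set-Cookie', 'session=deleted')]


# Example call (illustrative):
#   api = NoteAPI()
#   api.call('/api/alice/notes/', '', 'GET',
#            {'host': 'localhost', 'accept': NoteAPI.version})
#   -> (200, [('Content-Type', NoteAPI.version)], '{"notes": []}')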
| apache-2.0 | -4,963,824,253,229,612,000 | 31.373418 | 96 | 0.550342 | false |
LeMeteore/boomer2 | note/models.py | 1 | 5970 | from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.contrib import auth
from django.contrib.auth.models import (
    AbstractBaseUser, Permission, Group,
    _user_get_all_permissions, _user_has_perm, _user_has_module_perms,
)
from django.core.mail import send_mail
from django.utils import timezone
# Create your models here.
class Note(models.Model):
# always reference the User class using setting conf
author = models.ForeignKey(settings.AUTH_USER_MODEL)
    value = models.IntegerField()
def __str__(self):
return "your note is %s" % self.value
# I was obliged to create MyPermissionsMixin class
# to avoid the clash with the default PermissionsMixin class
# And I have renamed the related_name used
class MyPermissionsMixin(models.Model):
"""
A mixin class that adds the fields and methods necessary to support
Django's Group and Permission model using the ModelBackend.
"""
is_superuser = models.BooleanField(_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
groups = models.ManyToManyField(Group, verbose_name=_('groups'),
blank=True, help_text=_('The groups this user belongs to. A user will '
'get all permissions granted to each of '
'his/her group.'),
related_name="myuser_set", related_query_name="user")
user_permissions = models.ManyToManyField(Permission,
verbose_name=_('user permissions'), blank=True,
help_text=_('Specific permissions for this user.'),
related_name="myuser_set", related_query_name="user")
class Meta:
abstract = True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through their
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions. If
object is passed, it checks if the user has all required perms for this
object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
# to make this custom User class complete
# I was obliged to copy the whole AbstractUser class attributes
# from django.contrib.auth.models
# and I have added my custom field: is_devops = True
# and I have removed the abstract = True inside class Meta
# this is just for demonstration
# you'll need to add custom forms for creation/modifications
# and probably a custom manager for a proper user creation
class MyUser(AbstractBaseUser, MyPermissionsMixin):
is_devops = True
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, digits and '
'@/./+/-/_ only.'))
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
# you will probably have to write a custom user manager
# that will take care of a proper user creation, etc...
# objects = MyUserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
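# A possible manager for the commented-out "objects = MyUserManager()" above.
# This is a sketch of the usual BaseUserManager pattern, not part of the
# original app; adjust field handling to match your project's needs.
from django.contrib.auth.models import BaseUserManager


class MyUserManager(BaseUserManager):
    def create_user(self, username, email=None, password=None, **extra_fields):
        if not username:
            raise ValueError('The given username must be set')
        user = self.model(username=username,
                          email=self.normalize_email(email),
                          **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, username, email=None, password=None, **extra_fields):
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        return self.create_user(username, email, password, **extra_fields)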
| bsd-2-clause | 2,916,940,540,287,646,000 | 39.612245 | 79 | 0.652094 | false |
xinjiguaike/edx-platform | common/test/acceptance/tests/lms/test_teams.py | 3 | 80434 | """
Acceptance tests for the teams feature.
"""
import json
import random
import time
from dateutil.parser import parse
import ddt
from nose.plugins.attrib import attr
from uuid import uuid4
from ..helpers import EventsTestMixin, UniqueCourseTest
from ...fixtures import LMS_BASE_URL
from ...fixtures.course import CourseFixture
from ...fixtures.discussion import (
Thread,
MultipleThreadFixture
)
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.learner_profile import LearnerProfilePage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.teams import (
TeamsPage,
MyTeamsPage,
BrowseTopicsPage,
BrowseTeamsPage,
TeamManagementPage,
EditMembershipPage,
TeamPage
)
from ...pages.common.utils import confirm_prompt
TOPICS_PER_PAGE = 12
class TeamsTabBase(EventsTestMixin, UniqueCourseTest):
"""Base class for Teams Tab tests"""
def setUp(self):
super(TeamsTabBase, self).setUp()
self.tab_nav = TabNavPage(self.browser)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.teams_page = TeamsPage(self.browser, self.course_id)
def create_topics(self, num_topics):
"""Create `num_topics` test topics."""
return [{u"description": i, u"name": i, u"id": i} for i in map(str, xrange(num_topics))]
def create_teams(self, topic, num_teams, time_between_creation=0):
"""Create `num_teams` teams belonging to `topic`."""
teams = []
for i in xrange(num_teams):
team = {
'course_id': self.course_id,
'topic_id': topic['id'],
'name': 'Team {}'.format(i),
'description': 'Description {}'.format(i),
'language': 'aa',
'country': 'AF'
}
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/teams/',
data=json.dumps(team),
headers=self.course_fixture.headers
)
# Sadly, this sleep is necessary in order to ensure that
# sorting by last_activity_at works correctly when running
# in Jenkins.
time.sleep(time_between_creation)
teams.append(json.loads(response.text))
return teams
def create_membership(self, username, team_id):
"""Assign `username` to `team_id`."""
response = self.course_fixture.session.post(
LMS_BASE_URL + '/api/team/v0/team_membership/',
data=json.dumps({'username': username, 'team_id': team_id}),
headers=self.course_fixture.headers
)
return json.loads(response.text)
def set_team_configuration(self, configuration, enroll_in_course=True, global_staff=False):
"""
Sets team configuration on the course and calls auto-auth on the user.
"""
#pylint: disable=attribute-defined-outside-init
self.course_fixture = CourseFixture(**self.course_info)
if configuration:
self.course_fixture.add_advanced_settings(
{u"teams_configuration": {u"value": configuration}}
)
self.course_fixture.install()
enroll_course_id = self.course_id if enroll_in_course else None
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, course_id=enroll_course_id, staff=global_staff).visit().user_info
self.course_info_page.visit()
def verify_teams_present(self, present):
"""
Verifies whether or not the teams tab is present. If it should be present, also
checks the text on the page (to ensure view is working).
"""
if present:
self.assertIn("Teams", self.tab_nav.tab_names)
self.teams_page.visit()
self.assertEqual(self.teams_page.active_tab(), 'browse')
else:
self.assertNotIn("Teams", self.tab_nav.tab_names)
def verify_teams(self, page, expected_teams):
"""Verify that the list of team cards on the current page match the expected teams in order."""
def assert_team_equal(expected_team, team_card_name, team_card_description):
"""
Helper to assert that a single team card has the expected name and
description.
"""
self.assertEqual(expected_team['name'], team_card_name)
self.assertEqual(expected_team['description'], team_card_description)
team_card_names = page.team_names
team_card_descriptions = page.team_descriptions
map(assert_team_equal, expected_teams, team_card_names, team_card_descriptions)
def verify_my_team_count(self, expected_number_of_teams):
""" Verify the number of teams shown on "My Team". """
# We are doing these operations on this top-level page object to avoid reloading the page.
self.teams_page.verify_my_team_count(expected_number_of_teams)
def only_team_events(self, event):
"""Filter out all non-team events."""
return event['event_type'].startswith('edx.team.')
@ddt.ddt
@attr('shard_5')
class TeamsTabTest(TeamsTabBase):
"""
Tests verifying when the Teams tab is present.
"""
def test_teams_not_enabled(self):
"""
Scenario: teams tab should not be present if no team configuration is set
Given I am enrolled in a course without team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(None)
self.verify_teams_present(False)
def test_teams_not_enabled_no_topics(self):
"""
Scenario: teams tab should not be present if team configuration does not specify topics
Given I am enrolled in a course with no topics in the team configuration
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": []})
self.verify_teams_present(False)
def test_teams_not_enabled_not_enrolled(self):
"""
Scenario: teams tab should not be present if student is not enrolled in the course
Given there is a course with team configuration and topics
And I am not enrolled in that course, and am not global staff
When I view the course info page
Then I should not see the Teams tab
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False
)
self.verify_teams_present(False)
def test_teams_enabled(self):
"""
Scenario: teams tab should be present if user is enrolled in the course and it has team configuration
Given I am enrolled in a course with team configuration and topics
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(1)})
self.verify_teams_present(True)
def test_teams_enabled_global_staff(self):
"""
Scenario: teams tab should be present if user is not enrolled in the course, but is global staff
Given there is a course with team configuration
And I am not enrolled in that course, but am global staff
When I view the course info page
Then I should see the Teams tab
And the correct content should be on the page
"""
self.set_team_configuration(
{u"max_team_size": 10, u"topics": self.create_topics(1)},
enroll_in_course=False,
global_staff=True
)
self.verify_teams_present(True)
@ddt.data(
'topics/{topic_id}',
'topics/{topic_id}/search',
'teams/{topic_id}/{team_id}/edit-team',
'teams/{topic_id}/{team_id}'
)
def test_unauthorized_error_message(self, route):
"""Ensure that an error message is shown to the user if they attempt
to take an action which makes an AJAX request while not signed
in.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration(
{u'max_team_size': 10, u'topics': topics},
global_staff=True
)
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
self.browser.delete_cookie('sessionid')
url = self.browser.current_url.split('#')[0]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
)
)
)
self.teams_page.wait_for_ajax()
self.assertEqual(
self.teams_page.warning_message,
u"Your request could not be completed. Reload the page and try again."
)
@ddt.data(
('browse', '.topics-list'),
# TODO: find a reliable way to match the "My Teams" tab
# ('my-teams', 'div.teams-list'),
('teams/{topic_id}/{team_id}', 'div.discussion-module'),
('topics/{topic_id}/create-team', 'div.create-team-instructions'),
('topics/{topic_id}', '.teams-list'),
('not-a-real-route', 'div.warning')
)
@ddt.unpack
def test_url_routing(self, route, selector):
"""Ensure that navigating to a URL route correctly updates the page
content.
"""
topics = self.create_topics(1)
topic = topics[0]
self.set_team_configuration({
u'max_team_size': 10,
u'topics': topics
})
team = self.create_teams(topic, 1)[0]
self.teams_page.visit()
# Get the base URL (the URL without any trailing fragment)
url = self.browser.current_url
fragment_index = url.find('#')
if fragment_index >= 0:
url = url[0:fragment_index]
self.browser.get(
'{url}#{route}'.format(
url=url,
route=route.format(
topic_id=topic['id'],
team_id=team['id']
))
)
self.teams_page.wait_for_ajax()
self.assertTrue(self.teams_page.q(css=selector).present)
self.assertTrue(self.teams_page.q(css=selector).visible)
@attr('shard_5')
class MyTeamsTest(TeamsTabBase):
"""
Tests for the "My Teams" tab of the Teams page.
"""
def setUp(self):
super(MyTeamsTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.my_teams_page = MyTeamsPage(self.browser, self.course_id)
self.page_viewed_event = {
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'my-teams',
'topic_id': None,
'team_id': None
}
}
def test_not_member_of_any_teams(self):
"""
Scenario: Visiting the My Teams page when user is not a member of any team should not display any teams.
Given I am enrolled in a course with a team configuration and a topic but am not a member of a team
When I visit the My Teams page
        Then I should see no teams
And I should see a message that I belong to no teams.
"""
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.assertEqual(len(self.my_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertEqual(
self.my_teams_page.q(css='.page-content-main').text,
[u'You are not currently a member of any team.']
)
def test_member_of_a_team(self):
"""
Scenario: Visiting the My Teams page when user is a member of a team should display the teams.
Given I am enrolled in a course with a team configuration and a topic and am a member of a team
When I visit the My Teams page
Then I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should not see a pagination footer
"""
teams = self.create_teams(self.topic, 1)
self.create_membership(self.user_info['username'], teams[0]['id'])
with self.assert_events_match_during(self.only_team_events, expected_events=[self.page_viewed_event]):
self.my_teams_page.visit()
self.verify_teams(self.my_teams_page, teams)
@attr('shard_5')
@ddt.ddt
class BrowseTopicsTest(TeamsTabBase):
"""
Tests for the Browse tab of the Teams page.
"""
def setUp(self):
super(BrowseTopicsTest, self).setUp()
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
@ddt.data(('name', False), ('team_count', True))
@ddt.unpack
def test_sort_topics(self, sort_order, reverse):
"""
Scenario: the user should be able to sort the list of topics by name or team count
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
When I choose a sort order
Then I should see the paginated list of topics in that order
"""
topics = self.create_topics(TOPICS_PER_PAGE + 1)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
for i, topic in enumerate(random.sample(topics, len(topics))):
self.create_teams(topic, i)
topic['team_count'] = i
self.topics_page.visit()
self.topics_page.sort_topics_by(sort_order)
topic_names = self.topics_page.topic_names
self.assertEqual(len(topic_names), TOPICS_PER_PAGE)
self.assertEqual(
topic_names,
[t['name'] for t in sorted(topics, key=lambda t: t[sort_order], reverse=reverse)][:TOPICS_PER_PAGE]
)
def test_sort_topics_update(self):
"""
Scenario: the list of topics should remain sorted after updates
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics and choose a sort order
Then I should see the paginated list of topics in that order
When I create a team in one of those topics
And I return to the topics list
Then I should see the topics in the correct sorted order
"""
topics = self.create_topics(3)
self.set_team_configuration({u"max_team_size": 100, u"topics": topics})
self.topics_page.visit()
self.topics_page.sort_topics_by('team_count')
topic_name = self.topics_page.topic_names[-1]
topic = [t for t in topics if t['name'] == topic_name][0]
self.topics_page.browse_teams_for_topic(topic_name)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
self.assertTrue(browse_teams_page.is_browser_on_page())
browse_teams_page.click_create_team_link()
create_team_page = TeamManagementPage(self.browser, self.course_id, topic)
create_team_page.value_for_text_field(field_id='name', value='Team Name', press_enter=False)
create_team_page.value_for_textarea_field(
field_id='description',
value='Team description.'
)
create_team_page.submit_form()
team_page = TeamPage(self.browser, self.course_id)
        self.assertTrue(team_page.is_browser_on_page())
team_page.click_all_topics()
self.assertTrue(self.topics_page.is_browser_on_page())
self.topics_page.wait_for_ajax()
self.assertEqual(topic_name, self.topics_page.topic_names[0])
def test_list_topics(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see a list of topics for the course
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(2)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), 2)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-2 out of 2 total'))
self.assertFalse(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_topic_pagination(self):
"""
Scenario: a list of topics should be visible in the "Browse" tab, paginated 12 per page
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
Then I should see only the first 12 topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(20)})
self.topics_page.visit()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 20 total'))
self.assertTrue(self.topics_page.pagination_controls_visible())
self.assertFalse(self.topics_page.is_previous_page_button_enabled())
self.assertTrue(self.topics_page.is_next_page_button_enabled())
def test_go_to_numbered_page(self):
"""
Scenario: topics should be able to be navigated by page number
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter a valid page number in the page number input
Then I should see that page of topics
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(25)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.is_previous_page_button_enabled())
self.assertFalse(self.topics_page.is_next_page_button_enabled())
def test_go_to_invalid_page(self):
"""
Scenario: browsing topics should not respond to invalid page numbers
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
And I enter an invalid page number in the page number input
Then I should stay on the current page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.go_to_page(3)
self.assertEqual(self.topics_page.get_current_page_number(), 1)
def test_page_navigation_buttons(self):
"""
        Scenario: the user should be able to navigate between pages of topics using the pagination buttons
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse topics
When I press the next page button
Then I should move to the next page
When I press the previous page button
Then I should move to the previous page
"""
self.set_team_configuration({u"max_team_size": 10, u"topics": self.create_topics(13)})
self.topics_page.visit()
self.topics_page.press_next_page_button()
self.assertEqual(len(self.topics_page.topic_cards), 1)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 13-13 out of 13 total'))
self.topics_page.press_previous_page_button()
self.assertEqual(len(self.topics_page.topic_cards), TOPICS_PER_PAGE)
self.assertTrue(self.topics_page.get_pagination_header_text().startswith('Showing 1-12 out of 13 total'))
def test_topic_description_truncation(self):
"""
Scenario: excessively long topic descriptions should be truncated so
as to fit within a topic card.
Given I am enrolled in a course with a team configuration and a topic
with a long description
When I visit the Teams page
And I browse topics
Then I should see a truncated topic description
"""
initial_description = "A" + " really" * 50 + " long description"
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [{"name": "", "id": "", "description": initial_description}]}
)
self.topics_page.visit()
truncated_description = self.topics_page.topic_descriptions[0]
self.assertLess(len(truncated_description), len(initial_description))
self.assertTrue(truncated_description.endswith('...'))
self.assertIn(truncated_description.split('...')[0], initial_description)
def test_go_to_teams_list(self):
"""
Scenario: Clicking on a Topic Card should take you to the
teams list for that Topic.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
And I browse topics
And I click on the arrow link to view teams for the first topic
Then I should be on the browse teams page
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
self.topics_page.visit()
self.topics_page.browse_teams_for_topic('Example Topic')
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, topic)
self.assertTrue(browse_teams_page.is_browser_on_page())
self.assertEqual(browse_teams_page.header_name, 'Example Topic')
self.assertEqual(browse_teams_page.header_description, 'Description')
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse topics page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the browse topics page
Then my browser should post a page viewed event
"""
topic = {u"name": u"Example Topic", u"id": u"example_topic", u"description": "Description"}
self.set_team_configuration(
{u"max_team_size": 1, u"topics": [topic]}
)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'browse',
'topic_id': None,
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.topics_page.visit()
@attr('shard_5')
@ddt.ddt
class BrowseTeamsWithinTopicTest(TeamsTabBase):
"""
Tests for browsing Teams within a Topic on the Teams page.
"""
TEAMS_PAGE_SIZE = 10
def setUp(self):
super(BrowseTeamsWithinTopicTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
self.max_team_size = 10
self.set_team_configuration({
'course_id': self.course_id,
'max_team_size': self.max_team_size,
'topics': [self.topic]
})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.topics_page = BrowseTopicsPage(self.browser, self.course_id)
def teams_with_default_sort_order(self, teams):
"""Return a list of teams sorted according to the default ordering
(last_activity_at, with a secondary sort by open slots).
"""
return sorted(
sorted(teams, key=lambda t: len(t['membership']), reverse=True),
key=lambda t: parse(t['last_activity_at']).replace(microsecond=0),
reverse=True
)
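    # Python's sort is stable, so the inner sort (by member count, descending)
    # survives as the tie-breaker when the outer sort orders teams by
    # last_activity_at with the newest first.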
def verify_page_header(self):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(self.browse_teams_page.header_name, self.topic['name'])
self.assertEqual(self.browse_teams_page.header_description, self.topic['description'])
def verify_search_header(self, search_results_page, search_query):
"""Verify that the page header correctly reflects the current topic's name and description."""
self.assertEqual(search_results_page.header_name, 'Team Search')
self.assertEqual(
search_results_page.header_description,
'Showing results for "{search_query}"'.format(search_query=search_query)
)
def verify_on_page(self, teams_page, page_num, total_teams, pagination_header_text, footer_visible):
"""
Verify that we are on the correct team list page.
Arguments:
teams_page (BaseTeamsPage): The teams page object that should be the current page.
page_num (int): The one-indexed page number that we expect to be on
total_teams (list): An unsorted list of all the teams for the
current topic
pagination_header_text (str): Text we expect to see in the
pagination header.
footer_visible (bool): Whether we expect to see the pagination
footer controls.
"""
sorted_teams = self.teams_with_default_sort_order(total_teams)
self.assertTrue(teams_page.get_pagination_header_text().startswith(pagination_header_text))
self.verify_teams(
teams_page,
sorted_teams[(page_num - 1) * self.TEAMS_PAGE_SIZE:page_num * self.TEAMS_PAGE_SIZE]
)
self.assertEqual(
teams_page.pagination_controls_visible(),
footer_visible,
            msg='Expected paging footer to be ' + ('visible' if footer_visible else 'invisible')
)
@ddt.data(
('open_slots', 'last_activity_at', True),
('last_activity_at', 'open_slots', True)
)
@ddt.unpack
def test_sort_teams(self, sort_order, secondary_sort_order, reverse):
"""
Scenario: the user should be able to sort the list of teams by open slots or last activity
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic
When I choose a sort order
Then I should see the paginated list of teams in that order
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
for i, team in enumerate(random.sample(teams, len(teams))):
for _ in range(i):
user_info = AutoAuthPage(self.browser, course_id=self.course_id).visit().user_info
self.create_membership(user_info['username'], team['id'])
team['open_slots'] = self.max_team_size - i
# Parse last activity date, removing microseconds because
# the Django ORM does not support them. Will be fixed in
# Django 1.8.
team['last_activity_at'] = parse(team['last_activity_at']).replace(microsecond=0)
# Re-authenticate as staff after creating users
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=True
).visit()
self.browse_teams_page.visit()
self.browse_teams_page.sort_teams_by(sort_order)
team_names = self.browse_teams_page.team_names
self.assertEqual(len(team_names), self.TEAMS_PAGE_SIZE)
sorted_teams = [
team['name']
for team in sorted(
sorted(teams, key=lambda t: t[secondary_sort_order], reverse=reverse),
key=lambda t: t[sort_order],
reverse=reverse
)
][:self.TEAMS_PAGE_SIZE]
self.assertEqual(team_names, sorted_teams)
def test_default_sort_order(self):
"""
Scenario: the list of teams should be sorted by last activity by default
Given I am enrolled in a course with team configuration and topics
When I visit the Teams page
And I browse teams within a topic
Then I should see a list of teams for that topic, sorted by last activity
"""
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1)
self.browse_teams_page.visit()
self.assertEqual(self.browse_teams_page.sort_order, 'last activity')
def test_no_teams(self):
"""
Scenario: Visiting a topic with no teams should not display any teams.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing no teams
And I should see no teams
And I should see a button to add a team
And I should not see a pagination footer
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.assertEqual(len(self.browse_teams_page.team_cards), 0, msg='Expected to see no team cards')
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_one_page(self):
"""
        Scenario: Visiting a topic with fewer teams than the page size should display
all those teams on one page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see a pagination header showing the number of teams
And I should see all the expected team cards
And I should see a button to add a team
And I should not see a pagination footer
"""
teams = self.teams_with_default_sort_order(
self.create_teams(self.topic, self.TEAMS_PAGE_SIZE, time_between_creation=1)
)
self.browse_teams_page.visit()
self.verify_page_header()
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 1-10 out of 10 total'))
self.verify_teams(self.browse_teams_page, teams)
self.assertFalse(
self.browse_teams_page.pagination_controls_visible(),
msg='Expected paging footer to be invisible'
)
def test_teams_navigation_buttons(self):
"""
Scenario: The user should be able to page through a topic's team list
using navigation buttons when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I click on the next page button
Then I should see that I am on the second page of results
And when I click on the previous page button
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 1, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
self.browse_teams_page.press_next_page_button()
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-11 out of 11 total', True)
self.browse_teams_page.press_previous_page_button()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 11 total', True)
def test_teams_page_input(self):
"""
Scenario: The user should be able to page through a topic's team list
using the page input when it is longer than the page size.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the correct page header
And I should see that I am on the first page of results
When I input the second page
Then I should see that I am on the second page of results
When I input the first page
Then I should see that I am on the first page of results
"""
teams = self.create_teams(self.topic, self.TEAMS_PAGE_SIZE + 10, time_between_creation=1)
self.browse_teams_page.visit()
self.verify_page_header()
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
self.browse_teams_page.go_to_page(2)
self.verify_on_page(self.browse_teams_page, 2, teams, 'Showing 11-20 out of 20 total', True)
self.browse_teams_page.go_to_page(1)
self.verify_on_page(self.browse_teams_page, 1, teams, 'Showing 1-10 out of 20 total', True)
def test_browse_team_topics(self):
"""
Scenario: User should be able to navigate to "browse all teams" and "search team description" links.
Given I am enrolled in a course with teams enabled
When I visit the Teams page for a topic
Then I should see the correct page header
And I should see the link to "browse teams in other topics"
        When I navigate to that link
Then I should see the topic browse page
"""
self.browse_teams_page.visit()
self.verify_page_header()
self.browse_teams_page.click_browse_all_teams_link()
self.assertTrue(self.topics_page.is_browser_on_page())
def test_search(self):
"""
Scenario: User should be able to search for a team
Given I am enrolled in a course with teams enabled
When I visit the Teams page for that topic
And I search for 'banana'
Then I should see the search result page
And the search header should be shown
And 0 results should be shown
And my browser should fire a page viewed event for the search page
And a searched event should have been fired
"""
# Note: all searches will return 0 results with the mock search server
# used by Bok Choy.
search_text = 'banana'
self.create_teams(self.topic, 5)
self.browse_teams_page.visit()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'search-teams',
'topic_id': self.topic['id'],
'team_id': None
}
}, {
'event_type': 'edx.team.searched',
'event': {
'search_text': search_text,
'topic_id': self.topic['id'],
'number_of_results': 0
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events, in_order=False):
search_results_page = self.browse_teams_page.search(search_text)
self.verify_search_header(search_results_page, search_text)
self.assertTrue(search_results_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
def test_page_viewed_event(self):
"""
Scenario: Visiting the browse page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page
Then my browser should post a page viewed event for the teams page
"""
self.create_teams(self.topic, 5)
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-topic',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.browse_teams_page.visit()
@attr('shard_5')
class TeamFormActions(TeamsTabBase):
"""
Base class for create, edit, and delete team.
"""
TEAM_DESCRIPTION = 'The Avengers are a fictional team of superheroes.'
topic = {'name': 'Example Topic', 'id': 'example_topic', 'description': 'Description'}
TEAMS_NAME = 'Avengers'
def setUp(self):
super(TeamFormActions, self).setUp()
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
def verify_page_header(self, title, description, breadcrumbs):
"""
Verify that the page header correctly reflects the
create team header, description and breadcrumb.
"""
self.assertEqual(self.team_management_page.header_page_name, title)
self.assertEqual(self.team_management_page.header_page_description, description)
self.assertEqual(self.team_management_page.header_page_breadcrumbs, breadcrumbs)
def verify_and_navigate_to_create_team_page(self):
"""Navigates to the create team page and verifies."""
self.browse_teams_page.click_create_team_link()
self.verify_page_header(
title='Create a New Team',
description='Create a new team if you can\'t find an existing team to join, '
'or if you would like to learn with friends you know.',
breadcrumbs='All Topics {topic_name}'.format(topic_name=self.topic['name'])
)
def verify_and_navigate_to_edit_team_page(self):
"""Navigates to the edit team page and verifies."""
# pylint: disable=no-member
self.assertEqual(self.team_page.team_name, self.team['name'])
self.assertTrue(self.team_page.edit_team_button_present)
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
# Edit page header.
self.verify_page_header(
title='Edit Team',
description='If you make significant changes, make sure you notify '
'members of the team before making these changes.',
breadcrumbs='All Topics {topic_name} {team_name}'.format(
topic_name=self.topic['name'],
team_name=self.team['name']
)
)
def verify_team_info(self, name, description, location, language):
"""Verify the team information on team page."""
# pylint: disable=no-member
self.assertEqual(self.team_page.team_name, name)
self.assertEqual(self.team_page.team_description, description)
self.assertEqual(self.team_page.team_location, location)
self.assertEqual(self.team_page.team_language, language)
def fill_create_or_edit_form(self):
"""Fill the create/edit team form fields with appropriate values."""
self.team_management_page.value_for_text_field(
field_id='name',
value=self.TEAMS_NAME,
press_enter=False
)
self.team_management_page.value_for_textarea_field(
field_id='description',
value=self.TEAM_DESCRIPTION
)
self.team_management_page.value_for_dropdown_field(field_id='language', value='English')
self.team_management_page.value_for_dropdown_field(field_id='country', value='Pakistan')
def verify_all_fields_exist(self):
"""
Verify the fields for create/edit page.
"""
self.assertEqual(
self.team_management_page.message_for_field('name'),
'A name that identifies your team (maximum 255 characters).'
)
self.assertEqual(
self.team_management_page.message_for_textarea_field('description'),
'A short description of the team to help other learners understand '
'the goals or direction of the team (maximum 300 characters).'
)
self.assertEqual(
self.team_management_page.message_for_field('country'),
'The country that team members primarily identify with.'
)
self.assertEqual(
self.team_management_page.message_for_field('language'),
'The language that team members primarily use to communicate with each other.'
)
@ddt.ddt
class CreateTeamTest(TeamFormActions):
"""
Tests for creating a new Team within a Topic on the Teams page.
"""
def setUp(self):
super(CreateTeamTest, self).setUp()
self.set_team_configuration({'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]})
self.browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.browse_teams_page.visit()
def test_user_can_see_create_team_page(self):
"""
Scenario: The user should be able to see the create team page via teams list page.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Teams page for that topic
Then I should see the Create Team page link on bottom
And When I click create team link
Then I should see the create team page.
And I should see the create team header
And I should also see the help messages for fields.
"""
self.verify_and_navigate_to_create_team_page()
self.verify_all_fields_exist()
def test_user_can_see_error_message_for_missing_data(self):
"""
Scenario: The user should be able to see error message in case of missing required field.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
And When I click create team button without filling required fields
Then I should see the error message and highlighted fields.
"""
self.verify_and_navigate_to_create_team_page()
self.team_management_page.submit_form()
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
self.assertTrue(self.team_management_page.error_for_field(field_id='description'))
def test_user_can_see_error_message_for_incorrect_data(self):
"""
Scenario: The user should be able to see error message in case of increasing length for required fields.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I add text > than 255 characters for name field
And I click Create button
Then I should see the error message for exceeding length.
"""
self.verify_and_navigate_to_create_team_page()
# Fill the name field with >255 characters to see validation message.
self.team_management_page.value_for_text_field(
field_id='name',
value='EdX is a massive open online course (MOOC) provider and online learning platform. '
'It hosts online university-level courses in a wide range of disciplines to a worldwide '
'audience, some at no charge. It also conducts research into learning based on how '
'people use its platform. EdX was created for students and institutions that seek to'
'transform themselves through cutting-edge technologies, innovative pedagogy, and '
'rigorous courses. More than 70 schools, nonprofits, corporations, and international'
'organizations offer or plan to offer courses on the edX website. As of 22 October 2014,'
'edX has more than 4 million users taking more than 500 courses online.',
press_enter=False
)
self.team_management_page.submit_form()
self.assertEqual(
self.team_management_page.validation_message_text,
'Check the highlighted fields below and try again.'
)
self.assertTrue(self.team_management_page.error_for_field(field_id='name'))
def test_user_can_create_new_team_successfully(self):
"""
Scenario: The user should be able to create new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I fill all the fields present with appropriate data
And I click Create button
Then I expect analytics events to be emitted
And I should see the page for my team
And I should see the message that says "You are member of this team"
And the new team should be added to the list of teams within the topic
And the number of teams should be updated on the topic card
And if I switch to "My Team", the newly created team is displayed
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.browse_teams_page.visit()
self.verify_and_navigate_to_create_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.created'
},
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'added_on_create',
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_management_page.submit_form()
# Verify that the page is shown for the new team
team_page = TeamPage(self.browser, self.course_id)
team_page.wait_for_page()
self.assertEqual(team_page.team_name, self.TEAMS_NAME)
self.assertEqual(team_page.team_description, self.TEAM_DESCRIPTION)
self.assertEqual(team_page.team_user_membership_text, 'You are a member of this team.')
# Verify the new team was added to the topic list
self.teams_page.click_specific_topic("Example Topic")
self.teams_page.verify_topic_team_count(1)
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(1)
# Verify that if one switches to "My Team" without reloading the page, the newly created team is shown.
self.verify_my_team_count(1)
def test_user_can_cancel_the_team_creation(self):
"""
Scenario: The user should be able to cancel the creation of new team.
Given I am enrolled in a course with a team configuration and a topic
When I visit the Create Team page for that topic
Then I should see the Create Team header and form
When I click Cancel button
Then I should see teams list page without any new team.
And if I switch to "My Team", it shows no teams
"""
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.verify_and_navigate_to_create_team_page()
self.team_management_page.cancel_team()
self.assertTrue(self.browse_teams_page.is_browser_on_page())
self.assertTrue(self.browse_teams_page.get_pagination_header_text().startswith('Showing 0 out of 0 total'))
self.teams_page.click_all_topics()
self.teams_page.verify_team_count_in_first_topic(0)
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the create team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the create team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'new-team',
'topic_id': self.topic['id'],
'team_id': None
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_create_team_page()
@ddt.ddt
class DeleteTeamTest(TeamFormActions):
"""
Tests for deleting teams.
"""
def setUp(self):
super(DeleteTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
#need to have a membership to confirm it gets deleted as well
self.create_membership(self.user_info['username'], self.team['id'])
self.team_page.visit()
def test_cancel_delete(self):
"""
Scenario: The user should be able to cancel the Delete Team dialog
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I cancel the prompt
And I refresh the page
Then I should still see the team
"""
self.delete_team(cancel=True)
self.assertTrue(self.team_management_page.is_browser_on_page())
self.browser.refresh()
self.team_management_page.wait_for_page()
self.assertEqual(
' '.join(('All Topics', self.topic['name'], self.team['name'])),
self.team_management_page.header_page_breadcrumbs
)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_delete_team(self, role):
"""
Scenario: The user should be able to see and navigate to the delete team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Delete Team button
When I click the delete team button
And I confirm the prompt
Then I should see the browse teams page
And the team should not be present
"""
# If role is None, remain logged in as global staff
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.delete_team(require_notification=False)
browse_teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
self.assertTrue(browse_teams_page.is_browser_on_page())
self.assertNotIn(self.team['name'], browse_teams_page.team_names)
def delete_team(self, **kwargs):
"""
Delete a team. Passes `kwargs` to `confirm_prompt`.
Expects edx.team.deleted event to be emitted, with correct course_id.
Also expects edx.team.learner_removed event to be emitted for the
membership that is removed as a part of the delete operation.
"""
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.team_management_page.delete_team_button.click()
if 'cancel' in kwargs and kwargs['cancel'] is True:
confirm_prompt(self.team_management_page, **kwargs)
else:
expected_events = [
{
'event_type': 'edx.team.deleted',
'event': {
'team_id': self.team['id']
}
},
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'team_deleted',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
confirm_prompt(self.team_management_page, **kwargs)
def test_delete_team_updates_topics(self):
"""
Scenario: Deleting a team should update the team count on the topics page
Given I am staff user for a course with a team
And I delete a team
When I navigate to the browse topics page
        Then the team count for the deleted team's topic should be updated
"""
self.delete_team(require_notification=False)
BrowseTeamsPage(self.browser, self.course_id, self.topic).click_all_topics()
topics_page = BrowseTopicsPage(self.browser, self.course_id)
self.assertTrue(topics_page.is_browser_on_page())
self.teams_page.verify_topic_team_count(0)
@ddt.ddt
class EditTeamTest(TeamFormActions):
"""
Tests for editing the team.
"""
def setUp(self):
super(EditTeamTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team = self.create_teams(self.topic, num_teams=1)[0]
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
self.team_page.visit()
def test_staff_can_navigate_to_edit_team_page(self):
"""
Scenario: The user should be able to see and navigate to the edit team page.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And I should see the edit team header
And I should also see the help messages for fields
"""
self.verify_and_navigate_to_edit_team_page()
self.verify_all_fields_exist()
def test_staff_can_edit_team_successfully(self):
"""
Scenario: The staff should be able to edit team successfully.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
And an analytics event should be fired
When I edit all the fields with appropriate data
And I click Update button
Then I should see the page for my team with updated data
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
expected_events = [
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'country',
'old': 'AF',
'new': 'PK',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'name',
'old': self.team['name'],
'new': self.TEAMS_NAME,
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'language',
'old': 'aa',
'new': 'en',
'truncated': [],
}
},
{
'event_type': 'edx.team.changed',
'event': {
'team_id': self.team['id'],
'field': 'description',
'old': self.team['description'],
'new': self.TEAM_DESCRIPTION,
'truncated': [],
}
},
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_staff_can_cancel_the_team_edit(self):
"""
Scenario: The user should be able to cancel the editing of team.
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the edit team page
Then I should see the Edit Team header
When I click Cancel button
        Then I should see the team page without changes.
"""
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.cancel_team()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
def test_student_cannot_see_edit_button(self):
"""
Scenario: The student should not see the edit team button.
Given I am student for a course with a team
When I visit the Team profile page
Then I should not see the Edit Team button
"""
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page.visit()
self.assertFalse(self.team_page.edit_team_button_present)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged_user_can_edit_team(self, role):
"""
Scenario: The user with specified role should see the edit team button.
Given I am user with privileged role for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
"""
kwargs = {
'course_id': self.course_id,
'staff': False
}
if role is not None:
kwargs['roles'] = role
AutoAuthPage(self.browser, **kwargs).visit()
self.team_page.visit()
self.teams_page.wait_for_page()
self.assertTrue(self.team_page.edit_team_button_present)
self.verify_team_info(
name=self.team['name'],
description=self.team['description'],
location='Afghanistan',
language='Afar'
)
self.verify_and_navigate_to_edit_team_page()
self.fill_create_or_edit_form()
self.team_management_page.submit_form()
self.team_page.wait_for_page()
self.verify_team_info(
name=self.TEAMS_NAME,
description=self.TEAM_DESCRIPTION,
location='Pakistan',
language='English'
)
def test_page_viewed_event(self):
"""
Scenario: Visiting the edit team page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the edit team page
Then my browser should post a page viewed event
"""
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'edit-team',
'topic_id': self.topic['id'],
'team_id': self.team['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.verify_and_navigate_to_edit_team_page()
@ddt.ddt
class EditMembershipTest(TeamFormActions):
"""
Tests for administrating from the team membership page
"""
def setUp(self):
super(EditMembershipTest, self).setUp()
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': 10, 'topics': [self.topic]},
global_staff=True
)
self.team_management_page = TeamManagementPage(self.browser, self.course_id, self.topic)
self.team = self.create_teams(self.topic, num_teams=1)[0]
        # make sure a user exists on this team so we can edit the membership
self.create_membership(self.user_info['username'], self.team['id'])
self.edit_membership_page = EditMembershipPage(self.browser, self.course_id, self.team)
self.team_page = TeamPage(self.browser, self.course_id, team=self.team)
def edit_membership_helper(self, role, cancel=False):
"""
Helper for common functionality in edit membership tests.
        Makes all relevant assertions about membership being removed,
        including verifying that edx.team.learner_removed events are emitted.
"""
if role is not None:
AutoAuthPage(
self.browser,
course_id=self.course_id,
staff=False,
roles=role
).visit()
self.team_page.visit()
self.team_page.click_edit_team_button()
self.team_management_page.wait_for_page()
self.assertTrue(
self.team_management_page.membership_button_present
)
self.team_management_page.click_membership_button()
self.edit_membership_page.wait_for_page()
self.edit_membership_page.click_first_remove()
if cancel:
self.edit_membership_page.cancel_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 1)
else:
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'team_id': self.team['id'],
'remove_method': 'removed_by_admin',
'user_id': self.user_info['user_id']
}
}
]
with self.assert_events_match_during(
event_filter=self.only_team_events, expected_events=expected_events
):
self.edit_membership_page.confirm_delete_membership_dialog()
self.assertEqual(self.edit_membership_page.team_members, 0)
self.assertTrue(self.edit_membership_page.is_browser_on_page)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_remove_membership(self, role):
"""
Scenario: The user should be able to remove a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and confirm the dialog
Then my membership should be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=False)
@ddt.data('Moderator', 'Community TA', 'Administrator', None)
def test_cancel_remove_membership(self, role):
"""
        Scenario: The user should be able to cancel removing a membership
Given I am staff user for a course with a team
When I visit the Team profile page
Then I should see the Edit Team button
And When I click edit team button
Then I should see the Edit Membership button
And When I click the edit membership button
Then I should see the edit membership page
And When I click the remove button and cancel the dialog
Then my membership should not be removed, and I should remain on the page
"""
self.edit_membership_helper(role, cancel=True)
@attr('shard_5')
@ddt.ddt
class TeamPageTest(TeamsTabBase):
"""Tests for viewing a specific team"""
SEND_INVITE_TEXT = 'Send this link to friends so that they can join too.'
def setUp(self):
super(TeamPageTest, self).setUp()
self.topic = {u"name": u"Example Topic", u"id": "example_topic", u"description": "Description"}
def _set_team_configuration_and_membership(
self,
max_team_size=10,
membership_team_index=0,
visit_team_index=0,
create_membership=True,
another_user=False):
"""
Set team configuration.
Arguments:
max_team_size (int): number of users a team can have
membership_team_index (int): index of team user will join
visit_team_index (int): index of team user will visit
create_membership (bool): whether to create membership or not
            another_user (bool): whether a different user should visit the team
"""
#pylint: disable=attribute-defined-outside-init
self.set_team_configuration(
{'course_id': self.course_id, 'max_team_size': max_team_size, 'topics': [self.topic]}
)
self.teams = self.create_teams(self.topic, 2)
if create_membership:
self.create_membership(self.user_info['username'], self.teams[membership_team_index]['id'])
if another_user:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.team_page = TeamPage(self.browser, self.course_id, self.teams[visit_team_index])
def setup_thread(self):
"""
Create and return a thread for this test's discussion topic.
"""
thread = Thread(
id="test_thread_{}".format(uuid4().hex),
commentable_id=self.teams[0]['discussion_topic_id'],
body="Dummy text body."
)
thread_fixture = MultipleThreadFixture([thread])
thread_fixture.push()
return thread
def setup_discussion_user(self, role=None, staff=False):
"""Set this test's user to have the given role in its
discussions. Role is one of 'Community TA', 'Moderator',
'Administrator', or 'Student'.
"""
kwargs = {
'course_id': self.course_id,
'staff': staff
}
if role is not None:
kwargs['roles'] = role
#pylint: disable=attribute-defined-outside-init
self.user_info = AutoAuthPage(self.browser, **kwargs).visit().user_info
def verify_teams_discussion_permissions(self, should_have_permission):
"""Verify that the teams discussion component is in the correct state
for the test user. If `should_have_permission` is True, assert that
the user can see controls for posting replies, voting, editing, and
deleting. Otherwise, assert that those controls are hidden.
"""
thread = self.setup_thread()
self.team_page.visit()
self.assertEqual(self.team_page.discussion_id, self.teams[0]['discussion_topic_id'])
discussion = self.team_page.discussion_page
self.assertTrue(discussion.is_browser_on_page())
self.assertTrue(discussion.is_discussion_expanded())
self.assertEqual(discussion.get_num_displayed_threads(), 1)
self.assertTrue(discussion.has_thread(thread['id']))
assertion = self.assertTrue if should_have_permission else self.assertFalse
assertion(discussion.q(css='.post-header-actions').present)
assertion(discussion.q(css='.add-response').present)
assertion(discussion.q(css='.new-post-btn').present)
def test_discussion_on_my_team_page(self):
"""
Scenario: Team Page renders a discussion for a team to which I belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the existing thread
And I should see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership()
self.verify_teams_discussion_permissions(True)
@ddt.data(True, False)
def test_discussion_on_other_team_page(self, is_staff):
"""
Scenario: Team Page renders a team discussion for a team to which I do
not belong.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When the team has a discussion with a thread
And I visit the Team page for that team
Then I should see a discussion with the correct discussion_id
And I should see the team's thread
And I should not see controls to change the state of the discussion
"""
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(staff=is_staff)
self.verify_teams_discussion_permissions(False)
@ddt.data('Moderator', 'Community TA', 'Administrator')
def test_discussion_privileged(self, role):
self._set_team_configuration_and_membership(create_membership=False)
self.setup_discussion_user(role=role)
self.verify_teams_discussion_permissions(True)
def assert_team_details(self, num_members, is_member=True, max_size=10):
"""
        Verifies that the user can see all the information presented on the
        detail page according to their membership status.
Arguments:
num_members (int): number of users in a team
is_member (bool) default True: True if request user is member else False
max_size (int): number of users a team can have
"""
self.assertEqual(
self.team_page.team_capacity_text,
self.team_page.format_capacity_text(num_members, max_size)
)
self.assertEqual(self.team_page.team_location, 'Afghanistan')
self.assertEqual(self.team_page.team_language, 'Afar')
self.assertEqual(self.team_page.team_members, num_members)
if num_members > 0:
self.assertTrue(self.team_page.team_members_present)
else:
self.assertFalse(self.team_page.team_members_present)
if is_member:
self.assertEqual(self.team_page.team_user_membership_text, 'You are a member of this team.')
self.assertTrue(self.team_page.team_leave_link_present)
self.assertTrue(self.team_page.new_post_button_present)
else:
self.assertEqual(self.team_page.team_user_membership_text, '')
self.assertFalse(self.team_page.team_leave_link_present)
self.assertFalse(self.team_page.new_post_button_present)
def test_team_member_can_see_full_team_details(self):
"""
Scenario: Team member can see full info for team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see the full team detail
And I should see the team members
And I should see my team membership text
And I should see the language & country
        And I should see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assert_team_details(
num_members=1,
)
def test_other_users_can_see_limited_team_details(self):
"""
        Scenario: Users who are not members of this team can only see limited info for this team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am not a member
When I visit the Team page for that team
Then I should not see full team detail
And I should see the team members
And I should not see my team membership text
And I should not see the Leave Team and Invite Team links
"""
self._set_team_configuration_and_membership(create_membership=False)
self.team_page.visit()
self.assert_team_details(is_member=False, num_members=0)
def test_user_can_navigate_to_members_profile_page(self):
"""
Scenario: User can navigate to profile page via team member profile image.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic of which I am a member
When I visit the Team page for that team
Then I should see profile images for the team members
When I click on the first profile image
Then I should be taken to the user's profile page
And I should see the username on profile page
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
learner_name = self.team_page.first_member_username
self.team_page.click_first_profile_image()
learner_profile_page = LearnerProfilePage(self.browser, learner_name)
learner_profile_page.wait_for_page()
learner_profile_page.wait_for_field('username')
self.assertTrue(learner_profile_page.field_is_visible('username'))
def test_join_team(self):
"""
        Scenario: User can join a Team if not already a member.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I visit the Team page for that team
Then I should see Join Team button
And I should not see New Post button
When I click on Join Team button
Then there should be no Join Team button and no message
And an analytics event should be emitted
And I should see the updated information under Team Details
And I should see New Post button
And if I switch to "My Team", the team I have joined is displayed
"""
self._set_team_configuration_and_membership(create_membership=False)
teams_page = BrowseTeamsPage(self.browser, self.course_id, self.topic)
teams_page.visit()
teams_page.view_first_team()
self.assertTrue(self.team_page.join_team_button_present)
expected_events = [
{
'event_type': 'edx.team.learner_added',
'event': {
'add_method': 'joined_from_team_view'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_page.click_join_team_button()
self.assertFalse(self.team_page.join_team_button_present)
self.assertFalse(self.team_page.join_team_message_present)
self.assert_team_details(num_members=1, is_member=True)
# Verify that if one switches to "My Team" without reloading the page, the newly joined team is shown.
self.teams_page.click_all_topics()
self.verify_my_team_count(1)
def test_already_member_message(self):
"""
Scenario: User should see `You are already in a team` if user is a
        member of another team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am already a member of a team
And I visit a team other than mine
Then I should see `You are already in a team` message
"""
self._set_team_configuration_and_membership(membership_team_index=0, visit_team_index=1)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'You already belong to another team.')
self.assert_team_details(num_members=0, is_member=False)
def test_team_full_message(self):
"""
Scenario: User should see `Team is full` message when team is full.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And team has no space left
And I am not a member of any team
And I visit the team
Then I should see `Team is full` message
"""
self._set_team_configuration_and_membership(
create_membership=True,
max_team_size=1,
membership_team_index=0,
visit_team_index=0,
another_user=True
)
self.team_page.visit()
self.assertEqual(self.team_page.join_team_message, 'This team is full.')
self.assert_team_details(num_members=1, is_member=False, max_size=1)
def test_leave_team(self):
"""
Scenario: User can leave a team.
Given I am enrolled in a course with a team configuration, a topic,
and a team belonging to that topic
And I am a member of team
And I visit the team
And I should not see Join Team button
And I should see New Post button
Then I should see Leave Team link
When I click on Leave Team link
Then user should be removed from team
And an analytics event should be emitted
And I should see Join Team button
And I should not see New Post button
And if I switch to "My Team", the team I have left is not displayed
"""
self._set_team_configuration_and_membership()
self.team_page.visit()
self.assertFalse(self.team_page.join_team_button_present)
self.assert_team_details(num_members=1)
expected_events = [
{
'event_type': 'edx.team.learner_removed',
'event': {
'remove_method': 'self_removal'
}
}
]
with self.assert_events_match_during(event_filter=self.only_team_events, expected_events=expected_events):
self.team_page.click_leave_team_link()
self.assert_team_details(num_members=0, is_member=False)
self.assertTrue(self.team_page.join_team_button_present)
# Verify that if one switches to "My Team" without reloading the page, the old team no longer shows.
self.teams_page.click_all_topics()
self.verify_my_team_count(0)
def test_page_viewed_event(self):
"""
Scenario: Visiting the team profile page should fire a page viewed event.
Given I am enrolled in a course with a team configuration and a topic
When I visit the team profile page
Then my browser should post a page viewed event
"""
self._set_team_configuration_and_membership()
events = [{
'event_type': 'edx.team.page_viewed',
'event': {
'page_name': 'single-team',
'topic_id': self.topic['id'],
'team_id': self.teams[0]['id']
}
}]
with self.assert_events_match_during(self.only_team_events, expected_events=events):
self.team_page.visit()
| agpl-3.0 | 3,746,891,124,973,736,000 | 41.355977 | 119 | 0.616754 | false |
ebmdatalab/openprescribing | openprescribing/dmd/migrations/0003_auto_20191008_1141.py | 1 | 42978 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-08 10:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dmd', '0002_auto_20181007_1443'),
]
operations = [
migrations.AlterModelOptions(
name='aping',
options={'verbose_name': 'Excipients'},
),
migrations.AlterModelOptions(
name='ing',
options={'verbose_name': 'Ingredients'},
),
migrations.AlterModelOptions(
name='packinfo',
options={'verbose_name': 'Appliance Pack Information'},
),
migrations.AlterField(
model_name='amp',
name='abbrevnm',
field=models.CharField(help_text='Abbreviated name', max_length=60, null=True),
),
migrations.AlterField(
model_name='amp',
name='avail_restrict',
field=models.ForeignKey(db_column='avail_restrictcd', help_text='Restrictions on availability', on_delete=django.db.models.deletion.CASCADE, to='dmd.AvailabilityRestriction'),
),
migrations.AlterField(
model_name='amp',
name='bnf_code',
field=models.CharField(help_text='BNF code', max_length=15, null=True),
),
migrations.AlterField(
model_name='amp',
name='combprod',
field=models.ForeignKey(db_column='combprodcd', help_text='Combination product', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.CombinationProdInd'),
),
migrations.AlterField(
model_name='amp',
name='descr',
field=models.CharField(help_text='Description', max_length=700),
),
migrations.AlterField(
model_name='amp',
name='ema',
field=models.BooleanField(help_text='EMA additional monitoring'),
),
migrations.AlterField(
model_name='amp',
name='flavour',
field=models.ForeignKey(db_column='flavourcd', help_text='Flavour', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.Flavour'),
),
migrations.AlterField(
model_name='amp',
name='id',
field=models.BigIntegerField(db_column='apid', help_text='Identifier', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='amp',
name='invalid',
field=models.BooleanField(help_text='Invalid'),
),
migrations.AlterField(
model_name='amp',
name='lic_auth',
field=models.ForeignKey(db_column='lic_authcd', help_text='Current licensing authority', on_delete=django.db.models.deletion.CASCADE, to='dmd.LicensingAuthority'),
),
migrations.AlterField(
model_name='amp',
name='lic_auth_prev',
field=models.ForeignKey(db_column='lic_auth_prevcd', help_text='Previous licensing authority', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.LicensingAuthority'),
),
migrations.AlterField(
model_name='amp',
name='lic_authchange',
field=models.ForeignKey(db_column='lic_authchangecd', help_text='Reason for change of licensing authority', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.LicensingAuthorityChangeReason'),
),
migrations.AlterField(
model_name='amp',
name='lic_authchangedt',
field=models.DateField(help_text='Date of change of licensing authority', null=True),
),
migrations.AlterField(
model_name='amp',
name='nm',
field=models.CharField(help_text='Name', max_length=255),
),
migrations.AlterField(
model_name='amp',
name='nm_prev',
field=models.CharField(help_text='Previous name', max_length=255, null=True),
),
migrations.AlterField(
model_name='amp',
name='nmdt',
field=models.DateField(help_text='Date of name applicability', null=True),
),
migrations.AlterField(
model_name='amp',
name='parallel_import',
field=models.BooleanField(help_text='Parallel import'),
),
migrations.AlterField(
model_name='amp',
name='supp',
field=models.ForeignKey(db_column='suppcd', help_text='Supplier', on_delete=django.db.models.deletion.CASCADE, to='dmd.Supplier'),
),
migrations.AlterField(
model_name='amp',
name='vmp',
field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
),
migrations.AlterField(
model_name='ampp',
name='abbrevnm',
field=models.CharField(help_text='Abbreviated name', max_length=60, null=True),
),
migrations.AlterField(
model_name='ampp',
name='amp',
field=models.ForeignKey(db_column='apid', help_text='AMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMP'),
),
migrations.AlterField(
model_name='ampp',
name='bnf_code',
field=models.CharField(help_text='BNF code', max_length=15, null=True),
),
migrations.AlterField(
model_name='ampp',
name='combpack',
field=models.ForeignKey(db_column='combpackcd', help_text='Combination pack', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.CombinationPackInd'),
),
migrations.AlterField(
model_name='ampp',
name='disc',
field=models.ForeignKey(db_column='disccd', help_text='Discontinued', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.DiscontinuedInd'),
),
migrations.AlterField(
model_name='ampp',
name='discdt',
field=models.DateField(help_text='Discontinued change date', null=True),
),
migrations.AlterField(
model_name='ampp',
name='id',
field=models.BigIntegerField(db_column='appid', help_text='Identifier', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='ampp',
name='invalid',
field=models.BooleanField(help_text='Invalid'),
),
migrations.AlterField(
model_name='ampp',
name='legal_cat',
field=models.ForeignKey(db_column='legal_catcd', help_text='Legal category', on_delete=django.db.models.deletion.CASCADE, to='dmd.LegalCategory'),
),
migrations.AlterField(
model_name='ampp',
name='nm',
field=models.CharField(help_text='Description', max_length=774),
),
migrations.AlterField(
model_name='ampp',
name='subp',
field=models.CharField(help_text='Sub pack info', max_length=30, null=True),
),
migrations.AlterField(
model_name='ampp',
name='vmpp',
field=models.ForeignKey(db_column='vppid', help_text='VMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMPP'),
),
migrations.AlterField(
model_name='apinfo',
name='amp',
field=models.OneToOneField(db_column='apid', help_text='AMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMP'),
),
migrations.AlterField(
model_name='apinfo',
name='colour',
field=models.ForeignKey(db_column='colourcd', help_text='Colour', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.Colour'),
),
migrations.AlterField(
model_name='apinfo',
name='prod_order_no',
field=models.CharField(help_text='Product order number', max_length=20, null=True),
),
migrations.AlterField(
model_name='apinfo',
name='sz_weight',
field=models.CharField(help_text='Size / weight', max_length=100, null=True),
),
migrations.AlterField(
model_name='aping',
name='amp',
field=models.ForeignKey(db_column='apid', help_text='AMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMP'),
),
migrations.AlterField(
model_name='aping',
name='ing',
field=models.ForeignKey(db_column='isid', help_text='Ingredient', on_delete=django.db.models.deletion.CASCADE, to='dmd.Ing'),
),
migrations.AlterField(
model_name='aping',
name='strnth',
field=models.DecimalField(decimal_places=3, help_text='Pharmaceutical strength numerical value', max_digits=10, null=True),
),
migrations.AlterField(
model_name='aping',
name='uom',
field=models.ForeignKey(db_column='uomcd', help_text='Pharmaceutical Strength Unit of Measure', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
),
migrations.AlterField(
model_name='availabilityrestriction',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='availabilityrestriction',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='basisofname',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='basisofname',
name='descr',
field=models.CharField(help_text='Description', max_length=150),
),
migrations.AlterField(
model_name='basisofstrnth',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='basisofstrnth',
name='descr',
field=models.CharField(help_text='Description', max_length=150),
),
migrations.AlterField(
model_name='colour',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='colour',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='combinationpackind',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='combinationpackind',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='combinationprodind',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='combinationprodind',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='controldrugcategory',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='controldrugcategory',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='controlinfo',
name='cat',
field=models.ForeignKey(db_column='catcd', help_text='Controlled Drug category', on_delete=django.db.models.deletion.CASCADE, to='dmd.ControlDrugCategory'),
),
migrations.AlterField(
model_name='controlinfo',
name='cat_prev',
field=models.ForeignKey(db_column='cat_prevcd', help_text='Previous Controlled Drug information', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.ControlDrugCategory'),
),
migrations.AlterField(
model_name='controlinfo',
name='catdt',
field=models.DateField(help_text='Date of applicability', null=True),
),
migrations.AlterField(
model_name='controlinfo',
name='vmp',
field=models.OneToOneField(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
),
migrations.AlterField(
model_name='dfindicator',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='dfindicator',
name='descr',
field=models.CharField(help_text='Description', max_length=20),
),
migrations.AlterField(
model_name='dform',
name='form',
field=models.ForeignKey(db_column='formcd', help_text='Formulation', on_delete=django.db.models.deletion.CASCADE, to='dmd.Form'),
),
migrations.AlterField(
model_name='dform',
name='vmp',
field=models.OneToOneField(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
),
migrations.AlterField(
model_name='discontinuedind',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='discontinuedind',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='dnd',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='dnd',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='droute',
name='route',
field=models.ForeignKey(db_column='routecd', help_text='Route', on_delete=django.db.models.deletion.CASCADE, to='dmd.Route'),
),
migrations.AlterField(
model_name='droute',
name='vmp',
field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
),
migrations.AlterField(
model_name='dtinfo',
name='dt',
field=models.DateField(help_text='Date from which applicable', null=True),
),
migrations.AlterField(
model_name='dtinfo',
name='pay_cat',
field=models.ForeignKey(db_column='pay_catcd', help_text='Drug Tariff payment category', on_delete=django.db.models.deletion.CASCADE, to='dmd.DtPaymentCategory'),
),
migrations.AlterField(
model_name='dtinfo',
name='prevprice',
field=models.IntegerField(help_text='Previous price', null=True),
),
migrations.AlterField(
model_name='dtinfo',
name='price',
field=models.IntegerField(help_text='Drug Tariff price', null=True),
),
migrations.AlterField(
model_name='dtinfo',
name='vmpp',
field=models.OneToOneField(db_column='vppid', help_text='VMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMPP'),
),
migrations.AlterField(
model_name='dtpaymentcategory',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='dtpaymentcategory',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='flavour',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='flavour',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='form',
name='cd',
field=models.BigIntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='form',
name='cddt',
field=models.DateField(help_text='Date code is applicable from', null=True),
),
migrations.AlterField(
model_name='form',
name='cdprev',
field=models.BigIntegerField(help_text='Previous code', null=True),
),
migrations.AlterField(
model_name='form',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='gtin',
name='ampp',
field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
),
migrations.AlterField(
model_name='gtin',
name='enddt',
field=models.DateField(help_text='The date the GTIN became invalid', null=True),
),
migrations.AlterField(
model_name='gtin',
name='gtin',
field=models.BigIntegerField(help_text='GTIN'),
),
migrations.AlterField(
model_name='gtin',
name='startdt',
field=models.DateField(help_text='GTIN date'),
),
migrations.AlterField(
model_name='ing',
name='id',
field=models.BigIntegerField(db_column='isid', help_text='Identifier', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='ing',
name='invalid',
field=models.BooleanField(help_text='Invalid'),
),
migrations.AlterField(
model_name='ing',
name='isiddt',
field=models.DateField(help_text='Date identifier became valid', null=True),
),
migrations.AlterField(
model_name='ing',
name='isidprev',
field=models.BigIntegerField(help_text='Previous identifier', null=True),
),
migrations.AlterField(
model_name='ing',
name='nm',
field=models.CharField(help_text='Name', max_length=255),
),
migrations.AlterField(
model_name='legalcategory',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='legalcategory',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='licensingauthority',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='licensingauthority',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='licensingauthoritychangereason',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='licensingauthoritychangereason',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='licroute',
name='amp',
field=models.ForeignKey(db_column='apid', help_text='AMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMP'),
),
migrations.AlterField(
model_name='licroute',
name='route',
field=models.ForeignKey(db_column='routecd', help_text='Licenced route', on_delete=django.db.models.deletion.CASCADE, to='dmd.Route'),
),
migrations.AlterField(
model_name='namechangereason',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='namechangereason',
name='descr',
field=models.CharField(help_text='Description', max_length=150),
),
migrations.AlterField(
model_name='ont',
name='form',
field=models.ForeignKey(db_column='formcd', help_text='Form & Route', on_delete=django.db.models.deletion.CASCADE, to='dmd.OntFormRoute'),
),
migrations.AlterField(
model_name='ont',
name='vmp',
field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
),
migrations.AlterField(
model_name='ontformroute',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='ontformroute',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='packinfo',
name='ampp',
field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
),
migrations.AlterField(
model_name='packinfo',
name='pack_order_no',
field=models.CharField(help_text='Pack order number', max_length=20, null=True),
),
migrations.AlterField(
model_name='packinfo',
name='reimb_stat',
field=models.ForeignKey(db_column='reimb_statcd', help_text='Appliance reimbursement status', on_delete=django.db.models.deletion.CASCADE, to='dmd.ReimbursementStatus'),
),
migrations.AlterField(
model_name='packinfo',
name='reimb_statdt',
field=models.DateField(help_text='Date appliance reimbursement status became effective', null=True),
),
migrations.AlterField(
model_name='packinfo',
name='reimb_statprev',
field=models.ForeignKey(db_column='reimb_statprevcd', help_text='Appliance reimbursement previous status', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.ReimbursementStatus'),
),
migrations.AlterField(
model_name='prescribinfo',
name='acbs',
field=models.BooleanField(help_text='ACBS'),
),
migrations.AlterField(
model_name='prescribinfo',
name='ampp',
field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
),
migrations.AlterField(
model_name='prescribinfo',
name='dent_f',
field=models.BooleanField(help_text='Dental formulary'),
),
migrations.AlterField(
model_name='prescribinfo',
name='enurse_f',
field=models.BooleanField(help_text='Nurse extended formulary'),
),
migrations.AlterField(
model_name='prescribinfo',
name='fp10_mda',
field=models.BooleanField(help_text='FP10 MDA Prescription'),
),
migrations.AlterField(
model_name='prescribinfo',
name='hosp',
field=models.BooleanField(help_text='Hospital'),
),
migrations.AlterField(
model_name='prescribinfo',
name='nurse_f',
field=models.BooleanField(help_text='Nurse formulary'),
),
migrations.AlterField(
model_name='prescribinfo',
name='padm',
field=models.BooleanField(help_text='Personally administered'),
),
migrations.AlterField(
model_name='prescribinfo',
name='sched_1',
field=models.BooleanField(help_text='Schedule 1'),
),
migrations.AlterField(
model_name='prescribinfo',
name='sched_2',
field=models.BooleanField(help_text='Schedule 2'),
),
migrations.AlterField(
model_name='pricebasis',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='pricebasis',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='priceinfo',
name='ampp',
field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
),
migrations.AlterField(
model_name='priceinfo',
name='price',
field=models.IntegerField(help_text='Price', null=True),
),
migrations.AlterField(
model_name='priceinfo',
name='price_basis',
field=models.ForeignKey(db_column='price_basiscd', help_text='Price basis', on_delete=django.db.models.deletion.CASCADE, to='dmd.PriceBasis'),
),
migrations.AlterField(
model_name='priceinfo',
name='price_prev',
field=models.IntegerField(help_text='Price prior to change date', null=True),
),
migrations.AlterField(
model_name='priceinfo',
name='pricedt',
field=models.DateField(help_text='Date of price validity', null=True),
),
migrations.AlterField(
model_name='reimbinfo',
name='ampp',
field=models.OneToOneField(db_column='appid', help_text='AMPP', on_delete=django.db.models.deletion.CASCADE, to='dmd.AMPP'),
),
migrations.AlterField(
model_name='reimbinfo',
name='bb',
field=models.BooleanField(help_text='Broken bulk'),
),
migrations.AlterField(
model_name='reimbinfo',
name='cal_pack',
field=models.BooleanField(help_text='Calendar pack'),
),
migrations.AlterField(
model_name='reimbinfo',
name='disp_fees',
field=models.IntegerField(help_text='Dispensing fees', null=True),
),
migrations.AlterField(
model_name='reimbinfo',
name='dnd',
field=models.ForeignKey(db_column='dndcd', help_text='Discount not deducted', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.Dnd'),
),
migrations.AlterField(
model_name='reimbinfo',
name='fp34d',
field=models.BooleanField(help_text='FP34D prescription item'),
),
migrations.AlterField(
model_name='reimbinfo',
name='px_chrgs',
field=models.IntegerField(help_text='Prescription charges', null=True),
),
migrations.AlterField(
model_name='reimbinfo',
name='spec_cont',
field=models.ForeignKey(db_column='spec_contcd', help_text='Special container', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.SpecCont'),
),
migrations.AlterField(
model_name='reimbursementstatus',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='reimbursementstatus',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='route',
name='cd',
field=models.BigIntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='route',
name='cddt',
field=models.DateField(help_text='Date code is applicable from', null=True),
),
migrations.AlterField(
model_name='route',
name='cdprev',
field=models.BigIntegerField(help_text='Previous code', null=True),
),
migrations.AlterField(
model_name='route',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='speccont',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='speccont',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='supplier',
name='cd',
field=models.BigIntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='supplier',
name='cddt',
field=models.DateField(help_text='Date code is applicable from', null=True),
),
migrations.AlterField(
model_name='supplier',
name='cdprev',
field=models.BigIntegerField(help_text='Previous code', null=True),
),
migrations.AlterField(
model_name='supplier',
name='descr',
field=models.CharField(help_text='Description', max_length=80),
),
migrations.AlterField(
model_name='supplier',
name='invalid',
field=models.BooleanField(help_text='Invalid'),
),
migrations.AlterField(
model_name='unitofmeasure',
name='cd',
field=models.BigIntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='unitofmeasure',
name='cddt',
field=models.DateField(help_text='Date code is applicable from', null=True),
),
migrations.AlterField(
model_name='unitofmeasure',
name='cdprev',
field=models.BigIntegerField(help_text='Previous code', null=True),
),
migrations.AlterField(
model_name='unitofmeasure',
name='descr',
field=models.CharField(help_text='Description', max_length=150),
),
migrations.AlterField(
model_name='virtualproductnonavail',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='virtualproductnonavail',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='virtualproductpresstatus',
name='cd',
field=models.IntegerField(help_text='Code', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='virtualproductpresstatus',
name='descr',
field=models.CharField(help_text='Description', max_length=60),
),
migrations.AlterField(
model_name='vmp',
name='abbrevnm',
field=models.CharField(help_text='Abbreviated name', max_length=60, null=True),
),
migrations.AlterField(
model_name='vmp',
name='basis',
field=models.ForeignKey(db_column='basiscd', help_text='Basis of preferred name', on_delete=django.db.models.deletion.CASCADE, to='dmd.BasisOfName'),
),
migrations.AlterField(
model_name='vmp',
name='basis_prev',
field=models.ForeignKey(db_column='basis_prevcd', help_text='Basis of previous name', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.BasisOfName'),
),
migrations.AlterField(
model_name='vmp',
name='bnf_code',
field=models.CharField(help_text='BNF code', max_length=15, null=True),
),
migrations.AlterField(
model_name='vmp',
name='cfc_f',
field=models.BooleanField(help_text='CFC free'),
),
migrations.AlterField(
model_name='vmp',
name='combprod',
field=models.ForeignKey(db_column='combprodcd', help_text='Combination product', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.CombinationProdInd'),
),
migrations.AlterField(
model_name='vmp',
name='df_ind',
field=models.ForeignKey(db_column='df_indcd', help_text='Dose form', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.DfIndicator'),
),
migrations.AlterField(
model_name='vmp',
name='glu_f',
field=models.BooleanField(help_text='Gluten free'),
),
migrations.AlterField(
model_name='vmp',
name='id',
field=models.BigIntegerField(db_column='vpid', help_text='Identifier', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='vmp',
name='invalid',
field=models.BooleanField(help_text='Invalid'),
),
migrations.AlterField(
model_name='vmp',
name='nm',
field=models.CharField(help_text='Name', max_length=255),
),
migrations.AlterField(
model_name='vmp',
name='nmchange',
field=models.ForeignKey(db_column='nmchangecd', help_text='Reason for name change', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.NamechangeReason'),
),
migrations.AlterField(
model_name='vmp',
name='nmdt',
field=models.DateField(help_text='Date of name applicability', null=True),
),
migrations.AlterField(
model_name='vmp',
name='nmprev',
field=models.CharField(help_text='Previous name', max_length=255, null=True),
),
migrations.AlterField(
model_name='vmp',
name='non_avail',
field=models.ForeignKey(db_column='non_availcd', help_text='Non-availability', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.VirtualProductNonAvail'),
),
migrations.AlterField(
model_name='vmp',
name='non_availdt',
field=models.DateField(help_text='Non-availability status date', null=True),
),
migrations.AlterField(
model_name='vmp',
name='pres_f',
field=models.BooleanField(help_text='Preservative free'),
),
migrations.AlterField(
model_name='vmp',
name='pres_stat',
field=models.ForeignKey(db_column='pres_statcd', help_text='Prescribing status', on_delete=django.db.models.deletion.CASCADE, to='dmd.VirtualProductPresStatus'),
),
migrations.AlterField(
model_name='vmp',
name='sug_f',
field=models.BooleanField(help_text='Sugar free'),
),
migrations.AlterField(
model_name='vmp',
name='udfs',
field=models.DecimalField(decimal_places=3, help_text='Unit dose form size', max_digits=10, null=True),
),
migrations.AlterField(
model_name='vmp',
name='udfs_uom',
field=models.ForeignKey(db_column='udfs_uomcd', help_text='Unit dose form units', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
),
migrations.AlterField(
model_name='vmp',
name='unit_dose_uom',
field=models.ForeignKey(db_column='unit_dose_uomcd', help_text='Unit dose unit of measure', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
),
migrations.AlterField(
model_name='vmp',
name='vpiddt',
field=models.DateField(help_text='Date identifier became valid', null=True),
),
migrations.AlterField(
model_name='vmp',
name='vpidprev',
field=models.BigIntegerField(help_text='Previous product identifier', null=True),
),
migrations.AlterField(
model_name='vmp',
name='vtm',
field=models.ForeignKey(db_column='vtmid', help_text='VTM', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.VTM'),
),
migrations.AlterField(
model_name='vmpp',
name='bnf_code',
field=models.CharField(help_text='BNF code', max_length=15, null=True),
),
migrations.AlterField(
model_name='vmpp',
name='combpack',
field=models.ForeignKey(db_column='combpackcd', help_text='Combination pack', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.CombinationPackInd'),
),
migrations.AlterField(
model_name='vmpp',
name='id',
field=models.BigIntegerField(db_column='vppid', help_text='Identifier', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='vmpp',
name='invalid',
field=models.BooleanField(help_text='Invalid'),
),
migrations.AlterField(
model_name='vmpp',
name='nm',
field=models.CharField(help_text='Description', max_length=420),
),
migrations.AlterField(
model_name='vmpp',
name='qty_uom',
field=models.ForeignKey(db_column='qty_uomcd', help_text='Quantity unit of measure', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
),
migrations.AlterField(
model_name='vmpp',
name='qtyval',
field=models.DecimalField(decimal_places=2, help_text='Quantity value', max_digits=10, null=True),
),
migrations.AlterField(
model_name='vmpp',
name='vmp',
field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
),
migrations.AlterField(
model_name='vpi',
name='basis_strnt',
field=models.ForeignKey(db_column='basis_strntcd', help_text='Basis of pharmaceutical strength', null=True, on_delete=django.db.models.deletion.CASCADE, to='dmd.BasisOfStrnth'),
),
migrations.AlterField(
model_name='vpi',
name='bs_subid',
field=models.BigIntegerField(help_text='Basis of strength substance identifier', null=True),
),
migrations.AlterField(
model_name='vpi',
name='ing',
field=models.ForeignKey(db_column='isid', help_text='Ingredient', on_delete=django.db.models.deletion.CASCADE, to='dmd.Ing'),
),
migrations.AlterField(
model_name='vpi',
name='strnt_dnmtr_uom',
field=models.ForeignKey(db_column='strnt_dnmtr_uomcd', help_text='Strength value denominator unit', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
),
migrations.AlterField(
model_name='vpi',
name='strnt_dnmtr_val',
field=models.DecimalField(decimal_places=3, help_text='Strength value denominator', max_digits=10, null=True),
),
migrations.AlterField(
model_name='vpi',
name='strnt_nmrtr_uom',
field=models.ForeignKey(db_column='strnt_nmrtr_uomcd', help_text='Strength value numerator unit', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dmd.UnitOfMeasure'),
),
migrations.AlterField(
model_name='vpi',
name='strnt_nmrtr_val',
field=models.DecimalField(decimal_places=3, help_text='Strength value numerator', max_digits=10, null=True),
),
migrations.AlterField(
model_name='vpi',
name='vmp',
field=models.ForeignKey(db_column='vpid', help_text='VMP', on_delete=django.db.models.deletion.CASCADE, to='dmd.VMP'),
),
migrations.AlterField(
model_name='vtm',
name='abbrevnm',
field=models.CharField(help_text='Abbreviated name', max_length=60, null=True),
),
migrations.AlterField(
model_name='vtm',
name='id',
field=models.BigIntegerField(db_column='vtmid', help_text='Identifier', primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='vtm',
name='invalid',
field=models.BooleanField(help_text='Invalid'),
),
migrations.AlterField(
model_name='vtm',
name='nm',
field=models.CharField(help_text='Name', max_length=255),
),
migrations.AlterField(
model_name='vtm',
name='vtmiddt',
field=models.DateField(help_text='VTM identifier date', null=True),
),
migrations.AlterField(
model_name='vtm',
name='vtmidprev',
field=models.BigIntegerField(help_text='Previous identifier', null=True),
),
]
| mit | 5,245,389,261,111,108,000 | 40.605034 | 223 | 0.571199 | false |
jbudynk/sherpa | versioneer.py | 1 | 40390 |
# Version: 0.14
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Updated by Omar Laurino for the Chandra X-Ray Center
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
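The static replacement is tiny; it looks roughly like the following sketch
(the header comment and the values are illustrative):
    # This file was generated by versioneer from revision-control data;
    # distribution tarballs contain a pre-generated copy of this file.
    version_version = '1.2.0'
    version_full = '1076c978a8d3cfc70f408fe5974aa6c092c949ac'
    def get_versions(default={}, verbose=False):
        return {'version': version_version, 'full': version_full}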
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
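This works because the installation step (described below) marks the file for
keyword expansion in `.gitattributes` with a line roughly like the following,
where the path is whatever `versionfile_source` is set to:
    src/myproject/_version.py export-subst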
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py versioneer` command (described below) will
append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
  generated script (a minimal sketch of such an override appears after this
  list).
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
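For a scripts-only project, the `build_scripts` override mentioned under
`versionfile_build` might look roughly like the sketch below, placed in
`setup.py` after the configuration lines. It assumes each script contains a
literal `__VERSION__` placeholder and that you register the class with
`cmdclass={'build_scripts': build_scripts_with_version}` in `setup()`; adapt
it to your project:
    import os
    from distutils.command.build_scripts import build_scripts as _distutils_build_scripts
    import versioneer
    class build_scripts_with_version(_distutils_build_scripts):
        def run(self):
            _distutils_build_scripts.run(self)
            # stamp each built script by replacing the placeholder string
            for script in self.scripts or []:
                built = os.path.join(self.build_dir, os.path.basename(script))
                with open(built) as f:
                    text = f.read()
                with open(built, 'w') as f:
                    f.write(text.replace('__VERSION__', versioneer.get_version()))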
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
````
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'myproject-' # dirname like 'myproject-1.2.0'
````
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`). It will also
modify your `MANIFEST.in` to include both `versioneer.py` and the generated
`_version.py` in sdist tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
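Concretely, at runtime this looks something like the following, assuming the
`__version__` hook that `setup.py versioneer` installs (step 4 above); the
printed values are illustrative:
    import myproject
    print(myproject.__version__)  # e.g. '1.0', or '1.0+2.g1076c97' between tags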
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see issue #12).
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: A condensed PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional
"local version" section with more detail for in-between builds. For Git,
this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe
--tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates
that the tree is like the "1076c97" commit but has uncommitted changes
(".dirty"), and that this commit is two revisions ("+2") beyond the "0.11"
tag. For released software (exactly equal to a known tag), the identifier
will only contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by ".dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac.dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
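As a concrete sketch, reusing the example values quoted above (`myproject` is the
illustrative package name and the hex digits are hypothetical), a dirty tree two
commits past the "0.11" tag would report something like:
````
>>> from myproject._version import get_versions
>>> get_versions()
{'version': '0.11+2.g1076c97.dirty',
 'full': '1076c978a8d3cfc70f408fe5974aa6c092c949ac.dirty'}
````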
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace your copy of
`versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py versioneer`. This will enable the use of additional version-control
systems (SVN, etc) in the future.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
import errno
import os
import re
import subprocess
import sys
from distutils.core import Command
from helpers import commands
_sdist = commands['sdist']
_build = commands['build']
# these configuration settings will be overridden by setup.py after it
# imports us
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = None
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.14 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full": keywords["full"].strip()}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full": keywords["full"].strip()}
def git_parse_vcs_describe(git_describe, tag_prefix, verbose=False):
# TAG-NUM-gHEX[-dirty] or HEX[-dirty] . TAG might have hyphens.
# dirty
dirty = git_describe.endswith("-dirty")
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
dirty_suffix = ".dirty" if dirty else ""
# now we have TAG-NUM-gHEX or HEX
if "-" not in git_describe: # just HEX
return "0+untagged.g"+git_describe+dirty_suffix, dirty
# just TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
return "0+unparseable"+dirty_suffix, dirty
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
return None, dirty
tag = full_tag[len(tag_prefix):]
# distance: number of commits since tag
distance = int(mo.group(2))
# commit: short hex revision ID
commit = mo.group(3)
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+NUM.gHEX[.dirty]] . Note that if you get a
# tagged build and then dirty it, you'll get TAG+0.gHEX.dirty . So you
# can always test version.endswith(".dirty").
version = tag
if distance or dirty:
version += "+%%d.g%%s" %% (distance, commit) + dirty_suffix
return version, dirty
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {} # get_versions() will try next method
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
stdout = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if stdout is None:
return {} # try next method
version, dirty = git_parse_vcs_describe(stdout, tag_prefix, verbose)
# build "full", which is FULLHEX[.dirty]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if dirty:
full += ".dirty"
return {"version": version, "full": full}
def get_versions(default={"version": "0+unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = {"refnames": git_refnames, "full": git_full}
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full": keywords["full"].strip()}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full": keywords["full"].strip()}
def git_parse_vcs_describe(git_describe, tag_prefix, verbose=False):
# TAG-NUM-gHEX[-dirty] or HEX[-dirty] . TAG might have hyphens.
# dirty
dirty = git_describe.endswith("-dirty")
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
dirty_suffix = ".dirty" if dirty else ""
# now we have TAG-NUM-gHEX or HEX
if "-" not in git_describe: # just HEX
return "0+untagged.g"+git_describe+dirty_suffix, dirty
# just TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
return "0+unparseable"+dirty_suffix, dirty
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
return None, dirty
tag = full_tag[len(tag_prefix):]
# distance: number of commits since tag
distance = int(mo.group(2))
# commit: short hex revision ID
commit = mo.group(3)
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+NUM.gHEX[.dirty]] . Note that if you get a
# tagged build and then dirty it, you'll get TAG+0.gHEX.dirty . So you
# can always test version.endswith(".dirty").
version = tag
if distance or dirty:
version += "+%d.g%s" % (distance, commit) + dirty_suffix
return version, dirty
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {} # get_versions() will try next method
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
stdout = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if stdout is None:
return {} # try next method
version, dirty = git_parse_vcs_describe(stdout, tag_prefix, verbose)
# build "full", which is FULLHEX[.dirty]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if dirty:
full += ".dirty"
return {"version": version, "full": full}
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.14) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "0+unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
with open(filename) as f:
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
except EnvironmentError:
return {}
return versions
def write_to_version_file(filename, versions):
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % versions)
print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
try:
return os.path.dirname(os.path.abspath(__file__))
except NameError:
return os.path.dirname(os.path.abspath(sys.argv[0]))
def vcs_function(vcs, suffix):
return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
def get_versions(default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
assert versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, \
"please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
# I am in versioneer.py, which must live at the top of the source tree,
# which we use to compute the root directory. py2exe/bbfreeze/non-CPython
# don't have __file__, in which case we fall back to sys.argv[0] (which
# ought to be the setup.py script). We prefer __file__ since that's more
# robust in cases where setup.py was invoked in some weird way (e.g. pip)
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
# extract version from first of _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
ver = versions_from_file(versionfile_abs)
if ver:
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose:
print("got version from VCS %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose:
print("got version from parentdir %s" % ver)
return ver
if verbose:
print("got version from default %s" % default)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
if versionfile_build:
target_versionfile = os.path.join(self.build_lib,
versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
versions = get_versions(verbose=True)
target_versionfile = versionfile_source
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = ("install/upgrade Versioneer files: "
"__init__.py SRC/_version.py")
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
commands['version'] = cmd_version
commands['update_files'] = cmd_update_files
commands['build'] = cmd_build
commands['sdist'] = cmd_sdist
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
commands['build_exe'] = cmd_build_exe
del commands['build']
return commands
| gpl-3.0 | -4,857,791,568,113,578,000 | 37.503337 | 79 | 0.640332 | false |
CQTools/Wavemeter_switch | switch_server.py | 1 | 3226 | """
Created on Mon Sep 29 14:52:13 2014
@author: nick
"""
from flask import Flask, render_template, jsonify
import requests
import switchcontrol
import time
import ctypes
import re
wlm = ctypes.windll.wlmData # load the DLL
uInt32 = ctypes.c_ulong
uInt64 = ctypes.c_ulonglong
double = ctypes.c_double
long = ctypes.c_long
LZERO = long(0)
DZERO = double(0)
cInstCheckForWLM = long(-1)
cInstResetCalc = long(0)
cInstReturnMode = cInstResetCalc
cInstNotification = long(1)
cInstCopyPattern = long(2)
cInstCopyAnalysis = cInstCopyPattern
cInstControlWLM = long(3)
cInstControlDelay = long(4)
cInstControlPriority = long(5)
getfreq = wlm.GetFrequencyNum
getfreq.restype = double
getwave = wlm.GetWavelengthNum
getwave.restype = double
getpat = wlm.GetPatternNum
getpat.restype = long
wlm.SetSwitcherSignalStates(2,1,0) #sets channel 2 on
wlm.SetSwitcherSignalStates(6,1,0) #sets channel 6 on
wlm.SetSwitcherSignalStates(7,1,0) #sets channel 7 on
wlm.SetSwitcherSignalStates(8,1,1) #sets channel 8 on
app = Flask(__name__)
#port ='/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_5543131303835141E011-if00' #hardwire address as arduino fixed into switch
port = 'COM5'
switch = switchcontrol.wavemeterswitch(port) #connect to wavemeter
time.sleep(1) #delay to allow for connection
switch.serial_write(1)
channel = 2 #current free wavemeter port on primary switch
@app.route('/')
def home(): #create webpage on index
    return render_template('index.html') #webpage template
global channel_data
channel_data = '1'
@app.route('/switch_1') # one route per switch channel (copy-pasted; could be refactored into a single parameterized route)
def switch1():
global channel_data
print 'switch 1 pressed'
switch.serial_write(1)
channel_data = '1'
return channel_data
@app.route('/switch_2')
def switch2():
global channel_data
print 'switch 2 pressed'
switch.serial_write(2)
channel_data = '2'
return channel_data
@app.route('/switch_3')
def switch3():
global channel_data
switch.serial_write(3)
channel_data = '3'
    return channel_data
@app.route('/switch_4')
def switch4():
global channel_data
switch.serial_write(4)
channel_data = '4'
    return channel_data
@app.route('/switch_5')
def switch5():
global channel_data
switch.serial_write(5)
channel_data = '5'
    return channel_data
@app.route('/switch_6')
def switch6():
global channel_data
switch.serial_write(6)
channel_data = '6'
print '6'
    return channel_data
@app.route('/switch_7')
def switch7():
global channel_data
switch.serial_write(7)
channel_data = '7'
    return channel_data
@app.route('/switch_8')
def switch8():
global channel_data
switch.serial_write(8)
channel_data = '8'
    return channel_data
@app.route("/data", methods= ['GET'])
def get_data():
data = {'wavelength': get_wavelength(),
'freq': get_frequency(),'channel': channel_data}
return jsonify(data)
def get_wavelength():
s = float(getwave(2,DZERO))
s = round(s,5)
return str(s) + ' nm'
def get_frequency():
s = float(getfreq(2,DZERO))
s = round(s,5)
return str(s) + ' THz'
if __name__ == '__main__':
app.run(host='0.0.0.0',port=8080)
| mit | 9,215,792,569,284,897,000 | 19.948052 | 133 | 0.67483 | false |
marc-sensenich/ansible | lib/ansible/modules/network/f5/bigiq_utility_license_assignment.py | 22 | 20990 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigiq_utility_license_assignment
short_description: Manage utility license assignment on BIG-IPs from a BIG-IQ
description:
- Manages the assignment of utility licenses on a BIG-IQ. Assignment means that
the license is assigned to a BIG-IP, or, it needs to be assigned to a BIG-IP.
    Additionally, this module supports revoking the assignments from BIG-IP devices.
version_added: 2.7
options:
unit_of_measure:
description:
- Sets the rate at which this license usage is billed.
- Depending on your license, you may have different units of measures
available to you. If a particular unit is not available to you, the module
will notify you at licensing time.
default: hourly
choices:
- hourly
- daily
- monthly
- yearly
key:
description:
      - The registration key that you want to choose an offering from.
required: True
offering:
description:
- Name of the license offering to assign to the device.
device:
description:
- When C(managed) is C(no), specifies the address, or hostname, where the BIG-IQ
can reach the remote device to register.
- When C(managed) is C(yes), specifies the managed device, or device UUID, that
you want to register.
- If C(managed) is C(yes), it is very important that you do not have more than
one device with the same name. BIG-IQ internally recognizes devices by their ID,
        and therefore, this module cannot guarantee that the correct device will be
registered. The device returned is the device that will be used.
managed:
description:
- Whether the specified device is a managed or un-managed device.
- When C(state) is C(present), this parameter is required.
type: bool
device_port:
description:
- Specifies the port of the remote device to connect to.
- If this parameter is not specified, the default of C(443) will be used.
default: 443
device_username:
description:
- The username used to connect to the remote device.
- This username should be one that has sufficient privileges on the remote device
to do licensing. Usually this is the C(Administrator) role.
- When C(managed) is C(no), this parameter is required.
device_password:
description:
- The password of the C(device_username).
- When C(managed) is C(no), this parameter is required.
state:
description:
- When C(present), ensures that the device is assigned the specified license.
      - When C(absent), ensures the license is revoked from the remote device and freed
on the BIG-IQ.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Register an unmanaged device
bigiq_utility_license_assignment:
key: XXXX-XXXX-XXXX-XXXX-XXXX
offering: F5-BIG-MSP-AFM-10G-LIC
device: 1.1.1.1
managed: no
device_username: admin
device_password: secret
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Register a managed device, by name
bigiq_utility_license_assignment:
key: XXXX-XXXX-XXXX-XXXX-XXXX
offering: F5-BIG-MSP-AFM-10G-LIC
device: bigi1.foo.com
managed: yes
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Register a managed device, by UUID
bigiq_utility_license_assignment:
key: XXXX-XXXX-XXXX-XXXX-XXXX
offering: F5-BIG-MSP-AFM-10G-LIC
device: 7141a063-7cf8-423f-9829-9d40599fa3e0
managed: yes
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
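# The revocation example below is an illustrative sketch rather than part of the
# original module documentation; it mirrors the unmanaged registration above and
# sets state: absent to revoke the assignment and free the license on the BIG-IQ.
- name: Revoke the license from an unmanaged device
  bigiq_utility_license_assignment:
    key: XXXX-XXXX-XXXX-XXXX-XXXX
    offering: F5-BIG-MSP-AFM-10G-LIC
    device: 1.1.1.1
    managed: no
    device_username: admin
    device_password: secret
    state: absent
    provider:
      password: secret
      server: lb.mydomain.com
      user: admin
  delegate_to: localhost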
'''
RETURN = r'''
# only common fields returned
'''
import re
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'deviceReference': 'device_reference',
'deviceAddress': 'device_address',
'httpsPort': 'device_port',
'unitOfMeasure': 'unit_of_measure'
}
api_attributes = [
'deviceReference', 'deviceAddress', 'httpsPort', 'managed', 'unitOfMeasure'
]
returnables = [
'device_address', 'device_reference', 'device_username', 'device_password',
'device_port', 'managed', 'unit_of_measure'
]
updatables = [
'device_reference', 'device_address', 'device_username', 'device_password',
'device_port', 'managed', 'unit_of_measure'
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def device_password(self):
if self._values['device_password'] is None:
return None
return self._values['device_password']
@property
def device_username(self):
if self._values['device_username'] is None:
return None
return self._values['device_username']
@property
def device_address(self):
if self.device_is_address:
return self._values['device']
@property
def device_port(self):
if self._values['device_port'] is None:
return None
return int(self._values['device_port'])
@property
def device_is_address(self):
if is_valid_ip(self.device):
return True
return False
@property
def device_is_id(self):
pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
if re.match(pattern, self.device):
return True
return False
@property
def device_is_name(self):
if not self.device_is_address and not self.device_is_id:
return True
return False
@property
def device_reference(self):
if not self.managed:
return None
if self.device_is_address:
# This range lookup is how you do lookups for single IP addresses. Weird.
filter = "address+eq+'{0}...{0}'".format(self.device)
elif self.device_is_name:
filter = "hostname+eq+'{0}'".format(self.device)
elif self.device_is_id:
filter = "uuid+eq+'{0}'".format(self.device)
else:
raise F5ModuleError(
"Unknown device format '{0}'".format(self.device)
)
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/?$filter={2}&$top=1".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No device with the specified address was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
id = response['items'][0]['uuid']
result = dict(
link='https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/{0}'.format(id)
)
return result
@property
def offering_id(self):
filter = "(name+eq+'{0}')".format(self.offering)
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings?$filter={3}&$top=1'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.key,
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No offering with the specified name was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['items'][0]['id']
@property
def member_id(self):
if self.device_is_address:
# This range lookup is how you do lookups for single IP addresses. Weird.
filter = "deviceAddress+eq+'{0}...{0}'".format(self.device)
elif self.device_is_name:
filter = "deviceName+eq+'{0}'".format(self.device)
elif self.device_is_id:
filter = "deviceMachineId+eq+'{0}'".format(self.device)
else:
raise F5ModuleError(
"Unknown device format '{0}'".format(self.device)
)
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/?$filter={4}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.key,
self.offering_id,
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
return None
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = response['items'][0]['id']
return result
class Changes(Parameters):
pass
class UsableChanges(Changes):
@property
def device_port(self):
if self._values['managed']:
return None
return self._values['device_port']
@property
def device_username(self):
if self._values['managed']:
return None
return self._values['device_username']
@property
def device_password(self):
if self._values['managed']:
return None
return self._values['device_password']
@property
def device_reference(self):
if not self._values['managed']:
return None
return self._values['device_reference']
@property
def device_address(self):
if self._values['managed']:
return None
return self._values['device_address']
@property
def managed(self):
return None
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params, client=self.client)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = Changes(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return False
return self.create()
def exists(self):
if self.want.member_id is None:
return False
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/{4}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.key,
self.want.offering_id,
self.want.member_id
)
resp = self.client.api.get(uri)
if resp.status == 200:
return True
return False
def remove(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if not self.want.managed:
if self.want.device_username is None:
raise F5ModuleError(
"You must specify a 'device_username' when working with unmanaged devices."
)
if self.want.device_password is None:
raise F5ModuleError(
"You must specify a 'device_password' when working with unmanaged devices."
)
if self.module.check_mode:
return True
self.create_on_device()
if not self.exists():
raise F5ModuleError(
"Failed to license the remote device."
)
self.wait_for_device_to_be_licensed()
return True
def create_on_device(self):
params = self.changes.api_params()
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.key,
self.want.offering_id,
)
if not self.want.managed:
params['username'] = self.want.device_username
params['password'] = self.want.device_password
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def wait_for_device_to_be_licensed(self):
count = 0
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/{4}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.key,
self.want.offering_id,
self.want.member_id,
)
while count < 3:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if response['status'] == 'LICENSED':
count += 1
else:
count = 0
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/utility/licenses/{2}/offerings/{3}/members/{4}'.format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.key,
self.want.offering_id,
self.want.member_id
)
params = {}
if not self.want.managed:
params.update(self.changes.api_params())
params['id'] = self.want.member_id
params['username'] = self.want.device_username
params['password'] = self.want.device_password
self.client.api.delete(uri, json=params)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
offering=dict(required=True),
unit_of_measure=dict(
default='hourly',
choices=[
'hourly', 'daily', 'monthly', 'yearly'
]
),
key=dict(required=True, no_log=True),
device=dict(required=True),
managed=dict(type='bool'),
device_port=dict(type='int', default=443),
device_username=dict(no_log=True),
device_password=dict(no_log=True),
state=dict(default='present', choices=['absent', 'present'])
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_if = [
['state', 'present', ['key', 'managed']],
['managed', False, ['device', 'device_username', 'device_password']],
['managed', True, ['device']]
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_if=spec.required_if
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
exit_json(module, results, client)
except F5ModuleError as ex:
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,593,861,278,805,888,000 | 31.643857 | 127 | 0.589233 | false |
mikelikespie/bazel | third_party/py/six/test_six.py | 48 | 24674 | import operator
import sys
import types
import unittest
import py
import six
def test_add_doc():
def f():
"""Icky doc"""
pass
six._add_doc(f, """New doc""")
assert f.__doc__ == "New doc"
def test_import_module():
from logging import handlers
m = six._import_module("logging.handlers")
assert m is handlers
def test_integer_types():
assert isinstance(1, six.integer_types)
assert isinstance(-1, six.integer_types)
assert isinstance(six.MAXSIZE + 23, six.integer_types)
assert not isinstance(.1, six.integer_types)
def test_string_types():
assert isinstance("hi", six.string_types)
assert isinstance(six.u("hi"), six.string_types)
assert issubclass(six.text_type, six.string_types)
def test_class_types():
class X:
pass
class Y(object):
pass
assert isinstance(X, six.class_types)
assert isinstance(Y, six.class_types)
assert not isinstance(X(), six.class_types)
def test_text_type():
assert type(six.u("hi")) is six.text_type
def test_binary_type():
assert type(six.b("hi")) is six.binary_type
def test_MAXSIZE():
try:
# This shouldn't raise an overflow error.
six.MAXSIZE.__index__()
except AttributeError:
# Before Python 2.6.
pass
py.test.raises(
(ValueError, OverflowError),
operator.mul, [None], six.MAXSIZE + 1)
def test_lazy():
if six.PY3:
html_name = "html.parser"
else:
html_name = "HTMLParser"
assert html_name not in sys.modules
mod = six.moves.html_parser
assert sys.modules[html_name] is mod
assert "htmlparser" not in six._MovedItems.__dict__
try:
import _tkinter
except ImportError:
have_tkinter = False
else:
have_tkinter = True
have_gdbm = True
try:
import gdbm
except ImportError:
try:
import dbm.gnu
except ImportError:
have_gdbm = False
@py.test.mark.parametrize("item_name",
[item.name for item in six._moved_attributes])
def test_move_items(item_name):
"""Ensure that everything loads correctly."""
try:
item = getattr(six.moves, item_name)
if isinstance(item, types.ModuleType):
__import__("six.moves." + item_name)
except AttributeError:
if item_name == "zip_longest" and sys.version_info < (2, 6):
py.test.skip("zip_longest only available on 2.6+")
except ImportError:
if item_name == "winreg" and not sys.platform.startswith("win"):
py.test.skip("Windows only module")
if item_name.startswith("tkinter"):
if not have_tkinter:
py.test.skip("requires tkinter")
if item_name == "tkinter_ttk" and sys.version_info[:2] <= (2, 6):
py.test.skip("ttk only available on 2.7+")
if item_name.startswith("dbm_gnu") and not have_gdbm:
py.test.skip("requires gdbm")
raise
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_parse_moved_attributes])
def test_move_items_urllib_parse(item_name):
"""Ensure that everything loads correctly."""
if item_name == "ParseResult" and sys.version_info < (2, 5):
py.test.skip("ParseResult is only found on 2.5+")
if item_name in ("parse_qs", "parse_qsl") and sys.version_info < (2, 6):
py.test.skip("parse_qs[l] is new in 2.6")
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.parse)
getattr(six.moves.urllib.parse, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_error_moved_attributes])
def test_move_items_urllib_error(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.error)
getattr(six.moves.urllib.error, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_request_moved_attributes])
def test_move_items_urllib_request(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.request)
getattr(six.moves.urllib.request, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_response_moved_attributes])
def test_move_items_urllib_response(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.response)
getattr(six.moves.urllib.response, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_robotparser_moved_attributes])
def test_move_items_urllib_robotparser(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.robotparser)
getattr(six.moves.urllib.robotparser, item_name)
def test_import_moves_error_1():
from six.moves.urllib.parse import urljoin
from six import moves
# In 1.4.1: AttributeError: 'Module_six_moves_urllib_parse' object has no attribute 'urljoin'
assert moves.urllib.parse.urljoin
def test_import_moves_error_2():
from six import moves
assert moves.urllib.parse.urljoin
# In 1.4.1: ImportError: cannot import name urljoin
from six.moves.urllib.parse import urljoin
def test_import_moves_error_3():
from six.moves.urllib.parse import urljoin
# In 1.4.1: ImportError: cannot import name urljoin
from six.moves.urllib_parse import urljoin
def test_from_imports():
from six.moves.queue import Queue
assert isinstance(Queue, six.class_types)
from six.moves.configparser import ConfigParser
assert isinstance(ConfigParser, six.class_types)
def test_filter():
from six.moves import filter
f = filter(lambda x: x % 2, range(10))
assert six.advance_iterator(f) == 1
def test_filter_false():
from six.moves import filterfalse
f = filterfalse(lambda x: x % 3, range(10))
assert six.advance_iterator(f) == 0
assert six.advance_iterator(f) == 3
assert six.advance_iterator(f) == 6
def test_map():
from six.moves import map
assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
def test_zip():
from six.moves import zip
assert six.advance_iterator(zip(range(2), range(2))) == (0, 0)
@py.test.mark.skipif("sys.version_info < (2, 6)")
def test_zip_longest():
from six.moves import zip_longest
it = zip_longest(range(2), range(1))
assert six.advance_iterator(it) == (0, 0)
assert six.advance_iterator(it) == (1, None)
class TestCustomizedMoves:
def teardown_method(self, meth):
try:
del six._MovedItems.spam
except AttributeError:
pass
try:
del six.moves.__dict__["spam"]
except KeyError:
pass
def test_moved_attribute(self):
attr = six.MovedAttribute("spam", "foo", "bar")
if six.PY3:
assert attr.mod == "bar"
else:
assert attr.mod == "foo"
assert attr.attr == "spam"
attr = six.MovedAttribute("spam", "foo", "bar", "lemma")
assert attr.attr == "lemma"
attr = six.MovedAttribute("spam", "foo", "bar", "lemma", "theorm")
if six.PY3:
assert attr.attr == "theorm"
else:
assert attr.attr == "lemma"
def test_moved_module(self):
attr = six.MovedModule("spam", "foo")
if six.PY3:
assert attr.mod == "spam"
else:
assert attr.mod == "foo"
attr = six.MovedModule("spam", "foo", "bar")
if six.PY3:
assert attr.mod == "bar"
else:
assert attr.mod == "foo"
def test_custom_move_module(self):
attr = six.MovedModule("spam", "six", "six")
six.add_move(attr)
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
attr = six.MovedModule("spam", "six", "six")
six.add_move(attr)
from six.moves import spam
assert spam is six
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
def test_custom_move_attribute(self):
attr = six.MovedAttribute("spam", "six", "six", "u", "u")
six.add_move(attr)
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
attr = six.MovedAttribute("spam", "six", "six", "u", "u")
six.add_move(attr)
from six.moves import spam
assert spam is six.u
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
def test_empty_remove(self):
py.test.raises(AttributeError, six.remove_move, "eggs")
def test_get_unbound_function():
class X(object):
def m(self):
pass
assert six.get_unbound_function(X.m) is X.__dict__["m"]
def test_get_method_self():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_self(x.m) is x
py.test.raises(AttributeError, six.get_method_self, 42)
def test_get_method_function():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_function(x.m) is X.__dict__["m"]
py.test.raises(AttributeError, six.get_method_function, hasattr)
def test_get_function_closure():
def f():
x = 42
def g():
return x
return g
cell = six.get_function_closure(f())[0]
assert type(cell).__name__ == "cell"
def test_get_function_code():
def f():
pass
assert isinstance(six.get_function_code(f), types.CodeType)
if not hasattr(sys, "pypy_version_info"):
py.test.raises(AttributeError, six.get_function_code, hasattr)
def test_get_function_defaults():
def f(x, y=3, b=4):
pass
assert six.get_function_defaults(f) == (3, 4)
def test_get_function_globals():
def f():
pass
assert six.get_function_globals(f) is globals()
def test_dictionary_iterators(monkeypatch):
def stock_method_name(iterwhat):
"""Given a method suffix like "lists" or "values", return the name
of the dict method that delivers those on the version of Python
we're running in."""
if six.PY3:
return iterwhat
return 'iter' + iterwhat
class MyDict(dict):
if not six.PY3:
def lists(self, **kw):
return [1, 2, 3]
def iterlists(self, **kw):
return iter([1, 2, 3])
f = MyDict.iterlists
del MyDict.iterlists
setattr(MyDict, stock_method_name('lists'), f)
d = MyDict(zip(range(10), reversed(range(10))))
for name in "keys", "values", "items", "lists":
meth = getattr(six, "iter" + name)
it = meth(d)
assert not isinstance(it, list)
assert list(it) == list(getattr(d, name)())
py.test.raises(StopIteration, six.advance_iterator, it)
record = []
def with_kw(*args, **kw):
record.append(kw["kw"])
return old(*args)
old = getattr(MyDict, stock_method_name(name))
monkeypatch.setattr(MyDict, stock_method_name(name), with_kw)
meth(d, kw=42)
assert record == [42]
monkeypatch.undo()
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)",
reason="view methods on dictionaries only available on 2.7+")
def test_dictionary_views():
def stock_method_name(viewwhat):
"""Given a method suffix like "keys" or "values", return the name
of the dict method that delivers those on the version of Python
we're running in."""
if six.PY3:
return viewwhat
return 'view' + viewwhat
d = dict(zip(range(10), (range(11, 20))))
for name in "keys", "values", "items":
meth = getattr(six, "view" + name)
view = meth(d)
assert set(view) == set(getattr(d, name)())
def test_advance_iterator():
assert six.next is six.advance_iterator
l = [1, 2]
it = iter(l)
assert six.next(it) == 1
assert six.next(it) == 2
py.test.raises(StopIteration, six.next, it)
py.test.raises(StopIteration, six.next, it)
def test_iterator():
class myiter(six.Iterator):
def __next__(self):
return 13
assert six.advance_iterator(myiter()) == 13
class myitersub(myiter):
def __next__(self):
return 14
assert six.advance_iterator(myitersub()) == 14
def test_callable():
class X:
def __call__(self):
pass
def method(self):
pass
assert six.callable(X)
assert six.callable(X())
assert six.callable(test_callable)
assert six.callable(hasattr)
assert six.callable(X.method)
assert six.callable(X().method)
assert not six.callable(4)
assert not six.callable("string")
def test_create_bound_method():
class X(object):
pass
def f(self):
return self
x = X()
b = six.create_bound_method(f, x)
assert isinstance(b, types.MethodType)
assert b() is x
def test_create_unbound_method():
class X(object):
pass
def f(self):
return self
u = six.create_unbound_method(f, X)
py.test.raises(TypeError, u)
if six.PY2:
assert isinstance(u, types.MethodType)
x = X()
assert f(x) is x
if six.PY3:
def test_b():
data = six.b("\xff")
assert isinstance(data, bytes)
assert len(data) == 1
assert data == bytes([255])
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, str)
assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
else:
def test_b():
data = six.b("\xff")
assert isinstance(data, str)
assert len(data) == 1
assert data == "\xff"
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, unicode)
assert s == "hi \xd0\xb9 \xd0\xb9 \\ \\\\ \n".decode("utf8")
def test_u_escapes():
s = six.u("\u1234")
assert len(s) == 1
def test_unichr():
assert six.u("\u1234") == six.unichr(0x1234)
assert type(six.u("\u1234")) is type(six.unichr(0x1234))
def test_int2byte():
assert six.int2byte(3) == six.b("\x03")
py.test.raises(Exception, six.int2byte, 256)
def test_byte2int():
assert six.byte2int(six.b("\x03")) == 3
assert six.byte2int(six.b("\x03\x04")) == 3
py.test.raises(IndexError, six.byte2int, six.b(""))
def test_bytesindex():
assert six.indexbytes(six.b("hello"), 3) == ord("l")
def test_bytesiter():
it = six.iterbytes(six.b("hi"))
assert six.next(it) == ord("h")
assert six.next(it) == ord("i")
py.test.raises(StopIteration, six.next, it)
def test_StringIO():
fp = six.StringIO()
fp.write(six.u("hello"))
assert fp.getvalue() == six.u("hello")
def test_BytesIO():
fp = six.BytesIO()
fp.write(six.b("hello"))
assert fp.getvalue() == six.b("hello")
def test_exec_():
def f():
l = []
six.exec_("l.append(1)")
assert l == [1]
f()
ns = {}
six.exec_("x = 42", ns)
assert ns["x"] == 42
glob = {}
loc = {}
six.exec_("global y; y = 42; x = 12", glob, loc)
assert glob["y"] == 42
assert "x" not in glob
assert loc["x"] == 12
assert "y" not in loc
def test_reraise():
def get_next(tb):
if six.PY3:
return tb.tb_next.tb_next
else:
return tb.tb_next
e = Exception("blah")
try:
raise e
except Exception:
tp, val, tb = sys.exc_info()
try:
six.reraise(tp, val, tb)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert tb is get_next(tb2)
try:
six.reraise(tp, val)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert tb2 is not tb
try:
six.reraise(tp, val, tb2)
except Exception:
tp2, value2, tb3 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert get_next(tb3) is tb2
try:
six.reraise(tp, None, tb)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is not val
assert isinstance(value2, Exception)
assert tb is get_next(tb2)
def test_raise_from():
try:
try:
raise Exception("blah")
except Exception:
ctx = sys.exc_info()[1]
f = Exception("foo")
six.raise_from(f, None)
except Exception:
tp, val, tb = sys.exc_info()
if sys.version_info[:2] > (3, 0):
        # We should have done the equivalent of "raise f from None".
assert val.__cause__ is None
assert val.__context__ is ctx
if sys.version_info[:2] >= (3, 3):
# And that should suppress the context on the exception.
assert val.__suppress_context__
# For all versions the outer exception should have raised successfully.
assert str(val) == "foo"
def test_print_():
save = sys.stdout
out = sys.stdout = six.moves.StringIO()
try:
six.print_("Hello,", "person!")
finally:
sys.stdout = save
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out)
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, end="")
assert out.getvalue() == "Hello, person!"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, sep="X")
assert out.getvalue() == "Hello,Xperson!\n"
out = six.StringIO()
six.print_(six.u("Hello,"), six.u("person!"), file=out)
result = out.getvalue()
assert isinstance(result, six.text_type)
assert result == six.u("Hello, person!\n")
six.print_("Hello", file=None) # This works.
out = six.StringIO()
six.print_(None, file=out)
assert out.getvalue() == "None\n"
class FlushableStringIO(six.StringIO):
def __init__(self):
six.StringIO.__init__(self)
self.flushed = False
def flush(self):
self.flushed = True
out = FlushableStringIO()
six.print_("Hello", file=out)
assert not out.flushed
six.print_("Hello", file=out, flush=True)
assert out.flushed
@py.test.mark.skipif("sys.version_info[:2] >= (2, 6)")
def test_print_encoding(monkeypatch):
# Fool the type checking in print_.
monkeypatch.setattr(six, "file", six.BytesIO, raising=False)
out = six.BytesIO()
out.encoding = "utf-8"
out.errors = None
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\xd4\xbc")
out = six.BytesIO()
out.encoding = "ascii"
out.errors = "strict"
py.test.raises(UnicodeEncodeError, six.print_, six.u("\u053c"), file=out)
out.errors = "backslashreplace"
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\\u053c")
def test_print_exceptions():
py.test.raises(TypeError, six.print_, x=3)
py.test.raises(TypeError, six.print_, end=3)
py.test.raises(TypeError, six.print_, sep=42)
def test_with_metaclass():
class Meta(type):
pass
class X(six.with_metaclass(Meta)):
pass
assert type(X) is Meta
assert issubclass(X, object)
class Base(object):
pass
class X(six.with_metaclass(Meta, Base)):
pass
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(six.with_metaclass(Meta, Base, Base2)):
pass
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
assert X.__mro__ == (X, Base, Base2, object)
def test_wraps():
def f(g):
@six.wraps(g)
def w():
return 42
return w
def k():
pass
original_k = k
k = f(f(k))
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert k is original_k
assert not hasattr(k, '__wrapped__')
def f(g, assign, update):
def w():
return 42
w.glue = {"foo" : "bar"}
return six.wraps(g, assign, update)(w)
k.glue = {"melon" : "egg"}
k.turnip = 43
k = f(k, ["turnip"], ["glue"])
assert k.__name__ == "w"
assert k.turnip == 43
assert k.glue == {"melon" : "egg", "foo" : "bar"}
def test_add_metaclass():
class Meta(type):
pass
class X:
"success"
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, object)
assert X.__module__ == __name__
assert X.__doc__ == "success"
class Base(object):
pass
class X(Base):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(Base, Base2):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
# Test a second-generation subclass of a type.
class Meta1(type):
m1 = "m1"
class Meta2(Meta1):
m2 = "m2"
class Base:
b = "b"
Base = six.add_metaclass(Meta1)(Base)
class X(Base):
x = "x"
X = six.add_metaclass(Meta2)(X)
assert type(X) is Meta2
assert issubclass(X, Base)
assert type(Base) is Meta1
assert "__dict__" not in vars(X)
instance = X()
instance.attr = "test"
assert vars(instance) == {"attr": "test"}
assert instance.b == Base.b
assert instance.x == X.x
# Test a class with slots.
class MySlots(object):
__slots__ = ["a", "b"]
MySlots = six.add_metaclass(Meta1)(MySlots)
assert MySlots.__slots__ == ["a", "b"]
instance = MySlots()
instance.a = "foo"
py.test.raises(AttributeError, setattr, instance, "c", "baz")
# Test a class with string for slots.
class MyStringSlots(object):
__slots__ = "ab"
MyStringSlots = six.add_metaclass(Meta1)(MyStringSlots)
assert MyStringSlots.__slots__ == "ab"
instance = MyStringSlots()
instance.ab = "foo"
py.test.raises(AttributeError, setattr, instance, "a", "baz")
py.test.raises(AttributeError, setattr, instance, "b", "baz")
class MySlotsWeakref(object):
__slots__ = "__weakref__",
MySlotsWeakref = six.add_metaclass(Meta)(MySlotsWeakref)
assert type(MySlotsWeakref) is Meta
@py.test.mark.skipif("sys.version_info[:2] < (2, 7) or sys.version_info[:2] in ((3, 0), (3, 1))")
def test_assertCountEqual():
class TestAssertCountEqual(unittest.TestCase):
def test(self):
with self.assertRaises(AssertionError):
six.assertCountEqual(self, (1, 2), [3, 4, 5])
six.assertCountEqual(self, (1, 2), [2, 1])
TestAssertCountEqual('test').test()
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
def test_assertRegex():
class TestAssertRegex(unittest.TestCase):
def test(self):
with self.assertRaises(AssertionError):
six.assertRegex(self, 'test', r'^a')
six.assertRegex(self, 'test', r'^t')
TestAssertRegex('test').test()
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
def test_assertRaisesRegex():
class TestAssertRaisesRegex(unittest.TestCase):
def test(self):
with six.assertRaisesRegex(self, AssertionError, '^Foo'):
raise AssertionError('Foo')
with self.assertRaises(AssertionError):
with six.assertRaisesRegex(self, AssertionError, r'^Foo'):
raise AssertionError('Bar')
TestAssertRaisesRegex('test').test()
def test_python_2_unicode_compatible():
@six.python_2_unicode_compatible
class MyTest(object):
def __str__(self):
return six.u('hello')
def __bytes__(self):
return six.b('hello')
my_test = MyTest()
if six.PY2:
assert str(my_test) == six.b("hello")
assert unicode(my_test) == six.u("hello")
elif six.PY3:
assert bytes(my_test) == six.b("hello")
assert str(my_test) == six.u("hello")
assert getattr(six.moves.builtins, 'bytes', str)(my_test) == six.b("hello")
| apache-2.0 | -6,456,332,918,742,145,000 | 27.295872 | 97 | 0.588717 | false |
nmayorov/scipy | scipy/optimize/_lsq/common.py | 21 | 20616 | """Functions used by least-squares algorithms."""
from math import copysign
import numpy as np
from numpy.linalg import norm
from scipy.linalg import cho_factor, cho_solve, LinAlgError
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator, aslinearoperator
EPS = np.finfo(float).eps
# Functions related to a trust-region problem.
def intersect_trust_region(x, s, Delta):
"""Find the intersection of a line with the boundary of a trust region.
This function solves the quadratic equation with respect to t
||(x + s*t)||**2 = Delta**2.
Returns
-------
t_neg, t_pos : tuple of float
Negative and positive roots.
Raises
------
ValueError
If `s` is zero or `x` is not within the trust region.
"""
a = np.dot(s, s)
if a == 0:
raise ValueError("`s` is zero.")
b = np.dot(x, s)
c = np.dot(x, x) - Delta**2
if c > 0:
raise ValueError("`x` is not within the trust region.")
d = np.sqrt(b*b - a*c) # Root from one fourth of the discriminant.
# Computations below avoid loss of significance, see "Numerical Recipes".
q = -(b + copysign(d, b))
t1 = q / a
t2 = c / q
if t1 < t2:
return t1, t2
else:
return t2, t1
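# Illustrative usage (editor's sketch, not part of the original module). For
# x = [0.5, 0], s = [1, 0] and Delta = 1 the boundary is crossed at t = -1.5
# and t = 0.5:
#
#     >>> import numpy as np
#     >>> intersect_trust_region(np.array([0.5, 0.0]), np.array([1.0, 0.0]),
#     ...                        Delta=1.0)
#     (-1.5, 0.5)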
def solve_lsq_trust_region(n, m, uf, s, V, Delta, initial_alpha=None,
rtol=0.01, max_iter=10):
"""Solve a trust-region problem arising in least-squares minimization.
This function implements a method described by J. J. More [1]_ and used
    in MINPACK, but it relies on a single SVD of the Jacobian instead of a
    series of Cholesky decompositions. Before running this function, compute:
``U, s, VT = svd(J, full_matrices=False)``.
Parameters
----------
n : int
Number of variables.
m : int
Number of residuals.
uf : ndarray
Computed as U.T.dot(f).
s : ndarray
Singular values of J.
V : ndarray
Transpose of VT.
Delta : float
Radius of a trust region.
initial_alpha : float, optional
Initial guess for alpha, which might be available from a previous
iteration. If None, determined automatically.
rtol : float, optional
Stopping tolerance for the root-finding procedure. Namely, the
solution ``p`` will satisfy ``abs(norm(p) - Delta) < rtol * Delta``.
max_iter : int, optional
Maximum allowed number of iterations for the root-finding procedure.
Returns
-------
p : ndarray, shape (n,)
Found solution of a trust-region problem.
alpha : float
Positive value such that (J.T*J + alpha*I)*p = -J.T*f.
Sometimes called Levenberg-Marquardt parameter.
n_iter : int
        Number of iterations made by the root-finding procedure. Zero means
        that the Gauss-Newton step was selected as the solution.
References
----------
.. [1] More, J. J., "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes
in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
"""
def phi_and_derivative(alpha, suf, s, Delta):
"""Function of which to find zero.
It is defined as "norm of regularized (by alpha) least-squares
solution minus `Delta`". Refer to [1]_.
"""
denom = s**2 + alpha
p_norm = norm(suf / denom)
phi = p_norm - Delta
phi_prime = -np.sum(suf ** 2 / denom**3) / p_norm
return phi, phi_prime
suf = s * uf
# Check if J has full rank and try Gauss-Newton step.
if m >= n:
threshold = EPS * m * s[0]
full_rank = s[-1] > threshold
else:
full_rank = False
if full_rank:
p = -V.dot(uf / s)
if norm(p) <= Delta:
return p, 0.0, 0
alpha_upper = norm(suf) / Delta
if full_rank:
phi, phi_prime = phi_and_derivative(0.0, suf, s, Delta)
alpha_lower = -phi / phi_prime
else:
alpha_lower = 0.0
if initial_alpha is None or not full_rank and initial_alpha == 0:
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
else:
alpha = initial_alpha
for it in range(max_iter):
if alpha < alpha_lower or alpha > alpha_upper:
alpha = max(0.001 * alpha_upper, (alpha_lower * alpha_upper)**0.5)
phi, phi_prime = phi_and_derivative(alpha, suf, s, Delta)
if phi < 0:
alpha_upper = alpha
ratio = phi / phi_prime
alpha_lower = max(alpha_lower, alpha - ratio)
alpha -= (phi + Delta) * ratio / Delta
if np.abs(phi) < rtol * Delta:
break
p = -V.dot(suf / (s**2 + alpha))
    # Make the norm of p equal to Delta; p is changed only slightly during
    # this. It is done to prevent p from lying outside the trust region
    # (which can cause problems later).
p *= Delta / norm(p)
return p, alpha, it + 1
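# Illustrative usage (editor's sketch, not part of the original module),
# following the SVD recipe given in the docstring above:
#
#     >>> import numpy as np
#     >>> J = np.array([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
#     >>> f = np.array([1.0, 1.0, 1.0])
#     >>> U, s, VT = np.linalg.svd(J, full_matrices=False)
#     >>> p, alpha, n_iter = solve_lsq_trust_region(
#     ...     n=2, m=3, uf=U.T.dot(f), s=s, V=VT.T, Delta=0.1)
#     >>> bool(np.linalg.norm(p) <= 0.1 + 1e-12)
#     True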
def solve_trust_region_2d(B, g, Delta):
"""Solve a general trust-region problem in 2 dimensions.
The problem is reformulated as a 4th order algebraic equation,
the solution of which is found by numpy.roots.
Parameters
----------
B : ndarray, shape (2, 2)
Symmetric matrix, defines a quadratic term of the function.
g : ndarray, shape (2,)
Defines a linear term of the function.
Delta : float
Radius of a trust region.
Returns
-------
p : ndarray, shape (2,)
Found solution.
newton_step : bool
Whether the returned solution is the Newton step which lies within
the trust region.
"""
try:
R, lower = cho_factor(B)
p = -cho_solve((R, lower), g)
if np.dot(p, p) <= Delta**2:
return p, True
except LinAlgError:
pass
a = B[0, 0] * Delta**2
b = B[0, 1] * Delta**2
c = B[1, 1] * Delta**2
d = g[0] * Delta
f = g[1] * Delta
coeffs = np.array(
[-b + d, 2 * (a - c + f), 6 * b, 2 * (-a + c + f), -b - d])
t = np.roots(coeffs) # Can handle leading zeros.
t = np.real(t[np.isreal(t)])
p = Delta * np.vstack((2 * t / (1 + t**2), (1 - t**2) / (1 + t**2)))
value = 0.5 * np.sum(p * B.dot(p), axis=0) + np.dot(g, p)
i = np.argmin(value)
p = p[:, i]
return p, False
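# Illustrative usage (editor's sketch, not part of the original module). With
# a positive definite B and a large enough radius the Newton step is returned:
#
#     >>> import numpy as np
#     >>> p, newton_step = solve_trust_region_2d(2 * np.eye(2),
#     ...                                        np.array([-2.0, 0.0]), 5.0)
#     >>> newton_step, bool(np.allclose(p, [1.0, 0.0]))
#     (True, True)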
def update_tr_radius(Delta, actual_reduction, predicted_reduction,
step_norm, bound_hit):
"""Update the radius of a trust region based on the cost reduction.
Returns
-------
Delta : float
New radius.
ratio : float
Ratio between actual and predicted reductions.
"""
if predicted_reduction > 0:
ratio = actual_reduction / predicted_reduction
elif predicted_reduction == actual_reduction == 0:
ratio = 1
else:
ratio = 0
if ratio < 0.25:
Delta = 0.25 * step_norm
elif ratio > 0.75 and bound_hit:
Delta *= 2.0
return Delta, ratio
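# Illustrative usage (editor's sketch, not part of the original module). Poor
# agreement between actual and predicted reduction shrinks the radius:
#
#     >>> update_tr_radius(Delta=1.0, actual_reduction=0.1,
#     ...                  predicted_reduction=1.0, step_norm=0.8,
#     ...                  bound_hit=False)
#     (0.2, 0.1)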
# Construction and minimization of quadratic functions.
def build_quadratic_1d(J, g, s, diag=None, s0=None):
"""Parameterize a multivariate quadratic function along a line.
The resulting univariate quadratic function is given as follows:
::
f(t) = 0.5 * (s0 + s*t).T * (J.T*J + diag) * (s0 + s*t) +
g.T * (s0 + s*t)
Parameters
----------
J : ndarray, sparse matrix or LinearOperator shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (n,)
Direction vector of a line.
diag : None or ndarray with shape (n,), optional
        Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
s0 : None or ndarray with shape (n,), optional
Initial point. If None, assumed to be 0.
Returns
-------
a : float
Coefficient for t**2.
b : float
Coefficient for t.
c : float
Free term. Returned only if `s0` is provided.
"""
v = J.dot(s)
a = np.dot(v, v)
if diag is not None:
a += np.dot(s * diag, s)
a *= 0.5
b = np.dot(g, s)
if s0 is not None:
u = J.dot(s0)
b += np.dot(u, v)
c = 0.5 * np.dot(u, u) + np.dot(g, s0)
if diag is not None:
b += np.dot(s0 * diag, s)
c += 0.5 * np.dot(s0 * diag, s0)
return a, b, c
else:
return a, b
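# Illustrative usage (editor's sketch, not part of the original module). For
# J = I, g = [1, 1] and direction s = [1, 0] the 1-D model is 0.5*t**2 + t:
#
#     >>> import numpy as np
#     >>> build_quadratic_1d(np.eye(2), np.array([1.0, 1.0]),
#     ...                    np.array([1.0, 0.0]))
#     (0.5, 1.0)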
def minimize_quadratic_1d(a, b, lb, ub, c=0):
"""Minimize a 1-D quadratic function subject to bounds.
The free term `c` is 0 by default. Bounds must be finite.
Returns
-------
t : float
Minimum point.
y : float
Minimum value.
"""
t = [lb, ub]
if a != 0:
extremum = -0.5 * b / a
if lb < extremum < ub:
t.append(extremum)
t = np.asarray(t)
y = t * (a * t + b) + c
min_index = np.argmin(y)
return t[min_index], y[min_index]
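# Illustrative usage (editor's sketch, not part of the original module). The
# unconstrained minimum of t**2 - 4*t is at t = 2, which lies inside [0, 10]:
#
#     >>> minimize_quadratic_1d(1.0, -4.0, lb=0.0, ub=10.0)
#     (2.0, -4.0)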
def evaluate_quadratic(J, g, s, diag=None):
"""Compute values of a quadratic function arising in least squares.
The function is 0.5 * s.T * (J.T * J + diag) * s + g.T * s.
Parameters
----------
J : ndarray, sparse matrix or LinearOperator, shape (m, n)
Jacobian matrix, affects the quadratic term.
g : ndarray, shape (n,)
Gradient, defines the linear term.
s : ndarray, shape (k, n) or (n,)
Array containing steps as rows.
diag : ndarray, shape (n,), optional
        Additional diagonal part, affects the quadratic term.
If None, assumed to be 0.
Returns
-------
values : ndarray with shape (k,) or float
Values of the function. If `s` was 2-D, then ndarray is
returned, otherwise, float is returned.
"""
if s.ndim == 1:
Js = J.dot(s)
q = np.dot(Js, Js)
if diag is not None:
q += np.dot(s * diag, s)
else:
Js = J.dot(s.T)
q = np.sum(Js**2, axis=0)
if diag is not None:
q += np.sum(diag * s**2, axis=1)
l = np.dot(s, g)
return 0.5 * q + l
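# Illustrative usage (editor's sketch, not part of the original module):
#
#     >>> import numpy as np
#     >>> evaluate_quadratic(np.eye(2), np.array([1.0, 1.0]),
#     ...                    np.array([1.0, 2.0]))  # 0.5 * 5 + 3
#     5.5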
# Utility functions to work with bound constraints.
def in_bounds(x, lb, ub):
"""Check if a point lies within bounds."""
return np.all((x >= lb) & (x <= ub))
def step_size_to_bound(x, s, lb, ub):
"""Compute a min_step size required to reach a bound.
The function computes a positive scalar t, such that x + s * t is on
the bound.
Returns
-------
step : float
Computed step. Non-negative value.
hits : ndarray of int with shape of x
Each element indicates whether a corresponding variable reaches the
bound:
* 0 - the bound was not hit.
* -1 - the lower bound was hit.
* 1 - the upper bound was hit.
"""
non_zero = np.nonzero(s)
s_non_zero = s[non_zero]
steps = np.empty_like(x)
steps.fill(np.inf)
with np.errstate(over='ignore'):
steps[non_zero] = np.maximum((lb - x)[non_zero] / s_non_zero,
(ub - x)[non_zero] / s_non_zero)
min_step = np.min(steps)
return min_step, np.equal(steps, min_step) * np.sign(s).astype(int)
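# Illustrative usage (editor's sketch, not part of the original module). From
# the origin, moving along s = [1, -1] inside the box [-2, 2] x [-2, 2] hits
# the upper bound of the first variable and the lower bound of the second:
#
#     >>> import numpy as np
#     >>> step, hits = step_size_to_bound(np.zeros(2), np.array([1.0, -1.0]),
#     ...                                 np.array([-2.0, -2.0]),
#     ...                                 np.array([2.0, 2.0]))
#     >>> step, list(hits)
#     (2.0, [1, -1])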
def find_active_constraints(x, lb, ub, rtol=1e-10):
"""Determine which constraints are active in a given point.
The threshold is computed using `rtol` and the absolute value of the
closest bound.
Returns
-------
active : ndarray of int with shape of x
Each component shows whether the corresponding constraint is active:
* 0 - a constraint is not active.
* -1 - a lower bound is active.
        * 1 - an upper bound is active.
"""
active = np.zeros_like(x, dtype=int)
if rtol == 0:
active[x <= lb] = -1
active[x >= ub] = 1
return active
lower_dist = x - lb
upper_dist = ub - x
lower_threshold = rtol * np.maximum(1, np.abs(lb))
upper_threshold = rtol * np.maximum(1, np.abs(ub))
lower_active = (np.isfinite(lb) &
(lower_dist <= np.minimum(upper_dist, lower_threshold)))
active[lower_active] = -1
upper_active = (np.isfinite(ub) &
(upper_dist <= np.minimum(lower_dist, upper_threshold)))
active[upper_active] = 1
return active
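# Illustrative usage (editor's sketch, not part of the original module):
#
#     >>> import numpy as np
#     >>> list(find_active_constraints(np.array([0.0, 5.0, 10.0]),
#     ...                              np.array([0.0, -np.inf, 8.0]),
#     ...                              np.array([10.0, np.inf, 10.0])))
#     [-1, 0, 1]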
def make_strictly_feasible(x, lb, ub, rstep=1e-10):
"""Shift a point to the interior of a feasible region.
Each element of the returned vector is at least at a relative distance
`rstep` from the closest bound. If ``rstep=0`` then `np.nextafter` is used.
"""
x_new = x.copy()
active = find_active_constraints(x, lb, ub, rstep)
lower_mask = np.equal(active, -1)
upper_mask = np.equal(active, 1)
if rstep == 0:
x_new[lower_mask] = np.nextafter(lb[lower_mask], ub[lower_mask])
x_new[upper_mask] = np.nextafter(ub[upper_mask], lb[upper_mask])
else:
x_new[lower_mask] = (lb[lower_mask] +
rstep * np.maximum(1, np.abs(lb[lower_mask])))
x_new[upper_mask] = (ub[upper_mask] -
rstep * np.maximum(1, np.abs(ub[upper_mask])))
tight_bounds = (x_new < lb) | (x_new > ub)
x_new[tight_bounds] = 0.5 * (lb[tight_bounds] + ub[tight_bounds])
return x_new
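# Illustrative usage (editor's sketch, not part of the original module). A
# point sitting exactly on its lower bound is nudged into the interior:
#
#     >>> import numpy as np
#     >>> x = make_strictly_feasible(np.array([0.0, 5.0]), np.array([0.0, 0.0]),
#     ...                            np.array([10.0, 10.0]), rstep=1e-3)
#     >>> x[0], x[1]
#     (0.001, 5.0)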
def CL_scaling_vector(x, g, lb, ub):
"""Compute Coleman-Li scaling vector and its derivatives.
Components of a vector v are defined as follows:
::
| ub[i] - x[i], if g[i] < 0 and ub[i] < np.inf
v[i] = | x[i] - lb[i], if g[i] > 0 and lb[i] > -np.inf
| 1, otherwise
According to this definition v[i] >= 0 for all i. It differs from the
definition in paper [1]_ (eq. (2.2)), where the absolute value of v is
used. Both definitions are equivalent down the line.
Derivatives of v with respect to x take value 1, -1 or 0 depending on a
case.
Returns
-------
v : ndarray with shape of x
Scaling vector.
dv : ndarray with shape of x
Derivatives of v[i] with respect to x[i], diagonal elements of v's
Jacobian.
References
----------
.. [1] M.A. Branch, T.F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
"""
v = np.ones_like(x)
dv = np.zeros_like(x)
mask = (g < 0) & np.isfinite(ub)
v[mask] = ub[mask] - x[mask]
dv[mask] = -1
mask = (g > 0) & np.isfinite(lb)
v[mask] = x[mask] - lb[mask]
dv[mask] = 1
return v, dv
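# Illustrative usage (editor's sketch, not part of the original module). The
# first variable is limited by its upper bound (g < 0), the second by its
# lower bound (g > 0):
#
#     >>> import numpy as np
#     >>> v, dv = CL_scaling_vector(x=np.array([1.0, 5.0]),
#     ...                           g=np.array([-1.0, 1.0]),
#     ...                           lb=np.array([0.0, 2.0]),
#     ...                           ub=np.array([10.0, np.inf]))
#     >>> list(v), list(dv)
#     ([9.0, 3.0], [-1.0, 1.0])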
def reflective_transformation(y, lb, ub):
"""Compute reflective transformation and its gradient."""
if in_bounds(y, lb, ub):
return y, np.ones_like(y)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
x = y.copy()
g_negative = np.zeros_like(y, dtype=bool)
mask = lb_finite & ~ub_finite
x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
g_negative[mask] = y[mask] < lb[mask]
mask = ~lb_finite & ub_finite
x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
g_negative[mask] = y[mask] > ub[mask]
mask = lb_finite & ub_finite
d = ub - lb
t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
g_negative[mask] = t > d[mask]
g = np.ones_like(y)
g[g_negative] = -1
return x, g
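# Illustrative usage (editor's sketch, not part of the original module). A
# point below its lower bound is reflected back into the feasible region and
# the corresponding gradient sign is flipped:
#
#     >>> import numpy as np
#     >>> x, g = reflective_transformation(np.array([-1.0]), np.array([0.0]),
#     ...                                  np.array([np.inf]))
#     >>> list(x), list(g)
#     ([1.0], [-1.0])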
# Functions to display algorithm's progress.
def print_header_nonlinear():
print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}{5:^15}"
.format("Iteration", "Total nfev", "Cost", "Cost reduction",
"Step norm", "Optimality"))
def print_iteration_nonlinear(iteration, nfev, cost, cost_reduction,
step_norm, optimality):
if cost_reduction is None:
cost_reduction = " " * 15
else:
cost_reduction = "{0:^15.2e}".format(cost_reduction)
if step_norm is None:
step_norm = " " * 15
else:
step_norm = "{0:^15.2e}".format(step_norm)
print("{0:^15}{1:^15}{2:^15.4e}{3}{4}{5:^15.2e}"
.format(iteration, nfev, cost, cost_reduction,
step_norm, optimality))
def print_header_linear():
print("{0:^15}{1:^15}{2:^15}{3:^15}{4:^15}"
.format("Iteration", "Cost", "Cost reduction", "Step norm",
"Optimality"))
def print_iteration_linear(iteration, cost, cost_reduction, step_norm,
optimality):
if cost_reduction is None:
cost_reduction = " " * 15
else:
cost_reduction = "{0:^15.2e}".format(cost_reduction)
if step_norm is None:
step_norm = " " * 15
else:
step_norm = "{0:^15.2e}".format(step_norm)
print("{0:^15}{1:^15.4e}{2}{3}{4:^15.2e}".format(
iteration, cost, cost_reduction, step_norm, optimality))
# Simple helper functions.
def compute_grad(J, f):
"""Compute gradient of the least-squares cost function."""
if isinstance(J, LinearOperator):
return J.rmatvec(f)
else:
return J.T.dot(f)
def compute_jac_scale(J, scale_inv_old=None):
"""Compute variables scale based on the Jacobian matrix."""
if issparse(J):
scale_inv = np.asarray(J.power(2).sum(axis=0)).ravel()**0.5
else:
scale_inv = np.sum(J**2, axis=0)**0.5
if scale_inv_old is None:
scale_inv[scale_inv == 0] = 1
else:
scale_inv = np.maximum(scale_inv, scale_inv_old)
return 1 / scale_inv, scale_inv
def left_multiplied_operator(J, d):
"""Return diag(d) J as LinearOperator."""
J = aslinearoperator(J)
def matvec(x):
return d * J.matvec(x)
def matmat(X):
return d[:, np.newaxis] * J.matmat(X)
def rmatvec(x):
return J.rmatvec(x.ravel() * d)
return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
rmatvec=rmatvec)
def right_multiplied_operator(J, d):
"""Return J diag(d) as LinearOperator."""
J = aslinearoperator(J)
def matvec(x):
return J.matvec(np.ravel(x) * d)
def matmat(X):
return J.matmat(X * d[:, np.newaxis])
def rmatvec(x):
return d * J.rmatvec(x)
return LinearOperator(J.shape, matvec=matvec, matmat=matmat,
rmatvec=rmatvec)
def regularized_lsq_operator(J, diag):
"""Return a matrix arising in regularized least squares as LinearOperator.
The matrix is
[ J ]
[ D ]
where D is diagonal matrix with elements from `diag`.
"""
J = aslinearoperator(J)
m, n = J.shape
def matvec(x):
return np.hstack((J.matvec(x), diag * x))
def rmatvec(x):
x1 = x[:m]
x2 = x[m:]
return J.rmatvec(x1) + diag * x2
return LinearOperator((m + n, n), matvec=matvec, rmatvec=rmatvec)
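# Illustrative usage (editor's sketch, not part of the original module). The
# operator stacks J on top of diag(`diag`), so its matvec returns m + n values:
#
#     >>> import numpy as np
#     >>> op = regularized_lsq_operator(np.eye(2), np.array([0.5, 0.5]))
#     >>> list(op.matvec(np.array([1.0, 2.0])))
#     [1.0, 2.0, 0.5, 1.0]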
def right_multiply(J, d, copy=True):
"""Compute J diag(d).
    If `copy` is False, `J` is modified in place (unless it is a LinearOperator).
"""
if copy and not isinstance(J, LinearOperator):
J = J.copy()
if issparse(J):
J.data *= d.take(J.indices, mode='clip') # scikit-learn recipe.
elif isinstance(J, LinearOperator):
J = right_multiplied_operator(J, d)
else:
J *= d
return J
def left_multiply(J, d, copy=True):
"""Compute diag(d) J.
    If `copy` is False, `J` is modified in place (unless it is a LinearOperator).
"""
if copy and not isinstance(J, LinearOperator):
J = J.copy()
if issparse(J):
J.data *= np.repeat(d, np.diff(J.indptr)) # scikit-learn recipe.
elif isinstance(J, LinearOperator):
J = left_multiplied_operator(J, d)
else:
J *= d[:, np.newaxis]
return J
def check_termination(dF, F, dx_norm, x_norm, ratio, ftol, xtol):
"""Check termination condition for nonlinear least squares."""
ftol_satisfied = dF < ftol * F and ratio > 0.25
xtol_satisfied = dx_norm < xtol * (xtol + x_norm)
if ftol_satisfied and xtol_satisfied:
return 4
elif ftol_satisfied:
return 2
elif xtol_satisfied:
return 3
else:
return None
def scale_for_robust_loss_function(J, f, rho):
"""Scale Jacobian and residuals for a robust loss function.
Arrays are modified in place.
"""
J_scale = rho[1] + 2 * rho[2] * f**2
J_scale[J_scale < EPS] = EPS
J_scale **= 0.5
f *= rho[1] / J_scale
return left_multiply(J, J_scale, copy=False), f
| bsd-3-clause | 2,907,033,500,794,492,400 | 27.087193 | 79 | 0.57014 | false |
shaftoe/home-assistant | homeassistant/components/cover/lutron_caseta.py | 5 | 1902 | """
Support for Lutron Caseta SerenaRollerShade.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.lutron_caseta/
"""
import logging
from homeassistant.components.cover import (
CoverDevice, SUPPORT_OPEN, SUPPORT_CLOSE)
from homeassistant.components.lutron_caseta import (
LUTRON_CASETA_SMARTBRIDGE, LutronCasetaDevice)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['lutron_caseta']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Lutron Caseta Serena shades as a cover device."""
devs = []
bridge = hass.data[LUTRON_CASETA_SMARTBRIDGE]
cover_devices = bridge.get_devices_by_types(["SerenaRollerShade"])
for cover_device in cover_devices:
dev = LutronCasetaCover(cover_device, bridge)
devs.append(dev)
add_devices(devs, True)
class LutronCasetaCover(LutronCasetaDevice, CoverDevice):
"""Representation of a Lutron Serena shade."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._state["current_state"] < 1
def close_cover(self):
"""Close the cover."""
self._smartbridge.set_value(self._device_id, 0)
def open_cover(self):
"""Open the cover."""
self._smartbridge.set_value(self._device_id, 100)
def set_cover_position(self, position, **kwargs):
"""Move the roller shutter to a specific position."""
self._smartbridge.set_value(self._device_id, position)
def update(self):
"""Call when forcing a refresh of the device."""
self._state = self._smartbridge.get_device_by_id(self._device_id)
_LOGGER.debug(self._state)
| apache-2.0 | -5,067,780,650,745,974,000 | 29.677419 | 74 | 0.679811 | false |
bobhale/climate | havana-moonsong.py | 2 | 3345 | ############################################################################
# A sample program read a data file, generate a midi file
# and write to disk.
############################################################################
#Import the library
from midiutil.MidiFile3 import MIDIFile
from random import randint
import csv
def winddirection_to_values(argument):
switcher = {
"N": 19,
"E": 23,
"S": 26,
"W": 27
}
return switcher.get(argument, 0)
def moonphase_to_values(argument):
switcher = {
"1": 31,
# "1.5": 45,
"2": 38,
# "2.5": 50,
"3": 43,
# "3.5": 50,
"4": 38,
# "4.5":45
}
return switcher.get(argument, 0)
def randompitch():
pitcher = randint(0,3)
return pitcher
# constant values
channel = 0
channel2 = 1
channel3 = 2
track1 = 0
track2 = 1
track3 = 2
time = 0
beats = 400
# indexes to elements of data row
windDirection = 7
windSpeed = 6
precipitation = 1
moonphase = 8
highTempAdjustment = 30
lowTempAdjustment = 30
# Create the MIDIFile Object with 3 tracks plus names of tracks
MyMIDI = MIDIFile(3)
MyMIDI.addTrackName(track1,time,"Temperature MusicHI")
time = time +1
MyMIDI.addTrackName(track2,time,"Temperature MusicLOW")
time = time +1
MyMIDI.addTrackName(track3,time,"Temperature MusicPrecip")
time = time +1
MyMIDI.addTempo(track1,time, beats)
time = time +1
MyMIDI.addTempo(track2,time, beats)
time = time +1
MyMIDI.addTempo(track3,time, beats)
# set voice (sound) to be played on tracks
# we used General Midi sounds ( see General Midi docs )
time = time +1
MyMIDI.addProgramChange(track1,0, time, 47) # General MIDI voice for track 1
#time = time +1
MyMIDI.addProgramChange(track2,1, time, 112) # General MIDI voice for track 2
time = time +1
MyMIDI.addProgramChange(track3,2, time, 77) # General MIDI voice for track 3
time = time +1
# open and read each line ( data object ) in file
f = open("lunarprecipitation.txt")
for row in csv.reader(f):
    # calculate pitch value from the moon phase (a wind-direction variant is kept commented out below)
#pitch1 = 20 + winddirection_to_values(row[windDirection])
pitch1 = moonphase_to_values(row[moonphase])
#pitch2Tmp = float(row[windSpeed])
# pitch2 = int(pitch2Tmp) + lowTempAdjustment
duration = 1.5
durationlong = 2.5
volume = 80
# add initial tracks
# Add a note. addNote expects the following information:
if row[moonphase] != "0":
MyMIDI.addNote(track1,channel,pitch1,time,durationlong,volume)
time = time +1
# MyMIDI.addNote(track2,channel2,pitch2,time,duration,volume)
# time = time + 1
if row[precipitation] != "0.00": #got some rain today
pitch3 = 64 + randompitch()
pitch4 = pitch3 + 4
MyMIDI.addNote(track3,channel3,pitch3,time,1,100)
MyMIDI.addNote(track3,channel3,pitch4,time,1,100)
# pitch3 = pitch3 + 1
# MyMIDI.addNote(track3,channel3,pitch3,time,3,100)
#print(row[1])
time = time + 4
# change track 3 to ocean sound for the finale !!
#MyMIDI.addProgramChange(track3,2, time, 122) # 122 = Seashore
#time = time + 1
#MyMIDI.addNote(track3,channel3,40,time,45,100) # let it ring....
# And write it to disk.
binfile = open("2015MSPMoonPrecip_HAVANA.mid", 'wb')
MyMIDI.writeFile(binfile)
binfile.close()
| gpl-3.0 | -137,747,492,866,787,630 | 23.962687 | 77 | 0.618236 | false |
falau/pogom | pogom/pgoapi/protos/POGOProtos/Networking/Requests/Messages/GetBuddyWalked_pb2.py | 6 | 1920 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/GetBuddyWalked.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/GetBuddyWalked.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\n<POGOProtos/Networking/Requests/Messages/GetBuddyWalked.proto\x12\'POGOProtos.Networking.Requests.Messages\"\x17\n\x15GetBuddyWalkedMessageb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETBUDDYWALKEDMESSAGE = _descriptor.Descriptor(
name='GetBuddyWalkedMessage',
full_name='POGOProtos.Networking.Requests.Messages.GetBuddyWalkedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=105,
serialized_end=128,
)
DESCRIPTOR.message_types_by_name['GetBuddyWalkedMessage'] = _GETBUDDYWALKEDMESSAGE
GetBuddyWalkedMessage = _reflection.GeneratedProtocolMessageType('GetBuddyWalkedMessage', (_message.Message,), dict(
DESCRIPTOR = _GETBUDDYWALKEDMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.GetBuddyWalked_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.GetBuddyWalkedMessage)
))
_sym_db.RegisterMessage(GetBuddyWalkedMessage)
# @@protoc_insertion_point(module_scope)
| mit | -4,002,278,688,682,819,600 | 29.967742 | 174 | 0.778646 | false |
zetaops/ulakbus | ulakbus/services/personel/hitap/hizmet_tazminat_ekle.py | 1 | 1180 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
"""HITAP Tazminat Ekle
Hitap'a personelin Tazminat bilgilerinin eklenmesini yapar.
"""
from ulakbus.services.personel.hitap.hitap_service import ZatoHitapService
class HizmetTazminatEkle(ZatoHitapService):
"""
    Hizmet Tazminat (compensation) record insert service, inherited from the HITAP insert service
"""
HAS_CHANNEL = True
service_dict = {
'service_name': 'HizmetTazminatInsert',
'fields': {
'gorev': 'gorev',
'kadrosuzluk': 'kadrosuzluk',
'makam': 'makam',
'tckn': 'tckn',
'temsil': 'temsil',
'unvanKod': 'unvan_kod',
'tazminatTarihi': 'tazminat_tarihi',
'tazminatBitisTarihi': 'tazminat_bitis_tarihi',
'kurumOnayTarihi': 'kurum_onay_tarihi'
},
'date_filter': ['tazminat_tarihi', 'tazminat_bitis_tarihi', 'kurum_onay_tarihi'],
'long_to_string': ['kayit_no'],
'required_fields': ['tckn', 'unvanKod', 'tazminatTarihi', 'kurumOnayTarihi']
}
| gpl-3.0 | 3,348,967,054,435,488,300 | 29.153846 | 89 | 0.611395 | false |
lshain-android-source/external-chromium_org-tools-grit | grit/grd_reader.py | 8 | 7246 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Class for reading GRD files into memory, without processing them.
'''
import os.path
import sys
import types
import xml.sax
import xml.sax.handler
from grit import exception
from grit import util
from grit.node import base
from grit.node import mapping
from grit.node import misc
class StopParsingException(Exception):
'''An exception used to stop parsing.'''
pass
class GrdContentHandler(xml.sax.handler.ContentHandler):
def __init__(self, stop_after, debug, dir, defines, tags_to_ignore):
# Invariant of data:
# 'root' is the root of the parse tree being created, or None if we haven't
# parsed out any elements.
    # 'stack' is a stack of elements that we push new nodes onto and
# pop from when they finish parsing, or [] if we are not currently parsing.
# 'stack[-1]' is the top of the stack.
self.root = None
self.stack = []
self.stop_after = stop_after
self.debug = debug
self.dir = dir
self.defines = defines
self.tags_to_ignore = tags_to_ignore or set()
self.ignore_depth = 0
def startElement(self, name, attrs):
if self.ignore_depth or name in self.tags_to_ignore:
if self.debug and self.ignore_depth == 0:
print "Ignoring element %s and its children" % name
self.ignore_depth += 1
return
if self.debug:
attr_list = ' '.join('%s="%s"' % kv for kv in attrs.items())
print ("Starting parsing of element %s with attributes %r" %
(name, attr_list or '(none)'))
typeattr = attrs.get('type')
node = mapping.ElementToClass(name, typeattr)()
if self.stack:
self.stack[-1].AddChild(node)
node.StartParsing(name, self.stack[-1])
else:
assert self.root is None
self.root = node
node.StartParsing(name, None)
if self.defines:
node.SetDefines(self.defines)
self.stack.append(node)
for attr, attrval in attrs.items():
node.HandleAttribute(attr, attrval)
def endElement(self, name):
if self.ignore_depth:
self.ignore_depth -= 1
return
if name == 'part':
partnode = self.stack[-1]
partnode.started_inclusion = True
# Add the contents of the sub-grd file as children of the <part> node.
partname = partnode.GetInputPath()
if os.path.dirname(partname):
# TODO(benrg): Remove this limitation. (The problem is that GRIT
# assumes that files referenced from the GRD file are relative to
# a path stored in the root <grit> node.)
raise exception.GotPathExpectedFilenameOnly()
partname = os.path.join(self.dir, partname)
# Exceptions propagate to the handler in grd_reader.Parse().
xml.sax.parse(partname, GrdPartContentHandler(self))
if self.debug:
print "End parsing of element %s" % name
self.stack.pop().EndParsing()
if name == self.stop_after:
raise StopParsingException()
def characters(self, content):
if self.ignore_depth == 0:
if self.stack[-1]:
self.stack[-1].AppendContent(content)
def ignorableWhitespace(self, whitespace):
# TODO(joi) This is not supported by expat. Should use a different XML parser?
pass
class GrdPartContentHandler(xml.sax.handler.ContentHandler):
def __init__(self, parent):
self.parent = parent
self.depth = 0
def startElement(self, name, attrs):
if self.depth:
self.parent.startElement(name, attrs)
else:
if name != 'grit-part':
raise exception.MissingElement("root tag must be <grit-part>")
if attrs:
raise exception.UnexpectedAttribute(
"<grit-part> tag must not have attributes")
self.depth += 1
def endElement(self, name):
self.depth -= 1
if self.depth:
self.parent.endElement(name)
def characters(self, content):
self.parent.characters(content)
def ignorableWhitespace(self, whitespace):
self.parent.ignorableWhitespace(whitespace)
def Parse(filename_or_stream, dir=None, stop_after=None, first_ids_file=None,
debug=False, defines=None, tags_to_ignore=None, target_platform=None):
'''Parses a GRD file into a tree of nodes (from grit.node).
If filename_or_stream is a stream, 'dir' should point to the directory
notionally containing the stream (this feature is only used in unit tests).
If 'stop_after' is provided, the parsing will stop once the first node
with this name has been fully parsed (including all its contents).
If 'debug' is true, lots of information about the parsing events will be
printed out during parsing of the file.
If 'first_ids_file' is non-empty, it is used to override the setting for the
first_ids_file attribute of the <grit> root node. Note that the first_ids_file
parameter should be relative to the cwd, even though the first_ids_file
attribute of the <grit> node is relative to the grd file.
If 'target_platform' is set, this is used to determine the target
platform of builds, instead of using |sys.platform|.
Args:
filename_or_stream: './bla.xml'
dir: None (if filename_or_stream is a filename) or '.'
stop_after: 'inputs'
first_ids_file: 'GRIT_DIR/../gritsettings/resource_ids'
debug: False
defines: dictionary of defines, like {'chromeos': '1'}
target_platform: None or the value that would be returned by sys.platform
on your target platform.
Return:
Subclass of grit.node.base.Node
Throws:
grit.exception.Parsing
'''
if dir is None and isinstance(filename_or_stream, types.StringType):
dir = util.dirname(filename_or_stream)
handler = GrdContentHandler(stop_after=stop_after, debug=debug, dir=dir,
defines=defines, tags_to_ignore=tags_to_ignore)
try:
xml.sax.parse(filename_or_stream, handler)
except StopParsingException:
assert stop_after
pass
except:
if not debug:
print "parse exception: run GRIT with the -x flag to debug .grd problems"
raise
if handler.root.name != 'grit':
raise exception.MissingElement("root tag must be <grit>")
if hasattr(handler.root, 'SetOwnDir'):
# Fix up the base_dir so it is relative to the input file.
assert dir is not None
handler.root.SetOwnDir(dir)
if isinstance(handler.root, misc.GritNode):
if target_platform:
handler.root.SetTargetPlatform(target_platform)
if first_ids_file:
# Make the path to the first_ids_file relative to the grd file,
# unless it begins with GRIT_DIR.
GRIT_DIR_PREFIX = 'GRIT_DIR'
if not (first_ids_file.startswith(GRIT_DIR_PREFIX)
and first_ids_file[len(GRIT_DIR_PREFIX)] in ['/', '\\']):
rel_dir = os.path.relpath(os.getcwd(), dir)
first_ids_file = util.normpath(os.path.join(rel_dir, first_ids_file))
handler.root.attrs['first_ids_file'] = first_ids_file
# Assign first ids to the nodes that don't have them.
handler.root.AssignFirstIds(filename_or_stream, defines)
return handler.root
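# Illustrative usage (editor's sketch, not part of the original file; the
# file name below is hypothetical):
#
#   root = Parse('./bla.xml', stop_after='inputs')
#   print root.name  # prints 'grit'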
if __name__ == '__main__':
util.ChangeStdoutEncoding()
print unicode(Parse(sys.argv[1]))
| bsd-2-clause | 895,549,244,715,326,000 | 32.391705 | 84 | 0.678443 | false |
eduNEXT/edx-platform | common/lib/xmodule/xmodule/tests/test_video.py | 4 | 49670 | # pylint: disable=protected-access
"""Test for Video Xmodule functional logic.
These tests read their data from XML, not from Mongo.
We have a ModuleStoreTestCase class defined in
common/lib/xmodule/xmodule/modulestore/tests/django_utils.py. You can
search for usages of this in the cms and lms tests for examples. You use
this so that it will do things like point the modulestore setting to mongo,
flush the contentstore before and after, load the templates, etc.
You can then use the CourseFactory and XModuleItemFactory as defined
in common/lib/xmodule/xmodule/modulestore/tests/factories.py to create
the course, section, subsection, unit, etc.
"""
import datetime
import json
import os
import shutil
import unittest
from tempfile import mkdtemp
from uuid import uuid4
from unittest.mock import ANY, MagicMock, Mock, patch
import pytest
import ddt
import httpretty
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from fs.osfs import OSFS
from lxml import etree
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.tests import get_test_descriptor_system
from xmodule.validation import StudioValidationMessage
from xmodule.video_module import EXPORT_IMPORT_STATIC_DIR, VideoBlock, create_youtube_string
from xmodule.video_module.transcripts_utils import download_youtube_subs, save_subs_to_store, save_to_store
from .test_import import DummySystem
SRT_FILEDATA = '''
0
00:00:00,270 --> 00:00:02,720
sprechen sie deutsch?
1
00:00:02,720 --> 00:00:05,430
Ja, ich spreche Deutsch
'''
CRO_SRT_FILEDATA = '''
0
00:00:00,270 --> 00:00:02,720
Dobar dan!
1
00:00:02,720 --> 00:00:05,430
Kako ste danas?
'''
YOUTUBE_SUBTITLES = (
"Sample trascript line 1. "
"Sample trascript line 2. "
"Sample trascript line 3."
)
MOCKED_YOUTUBE_TRANSCRIPT_API_RESPONSE = '''
<transcript>
<text start="27.88" dur="3.68">Sample trascript line 1.</text>
<text start="31.76" dur="9.54">Sample trascript line 2.</text>
<text start="44.04" dur="3.1">Sample trascript line 3.</text>
</transcript>
'''
ALL_LANGUAGES = (
["en", "English"],
["eo", "Esperanto"],
["ur", "Urdu"]
)
def instantiate_descriptor(**field_data):
"""
Instantiate descriptor with most properties.
"""
if field_data.get('data', None):
field_data = VideoBlock.parse_video_xml(field_data['data'])
system = get_test_descriptor_system()
course_key = CourseLocator('org', 'course', 'run')
usage_key = course_key.make_usage_key('video', 'SampleProblem')
return system.construct_xblock_from_class(
VideoBlock,
scope_ids=ScopeIds(None, None, usage_key, usage_key),
field_data=DictFieldData(field_data),
)
# Because of the way xmodule.video_module.video_module imports edxval.api, we
# must mock the entire module, which requires making mock exception classes.
class _MockValVideoNotFoundError(Exception):
"""Mock ValVideoNotFoundError exception"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class _MockValCannotCreateError(Exception):
"""Mock ValCannotCreateError exception"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
class VideoBlockTest(unittest.TestCase):
"""Logic tests for Video XBlock."""
raw_field_data = {
'data': '<video />'
}
def test_parse_youtube(self):
"""Test parsing old-style Youtube ID strings into a dict."""
youtube_str = '0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg'
output = VideoBlock._parse_youtube(youtube_str)
assert output == {'0.75': 'jNCf2gIqpeE', '1.00': 'ZwkTiUPN0mg', '1.25': 'rsq9auxASqI', '1.50': 'kMyNdzVHHgg'}
def test_parse_youtube_one_video(self):
"""
Ensure that all keys are present and missing speeds map to the
empty string.
"""
youtube_str = '0.75:jNCf2gIqpeE'
output = VideoBlock._parse_youtube(youtube_str)
assert output == {'0.75': 'jNCf2gIqpeE', '1.00': '', '1.25': '', '1.50': ''}
def test_parse_youtube_invalid(self):
"""Ensure that ids that are invalid return an empty dict"""
# invalid id
youtube_str = 'thisisaninvalidid'
output = VideoBlock._parse_youtube(youtube_str)
assert output == {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
# another invalid id
youtube_str = ',::,:,,'
output = VideoBlock._parse_youtube(youtube_str)
assert output == {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
# and another one, partially invalid
youtube_str = '0.75_BAD!!!,1.0:AXdE34_U,1.25:KLHF9K_Y,1.5:VO3SxfeD,'
output = VideoBlock._parse_youtube(youtube_str)
assert output == {'0.75': '', '1.00': 'AXdE34_U', '1.25': 'KLHF9K_Y', '1.50': 'VO3SxfeD'}
def test_parse_youtube_key_format(self):
"""
Make sure that inconsistent speed keys are parsed correctly.
"""
youtube_str = '1.00:p2Q6BrNhdh8'
youtube_str_hack = '1.0:p2Q6BrNhdh8'
assert VideoBlock._parse_youtube(youtube_str) == VideoBlock._parse_youtube(youtube_str_hack)
def test_parse_youtube_empty(self):
"""
Some courses have empty youtube attributes, so we should handle
that well.
"""
assert VideoBlock._parse_youtube('') == {'0.75': '', '1.00': '', '1.25': '', '1.50': ''}
class VideoBlockTestBase(unittest.TestCase):
"""
Base class for tests for VideoBlock
"""
def setUp(self):
super().setUp()
self.descriptor = instantiate_descriptor()
def assertXmlEqual(self, expected, xml):
"""
Assert that the given XML fragments have the same attributes, text, and
(recursively) children
"""
def get_child_tags(elem):
"""Extract the list of tag names for children of elem"""
return [child.tag for child in elem]
for attr in ['tag', 'attrib', 'text', 'tail']:
expected_attr = getattr(expected, attr)
actual_attr = getattr(xml, attr)
assert expected_attr == actual_attr
assert get_child_tags(expected) == get_child_tags(xml)
for left, right in zip(expected, xml):
self.assertXmlEqual(left, right)
class TestCreateYoutubeString(VideoBlockTestBase):
"""
Checks that create_youtube_string correcty extracts information from Video descriptor.
"""
def test_create_youtube_string(self):
"""
Test that Youtube ID strings are correctly created when writing back out to XML.
"""
self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
self.descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
expected = "0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8"
assert create_youtube_string(self.descriptor) == expected
def test_create_youtube_string_missing(self):
"""
Test that Youtube IDs which aren't explicitly set aren't included in the output string.
"""
self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
expected = "0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
assert create_youtube_string(self.descriptor) == expected
class TestCreateYouTubeUrl(VideoBlockTestBase):
"""
Tests for helper method `create_youtube_url`.
"""
def test_create_youtube_url_unicode(self):
"""
Test that passing unicode to `create_youtube_url` doesn't throw
an error.
"""
self.descriptor.create_youtube_url("üñîçø∂é")
@ddt.ddt
class VideoBlockImportTestCase(TestCase):
"""
Make sure that VideoBlock can import an old XML-based video correctly.
"""
def assert_attributes_equal(self, video, attrs):
"""
Assert that `video` has the correct attributes. `attrs` is a map of {metadata_field: value}.
"""
for key, value in attrs.items():
assert getattr(video, key) == value
def test_constructor(self):
sample_xml = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="true"
download_video="true"
start_time="00:00:01"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="ua" src="ukrainian_translation.srt" />
<transcript language="ge" src="german_translation.srt" />
</video>
'''
descriptor = instantiate_descriptor(data=sample_xml)
self.assert_attributes_equal(descriptor, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'download_video': True,
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'handout': 'http://www.example.com/handout',
'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg'],
'data': '',
'transcripts': {'ua': 'ukrainian_translation.srt', 'ge': 'german_translation.srt'}
})
def test_from_xml(self):
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="uk" src="ukrainian_translation.srt" />
<transcript language="de" src="german_translation.srt" />
</video>
'''
output = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'handout': 'http://www.example.com/handout',
'download_track': False,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
'transcripts': {'uk': 'ukrainian_translation.srt', 'de': 'german_translation.srt'},
})
@ddt.data(
('course-v1:test_org+test_course+test_run',
'/asset-v1:test_org+test_course+test_run+type@[email protected]'),
('test_org/test_course/test_run', '/c4x/test_org/test_course/asset/test.png')
)
@ddt.unpack
def test_from_xml_when_handout_is_course_asset(self, course_id_string, expected_handout_link):
"""
Test that if handout link is course_asset then it will contain targeted course_id in handout link.
"""
module_system = DummySystem(load_error_modules=True)
course_id = CourseKey.from_string(course_id_string)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="/asset-v1:test_org_1+test_course_1+test_run_1+type@[email protected]"/>
<transcript language="uk" src="ukrainian_translation.srt" />
<transcript language="de" src="german_translation.srt" />
</video>
'''
id_generator = Mock()
id_generator.target_course_id = course_id
output = VideoBlock.from_xml(xml_data, module_system, id_generator)
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
'handout': expected_handout_link,
'download_track': False,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
'transcripts': {'uk': 'ukrainian_translation.srt', 'de': 'german_translation.srt'},
})
def test_from_xml_missing_attributes(self):
"""
Ensure that attributes have the right values if they aren't
explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
show_captions="true">
<source src="http://www.example.com/source.mp4"/>
</video>
'''
output = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'handout': None,
'download_track': False,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
def test_from_xml_missing_download_track(self):
"""
Ensure that attributes have the right values if they aren't
explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,1.25:1EeWXzPdhSA"
show_captions="true">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
'''
output = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': 'http://www.example.com/track',
'download_track': True,
'download_video': False,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
'transcripts': {},
})
def test_from_xml_no_attributes(self):
"""
Make sure settings are correct if none are explicitly set in XML.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '<video></video>'
output = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': '3_yD_cEKoCk',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'handout': None,
'download_track': False,
'download_video': False,
'html5_sources': [],
'data': '',
'transcripts': {},
})
def test_from_xml_double_quotes(self):
"""
Make sure we can handle the double-quoted string format (which was used for exporting for
a few weeks).
"""
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name=""display_name""
html5_sources="["source_1", "source_2"]"
show_captions="false"
download_video="true"
sub=""html5_subtitles""
track=""http://www.example.com/track""
handout=""http://www.example.com/handout""
download_track="true"
youtube_id_0_75=""OEoXaMPEzf65""
youtube_id_1_25=""OEoXaMPEzf125""
youtube_id_1_5=""OEoXaMPEzf15""
youtube_id_1_0=""OEoXaMPEzf10""
/>
'''
output = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'OEoXaMPEzf65',
'youtube_id_1_0': 'OEoXaMPEzf10',
'youtube_id_1_25': 'OEoXaMPEzf125',
'youtube_id_1_5': 'OEoXaMPEzf15',
'show_captions': False,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': 'http://www.example.com/track',
'handout': 'http://www.example.com/handout',
'download_track': True,
'download_video': True,
'html5_sources': ["source_1", "source_2"],
'data': ''
})
def test_from_xml_double_quote_concatenated_youtube(self):
module_system = DummySystem(load_error_modules=True)
xml_data = '''
<video display_name="Test Video"
youtube="1.0:"p2Q6BrNhdh8",1.25:"1EeWXzPdhSA"">
</video>
'''
output = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': '',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': '',
'show_captions': True,
'start_time': datetime.timedelta(seconds=0.0),
'end_time': datetime.timedelta(seconds=0.0),
'track': '',
'handout': None,
'download_track': False,
'download_video': False,
'html5_sources': [],
'data': ''
})
def test_old_video_format(self):
"""
Test backwards compatibility with VideoBlock's XML format.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
source="http://www.example.com/source.mp4"
from="00:00:01"
to="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
output = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(output, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
# 'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': '',
})
def test_old_video_data(self):
"""
Ensure that Video is able to read VideoBlock's model data.
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
from="00:00:01"
to="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
video = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(video, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
# 'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
def test_import_with_float_times(self):
"""
        Ensure that import works when 'from' and 'to' times are given as floats (seconds).
"""
module_system = DummySystem(load_error_modules=True)
xml_data = """
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
from="1.0"
to="60.0">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
</video>
"""
video = VideoBlock.from_xml(xml_data, module_system, Mock())
self.assert_attributes_equal(video, {
'youtube_id_0_75': 'izygArpw-Qo',
'youtube_id_1_0': 'p2Q6BrNhdh8',
'youtube_id_1_25': '1EeWXzPdhSA',
'youtube_id_1_5': 'rABDYkeK0x8',
'show_captions': False,
'start_time': datetime.timedelta(seconds=1),
'end_time': datetime.timedelta(seconds=60),
'track': 'http://www.example.com/track',
# 'download_track': True,
'html5_sources': ['http://www.example.com/source.mp4'],
'data': ''
})
@patch('xmodule.video_module.video_module.edxval_api')
def test_import_val_data(self, mock_val_api):
"""
        Test that the `from_xml` method works as expected.
"""
def mock_val_import(xml, edx_video_id, resource_fs, static_dir, external_transcripts, course_id):
"""Mock edxval.api.import_from_xml"""
assert xml.tag == 'video_asset'
assert dict(list(xml.items())) == {'mock_attr': ''}
assert edx_video_id == 'test_edx_video_id'
assert static_dir == EXPORT_IMPORT_STATIC_DIR
assert resource_fs is not None
assert external_transcripts == {'en': ['subs_3_yD_cEKoCk.srt.sjson']}
assert course_id == 'test_course_id'
return edx_video_id
edx_video_id = 'test_edx_video_id'
mock_val_api.import_from_xml = Mock(wraps=mock_val_import)
module_system = DummySystem(load_error_modules=True)
# Create static directory in import file system and place transcript files inside it.
module_system.resources_fs.makedirs(EXPORT_IMPORT_STATIC_DIR, recreate=True)
# import new edx_video_id
xml_data = """
<video edx_video_id="{edx_video_id}">
<video_asset mock_attr=""/>
</video>
""".format(
edx_video_id=edx_video_id
)
id_generator = Mock()
id_generator.target_course_id = 'test_course_id'
video = VideoBlock.from_xml(xml_data, module_system, id_generator)
self.assert_attributes_equal(video, {'edx_video_id': edx_video_id})
mock_val_api.import_from_xml.assert_called_once_with(
ANY,
edx_video_id,
module_system.resources_fs,
EXPORT_IMPORT_STATIC_DIR,
{'en': ['subs_3_yD_cEKoCk.srt.sjson']},
course_id='test_course_id'
)
@patch('xmodule.video_module.video_module.edxval_api')
def test_import_val_data_invalid(self, mock_val_api):
mock_val_api.ValCannotCreateError = _MockValCannotCreateError
mock_val_api.import_from_xml = Mock(side_effect=mock_val_api.ValCannotCreateError)
module_system = DummySystem(load_error_modules=True)
# Negative duration is invalid
xml_data = """
<video edx_video_id="test_edx_video_id">
<video_asset client_video_id="test_client_video_id" duration="-1"/>
</video>
"""
with pytest.raises(mock_val_api.ValCannotCreateError):
VideoBlock.from_xml(xml_data, module_system, id_generator=Mock())
class VideoExportTestCase(VideoBlockTestBase):
"""
Make sure that VideoBlock can export itself to XML correctly.
"""
def setUp(self):
super().setUp()
self.temp_dir = mkdtemp()
self.file_system = OSFS(self.temp_dir)
self.addCleanup(shutil.rmtree, self.temp_dir)
@patch('xmodule.video_module.video_module.edxval_api')
def test_export_to_xml(self, mock_val_api):
"""
Test that we write the correct XML on export.
"""
edx_video_id = 'test_edx_video_id'
mock_val_api.export_to_xml = Mock(
return_value=dict(
xml=etree.Element('video_asset'),
transcripts={}
)
)
self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
self.descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
self.descriptor.show_captions = False
self.descriptor.start_time = datetime.timedelta(seconds=1.0)
self.descriptor.end_time = datetime.timedelta(seconds=60)
self.descriptor.track = 'http://www.example.com/track'
self.descriptor.handout = 'http://www.example.com/handout'
self.descriptor.download_track = True
self.descriptor.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source1.ogg']
self.descriptor.download_video = True
self.descriptor.transcripts = {'ua': 'ukrainian_translation.srt', 'ge': 'german_translation.srt'}
self.descriptor.edx_video_id = edx_video_id
self.descriptor.runtime.course_id = MagicMock()
xml = self.descriptor.definition_to_xml(self.file_system)
parser = etree.XMLParser(remove_blank_text=True)
xml_string = '''\
<video
url_name="SampleProblem"
start_time="0:00:01"
show_captions="false"
end_time="0:01:00"
download_video="true"
download_track="true"
youtube="0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8"
transcripts='{"ge": "german_translation.srt", "ua": "ukrainian_translation.srt"}'
>
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source1.ogg"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<video_asset />
<transcript language="ge" src="german_translation.srt" />
<transcript language="ua" src="ukrainian_translation.srt" />
</video>
'''
expected = etree.XML(xml_string, parser=parser)
self.assertXmlEqual(expected, xml)
mock_val_api.export_to_xml.assert_called_once_with(
video_id=edx_video_id,
static_dir=EXPORT_IMPORT_STATIC_DIR,
resource_fs=self.file_system,
course_id=str(self.descriptor.runtime.course_id.for_branch(None)),
)
@patch('xmodule.video_module.video_module.edxval_api')
def test_export_to_xml_val_error(self, mock_val_api):
# Export should succeed without VAL data if video does not exist
mock_val_api.ValVideoNotFoundError = _MockValVideoNotFoundError
mock_val_api.export_to_xml = Mock(side_effect=mock_val_api.ValVideoNotFoundError)
self.descriptor.edx_video_id = 'test_edx_video_id'
self.descriptor.runtime.course_id = MagicMock()
xml = self.descriptor.definition_to_xml(self.file_system)
parser = etree.XMLParser(remove_blank_text=True)
xml_string = '<video url_name="SampleProblem"/>'
expected = etree.XML(xml_string, parser=parser)
self.assertXmlEqual(expected, xml)
@patch('xmodule.video_module.video_module.edxval_api', None)
def test_export_to_xml_empty_end_time(self):
"""
        Test that we write the correct XML on export when end_time is empty (zero seconds).
"""
self.descriptor.youtube_id_0_75 = 'izygArpw-Qo'
self.descriptor.youtube_id_1_0 = 'p2Q6BrNhdh8'
self.descriptor.youtube_id_1_25 = '1EeWXzPdhSA'
self.descriptor.youtube_id_1_5 = 'rABDYkeK0x8'
self.descriptor.show_captions = False
self.descriptor.start_time = datetime.timedelta(seconds=5.0)
self.descriptor.end_time = datetime.timedelta(seconds=0.0)
self.descriptor.track = 'http://www.example.com/track'
self.descriptor.download_track = True
self.descriptor.html5_sources = ['http://www.example.com/source.mp4', 'http://www.example.com/source.ogg']
self.descriptor.download_video = True
xml = self.descriptor.definition_to_xml(self.file_system)
parser = etree.XMLParser(remove_blank_text=True)
xml_string = '''\
<video url_name="SampleProblem" start_time="0:00:05" youtube="0.75:izygArpw-Qo,1.00:p2Q6BrNhdh8,1.25:1EeWXzPdhSA,1.50:rABDYkeK0x8" show_captions="false" download_video="true" download_track="true">
<source src="http://www.example.com/source.mp4"/>
<source src="http://www.example.com/source.ogg"/>
<track src="http://www.example.com/track"/>
</video>
'''
expected = etree.XML(xml_string, parser=parser)
self.assertXmlEqual(expected, xml)
@patch('xmodule.video_module.video_module.edxval_api', None)
def test_export_to_xml_empty_parameters(self):
"""
Test XML export with defaults.
"""
xml = self.descriptor.definition_to_xml(self.file_system)
# Check that download_video field is also set to default (False) in xml for backward compatibility
expected = '<video url_name="SampleProblem"/>\n'
assert expected == etree.tostring(xml, pretty_print=True).decode('utf-8')
@patch('xmodule.video_module.video_module.edxval_api', None)
def test_export_to_xml_with_transcripts_as_none(self):
"""
Test XML export with transcripts being overridden to None.
"""
self.descriptor.transcripts = None
xml = self.descriptor.definition_to_xml(self.file_system)
expected = b'<video url_name="SampleProblem"/>\n'
assert expected == etree.tostring(xml, pretty_print=True)
@patch('xmodule.video_module.video_module.edxval_api', None)
def test_export_to_xml_invalid_characters_in_attributes(self):
"""
        Test that XML export will *not* raise a TypeError from the lxml library if a field contains illegal characters.
        The illegal characters in a String field are removed from the string instead.
"""
self.descriptor.display_name = 'Display\x1eName'
xml = self.descriptor.definition_to_xml(self.file_system)
assert xml.get('display_name') == 'DisplayName'
@patch('xmodule.video_module.video_module.edxval_api', None)
def test_export_to_xml_unicode_characters(self):
"""
        Test that XML export handles unicode characters.
"""
self.descriptor.display_name = '这是文'
xml = self.descriptor.definition_to_xml(self.file_system)
assert xml.get('display_name') == '这是文'
@ddt.ddt
@patch.object(settings, 'FEATURES', create=True, new={
'FALLBACK_TO_ENGLISH_TRANSCRIPTS': False,
})
class VideoBlockStudentViewDataTestCase(unittest.TestCase):
"""
Make sure that VideoBlock returns the expected student_view_data.
"""
VIDEO_URL_1 = 'http://www.example.com/source_low.mp4'
VIDEO_URL_2 = 'http://www.example.com/source_med.mp4'
VIDEO_URL_3 = 'http://www.example.com/source_high.mp4'
@ddt.data(
        # Ensure no extra data is returned if the video module is configured only for web display.
(
{'only_on_web': True},
{'only_on_web': True},
),
# Ensure that YouTube URLs are included in `encoded_videos`, but not `all_sources`.
(
{
'only_on_web': False,
'youtube_id_1_0': 'abc',
'html5_sources': [VIDEO_URL_2, VIDEO_URL_3],
},
{
'only_on_web': False,
'duration': None,
'transcripts': {},
'encoded_videos': {
'fallback': {'url': VIDEO_URL_2, 'file_size': 0},
'youtube': {'url': 'https://www.youtube.com/watch?v=abc', 'file_size': 0},
},
'all_sources': [VIDEO_URL_2, VIDEO_URL_3],
},
),
)
@ddt.unpack
def test_student_view_data(self, field_data, expected_student_view_data):
"""
Ensure that student_view_data returns the expected results for video modules.
"""
descriptor = instantiate_descriptor(**field_data)
descriptor.runtime.course_id = MagicMock()
student_view_data = descriptor.student_view_data()
assert student_view_data == expected_student_view_data
@patch('xmodule.video_module.video_module.HLSPlaybackEnabledFlag.feature_enabled', Mock(return_value=True))
@patch('xmodule.video_module.transcripts_utils.get_available_transcript_languages', Mock(return_value=['es']))
@patch('edxval.api.get_video_info_for_course_and_profiles', Mock(return_value={}))
@patch('xmodule.video_module.transcripts_utils.get_video_transcript_content')
@patch('edxval.api.get_video_info')
def test_student_view_data_with_hls_flag(self, mock_get_video_info, mock_get_video_transcript_content):
mock_get_video_info.return_value = {
'url': '/edxval/video/example',
'edx_video_id': 'example_id',
'duration': 111.0,
'client_video_id': 'The example video',
'encoded_videos': [
{
'url': 'http://www.meowmix.com',
'file_size': 25556,
'bitrate': 9600,
'profile': 'hls'
}
]
}
mock_get_video_transcript_content.return_value = {
'content': json.dumps({
"start": [10],
"end": [100],
"text": ["Hi, welcome to Edx."],
}),
'file_name': 'edx.sjson'
}
descriptor = instantiate_descriptor(edx_video_id='example_id', only_on_web=False)
descriptor.runtime.course_id = MagicMock()
descriptor.runtime.handler_url = MagicMock()
student_view_data = descriptor.student_view_data()
expected_video_data = {'hls': {'url': 'http://www.meowmix.com', 'file_size': 25556}}
self.assertDictEqual(student_view_data.get('encoded_videos'), expected_video_data)
@ddt.ddt
@patch.object(settings, 'YOUTUBE', create=True, new={
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to get YouTube metadata
'METADATA_URL': 'www.googleapis.com/youtube/v3/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
})
@patch.object(settings, 'CONTENTSTORE', create=True, new={
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'edx.devstack.mongo' if 'BOK_CHOY_HOSTNAME' in os.environ else 'localhost',
'db': 'test_xcontent_%s' % uuid4().hex,
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
})
@patch.object(settings, 'FEATURES', create=True, new={
# The default value in {lms,cms}/envs/common.py and xmodule/tests/test_video.py should be consistent.
'FALLBACK_TO_ENGLISH_TRANSCRIPTS': True,
})
class VideoBlockIndexingTestCase(unittest.TestCase):
"""
Make sure that VideoBlock can format data for indexing as expected.
"""
def test_video_with_no_subs_index_dictionary(self):
"""
Test index dictionary of a video module without subtitles.
"""
xml_data = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
</video>
'''
descriptor = instantiate_descriptor(data=xml_data)
assert descriptor.index_dictionary() == {'content': {'display_name': 'Test Video'}, 'content_type': 'Video'}
@httpretty.activate
def test_video_with_youtube_subs_index_dictionary(self):
"""
Test index dictionary of a video module with YouTube subtitles.
"""
xml_data_sub = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
sub="OEoXaMPEzfM"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
</video>
'''
yt_subs_id = 'OEoXaMPEzfM'
url = f'http://video.google.com/timedtext?lang=en&v={yt_subs_id}'
httpretty.register_uri(
method=httpretty.GET,
uri=url,
body=MOCKED_YOUTUBE_TRANSCRIPT_API_RESPONSE,
content_type='application/xml'
)
descriptor = instantiate_descriptor(data=xml_data_sub)
subs = download_youtube_subs(yt_subs_id, descriptor, settings)
save_subs_to_store(json.loads(subs), yt_subs_id, descriptor)
assert descriptor.index_dictionary() ==\
{'content': {'display_name': 'Test Video', 'transcript_en': YOUTUBE_SUBTITLES}, 'content_type': 'Video'}
@httpretty.activate
def test_video_with_subs_and_transcript_index_dictionary(self):
"""
Test index dictionary of a video module with
YouTube subtitles and German transcript uploaded by a user.
"""
xml_data_sub_transcript = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
sub="OEoXaMPEzfM"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="ge" src="subs_grmtran1.srt" />
</video>
'''
yt_subs_id = 'OEoXaMPEzfM'
url = f'http://video.google.com/timedtext?lang=en&v={yt_subs_id}'
httpretty.register_uri(
method=httpretty.GET,
uri=url,
body=MOCKED_YOUTUBE_TRANSCRIPT_API_RESPONSE,
content_type='application/xml'
)
descriptor = instantiate_descriptor(data=xml_data_sub_transcript)
subs = download_youtube_subs(yt_subs_id, descriptor, settings)
save_subs_to_store(json.loads(subs), yt_subs_id, descriptor)
save_to_store(SRT_FILEDATA, "subs_grmtran1.srt", 'text/srt', descriptor.location)
assert descriptor.index_dictionary() ==\
{'content': {'display_name': 'Test Video', 'transcript_en': YOUTUBE_SUBTITLES,
'transcript_ge': 'sprechen sie deutsch? Ja, ich spreche Deutsch'},
'content_type': 'Video'}
def test_video_with_multiple_transcripts_index_dictionary(self):
"""
Test index dictionary of a video module with
two transcripts uploaded by a user.
"""
xml_data_transcripts = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="ge" src="subs_grmtran1.srt" />
<transcript language="hr" src="subs_croatian1.srt" />
</video>
'''
descriptor = instantiate_descriptor(data=xml_data_transcripts)
save_to_store(SRT_FILEDATA, "subs_grmtran1.srt", 'text/srt', descriptor.location)
save_to_store(CRO_SRT_FILEDATA, "subs_croatian1.srt", 'text/srt', descriptor.location)
assert descriptor.index_dictionary() ==\
{'content': {'display_name': 'Test Video',
'transcript_ge': 'sprechen sie deutsch? Ja, ich spreche Deutsch',
'transcript_hr': 'Dobar dan! Kako ste danas?'}, 'content_type': 'Video'}
def test_video_with_multiple_transcripts_translation_retrieval(self):
"""
Test translation retrieval of a video module with
multiple transcripts uploaded by a user.
"""
xml_data_transcripts = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="ge" src="subs_grmtran1.srt" />
<transcript language="hr" src="subs_croatian1.srt" />
</video>
'''
descriptor = instantiate_descriptor(data=xml_data_transcripts)
translations = descriptor.available_translations(descriptor.get_transcripts_info())
assert sorted(translations) == sorted(['hr', 'ge'])
def test_video_with_no_transcripts_translation_retrieval(self):
"""
Test translation retrieval of a video module with
        no transcripts uploaded by a user, i.e., that retrieval
does not throw an exception.
"""
descriptor = instantiate_descriptor(data=None)
translations_with_fallback = descriptor.available_translations(descriptor.get_transcripts_info())
assert translations_with_fallback == ['en']
with patch.dict(settings.FEATURES, FALLBACK_TO_ENGLISH_TRANSCRIPTS=False):
# Some organizations don't have English transcripts for all videos
# This feature makes it configurable
translations_no_fallback = descriptor.available_translations(descriptor.get_transcripts_info())
assert translations_no_fallback == []
@override_settings(ALL_LANGUAGES=ALL_LANGUAGES)
def test_video_with_language_do_not_have_transcripts_translation(self):
"""
Test translation retrieval of a video module with
a language having no transcripts uploaded by a user.
"""
xml_data_transcripts = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
<transcript language="ur" src="" />
</video>
'''
descriptor = instantiate_descriptor(data=xml_data_transcripts)
translations = descriptor.available_translations(descriptor.get_transcripts_info(), verify_assets=False)
assert translations != ['ur']
def assert_validation_message(self, validation, expected_msg):
"""
Asserts that the validation message has all expected content.
Args:
validation (StudioValidation): A validation object.
expected_msg (string): An expected validation message.
"""
assert not validation.empty
# Validation contains some warning/message
assert validation.summary
assert StudioValidationMessage.WARNING == validation.summary.type
assert expected_msg in validation.summary.text.replace('Urdu, Esperanto', 'Esperanto, Urdu')
@ddt.data(
(
'<transcript language="ur" src="" />',
'There is no transcript file associated with the Urdu language.'
),
(
'<transcript language="eo" src="" /><transcript language="ur" src="" />',
'There are no transcript files associated with the Esperanto, Urdu languages.'
),
)
@ddt.unpack
@override_settings(ALL_LANGUAGES=ALL_LANGUAGES)
def test_no_transcript_validation_message(self, xml_transcripts, expected_validation_msg):
"""
        Test the validation message when no associated transcript file is uploaded.
"""
xml_data_transcripts = '''
<video display_name="Test Video"
youtube="1.0:p2Q6BrNhdh8,0.75:izygArpw-Qo,1.25:1EeWXzPdhSA,1.5:rABDYkeK0x8"
show_captions="false"
download_track="false"
start_time="00:00:01"
download_video="false"
end_time="00:01:00">
<source src="http://www.example.com/source.mp4"/>
<track src="http://www.example.com/track"/>
<handout src="http://www.example.com/handout"/>
{xml_transcripts}
</video>
'''.format(xml_transcripts=xml_transcripts)
descriptor = instantiate_descriptor(data=xml_data_transcripts)
validation = descriptor.validate()
self.assert_validation_message(validation, expected_validation_msg)
def test_video_transcript_none(self):
"""
Test video when transcripts is None.
"""
descriptor = instantiate_descriptor(data=None)
descriptor.transcripts = None
response = descriptor.get_transcripts_info()
expected = {'transcripts': {}, 'sub': ''}
assert expected == response
| agpl-3.0 | -4,999,782,988,000,027,000 | 40.828138 | 206 | 0.583464 | false |
kyukyukyu/dash | dash/public/forms.py | 1 | 1062 | from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired
from dash.user.models import User
class LoginForm(Form):
username = TextField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
def __init__(self, *args, **kwargs):
super(LoginForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
initial_validation = super(LoginForm, self).validate()
if not initial_validation:
return False
self.user = User.query.filter_by(username=self.username.data).first()
if not self.user:
self.username.errors.append('Unknown username')
return False
if not self.user.check_password(self.password.data):
self.password.errors.append('Invalid password')
return False
if not self.user.active:
self.username.errors.append('User not activated')
return False
return True
| bsd-3-clause | -6,477,607,392,999,295,000 | 31.181818 | 77 | 0.648776 | false |
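A minimal usage sketch for the LoginForm above (not part of forms.py; the blueprint, route, template path, 'public.home' endpoint, and Flask-Login usage are assumptions for illustration): the view calls validate_on_submit(), which runs the custom validate() and leaves the authenticated user on form.user.
# Hypothetical view module consuming LoginForm; names below are illustrative only.
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import login_user
from dash.public.forms import LoginForm
blueprint = Blueprint('public', __name__)
@blueprint.route('/login/', methods=['GET', 'POST'])
def login():
    form = LoginForm(request.form)
    if form.validate_on_submit():       # runs LoginForm.validate() under the hood
        login_user(form.user)           # form.user was populated during validation
        flash('You are logged in.')
        return redirect(url_for('public.home'))
    return render_template('public/login.html', form=form)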
badbytes/pymeg | meg/sensorsindexed.py | 1 | 1574 | """Return sensors and headshape positions"""
from pdf2py import pdf
from numpy import zeros, array, size, append, reshape
from pylab import *
import matplotlib.axes3d as p3
import pylab as p
class locations:
def __init__(self, datapdf, channelinstance):
pdfinstance=pdf.read(datapdf)
chlpos=array([]);chupos=array([])
chldir=array([]);chudir=array([])
for i in channelinstance.channelindexcfg:
chlpos=append(chlpos,pdfinstance.cfg.channel_data[i].device_data.loop_data[0].position)
chupos=append(chupos,pdfinstance.cfg.channel_data[i].device_data.loop_data[1].position)
chldir=append(chldir,pdfinstance.cfg.channel_data[i].device_data.loop_data[0].direction)
chudir=append(chudir,pdfinstance.cfg.channel_data[i].device_data.loop_data[1].direction)
        # reshape the flat position/direction arrays into (n_channels, 3)
chlpos=chlpos.reshape(size(chlpos)/3,3)
chupos=chupos.reshape(size(chupos)/3,3)
chldir=chldir.reshape(size(chldir)/3,3)
chudir=chudir.reshape(size(chudir)/3,3)
self.chlpos=chlpos
self.chupos=chupos
self.chldir=chldir
self.chudir=chudir
def plot3d(self):
x=self.chlpos[:,0]
y=self.chlpos[:,1]
z=self.chlpos[:,2]
fig=p.figure()
ax = p3.Axes3D(fig)
ax.scatter(x,y,z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_xlim(-.13,.13)
ax.set_ylim(-.13,.13)
ax.set_zlim(-.13,.13)
p.show()
| gpl-3.0 | -5,065,316,598,131,933,000 | 32.489362 | 100 | 0.605464 | false |
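A hedged usage sketch for the locations class above (the file name and the channel-selection object are placeholders; the pdf2py helper that normally supplies channelindexcfg is not shown in this file):
# Hypothetical driver; 'run.pdf' is a placeholder 4D/BTi dataset and DummyChannels stands
# in for whatever pdf2py helper normally provides .channelindexcfg (channel indices into
# cfg.channel_data of the PDF header).
from meg.sensorsindexed import locations
class DummyChannels(object):
    channelindexcfg = list(range(10))        # assumed indices; real ones come from the data
loc = locations('run.pdf', DummyChannels())  # collects lower/upper coil positions and directions
print(loc.chlpos.shape)                      # -> (n_channels, 3)
loc.plot3d()                                 # 3D scatter of the lower-coil sensor layout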
markjin1990/solr | dev-tools/scripts/addVersion.py | 2 | 8177 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.append(os.path.dirname(__file__))
import scriptutil
import argparse
import io
import re
import subprocess
def update_changes(filename, new_version):
print(' adding new section to %s...' % filename, end='', flush=True)
matcher = re.compile(r'\d+\.\d+\.\d+\s+===')
def edit(buffer, match, line):
if new_version.dot in line:
return None
match = new_version.previous_dot_matcher.search(line)
if match is not None:
buffer.append(line.replace(match.group(0), new_version.dot))
buffer.append('(No Changes)\n\n')
buffer.append(line)
return match is not None
changed = scriptutil.update_file(filename, matcher, edit)
print('done' if changed else 'uptodate')
def add_constant(new_version, deprecate):
filename = 'lucene/core/src/java/org/apache/lucene/util/Version.java'
print(' adding constant %s...' % new_version.constant, end='', flush=True)
constant_prefix = 'public static final Version LUCENE_'
matcher = re.compile(constant_prefix)
prev_matcher = new_version.make_previous_matcher(prefix=constant_prefix, sep='_')
def ensure_deprecated(buffer):
last = buffer[-1]
if last.strip() != '@Deprecated':
spaces = ' ' * (len(last) - len(last.lstrip()) - 1)
buffer[-1] = spaces + (' * @deprecated (%s) Use latest\n' % new_version)
buffer.append(spaces + ' */\n')
buffer.append(spaces + '@Deprecated\n')
def buffer_constant(buffer, line):
spaces = ' ' * (len(line) - len(line.lstrip()))
buffer.append('\n' + spaces + '/**\n')
buffer.append(spaces + ' * Match settings and bugs in Lucene\'s %s release.\n' % new_version)
if deprecate:
buffer.append(spaces + ' * @deprecated Use latest\n')
buffer.append(spaces + ' */\n')
if deprecate:
buffer.append(spaces + '@Deprecated\n')
buffer.append(spaces + 'public static final Version %s = new Version(%d, %d, %d);\n' %
(new_version.constant, new_version.major, new_version.minor, new_version.bugfix))
class Edit(object):
found = -1
def __call__(self, buffer, match, line):
if new_version.constant in line:
return None # constant already exists
      # outer match is just to find lines declaring version constants
match = prev_matcher.search(line)
if match is not None:
ensure_deprecated(buffer) # old version should be deprecated
self.found = len(buffer) + 1 # extra 1 for buffering current line below
elif self.found != -1:
# we didn't match, but we previously had a match, so insert new version here
# first find where to insert (first empty line before current constant)
c = []
buffer_constant(c, line)
tmp = buffer[self.found:]
buffer[self.found:] = c
buffer.extend(tmp)
buffer.append(line)
return True
buffer.append(line)
return False
changed = scriptutil.update_file(filename, matcher, Edit())
print('done' if changed else 'uptodate')
version_prop_re = re.compile(r'version\.base=(.*)')
def update_build_version(new_version):
print(' changing version.base...', end='', flush=True)
filename = 'lucene/version.properties'
def edit(buffer, match, line):
if new_version.dot in line:
return None
buffer.append('version.base=' + new_version.dot + '\n')
return True
changed = scriptutil.update_file(filename, version_prop_re, edit)
print('done' if changed else 'uptodate')
def update_latest_constant(new_version):
print(' changing Version.LATEST to %s...' % new_version.constant, end='', flush=True)
filename = 'lucene/core/src/java/org/apache/lucene/util/Version.java'
matcher = re.compile('public static final Version LATEST')
def edit(buffer, match, line):
if new_version.constant in line:
return None
buffer.append(line.rpartition('=')[0] + ('= %s;\n' % new_version.constant))
return True
changed = scriptutil.update_file(filename, matcher, edit)
print('done' if changed else 'uptodate')
def update_example_solrconfigs(new_version):
print(' updating example solrconfig.xml files')
matcher = re.compile('<luceneMatchVersion>')
for root,dirs,files in os.walk('solr/example'):
for f in files:
if f == 'solrconfig.xml':
update_solrconfig(os.path.join(root, f), matcher, new_version)
def update_solrconfig(filename, matcher, new_version):
print(' %s...' % filename, end='', flush=True)
def edit(buffer, match, line):
if new_version.dot in line:
return None
match = new_version.previous_dot_matcher.search(line)
if match is None:
return False
buffer.append(line.replace(match.group(1), new_version.dot))
return True
changed = scriptutil.update_file(filename, matcher, edit)
print('done' if changed else 'uptodate')
def check_lucene_version_tests():
print(' checking lucene version tests...', end='', flush=True)
base_dir = os.getcwd()
os.chdir('lucene/core')
run('ant test -Dtestcase=TestVersion')
os.chdir(base_dir)
print('ok')
def check_solr_version_tests():
print(' checking solr version tests...', end='', flush=True)
base_dir = os.getcwd()
os.chdir('solr/core')
run('ant test -Dtestcase=TestLuceneMatchVersion')
os.chdir(base_dir)
print('ok')
def read_config():
parser = argparse.ArgumentParser(description='Add a new version')
parser.add_argument('version', type=Version.parse)
parser.add_argument('-c', '--changeid', type=int, help='SVN ChangeId for downstream version change to merge')
parser.add_argument('-r', '--downstream-repo', help='Path to downstream checkout for given changeid')
c = parser.parse_args()
c.branch_type = scriptutil.find_branch_type()
c.matching_branch = c.version.is_bugfix_release() and c.branch_type == 'release' or \
c.version.is_minor_release() and c.branch_type == 'stable' or \
c.branch_type == 'major'
if bool(c.changeid) != bool(c.downstream_repo):
    parser.error('--changeid and --downstream-repo must be used together')
if not c.changeid and not c.matching_branch:
parser.error('Must use --changeid for forward porting bugfix release version to other branches')
if c.changeid and c.matching_branch:
parser.error('Cannot use --changeid on branch that new version will originate on')
if c.changeid and c.version.is_major_release():
parser.error('Cannot use --changeid for major release')
return c
def main():
c = read_config()
if c.changeid:
merge_change(c.changeid, c.downstream_repo)
print('\nAdding new version %s' % c.version)
update_changes('lucene/CHANGES.txt', c.version)
update_changes('solr/CHANGES.txt', c.version)
add_constant(c.version, not c.matching_branch)
if not c.changeid:
print('\nUpdating latest version')
update_build_version(c.version)
update_latest_constant(c.version)
update_example_solrconfigs(c.version)
if c.version.is_major_release():
print('\nTODO: ')
print(' - Move backcompat oldIndexes to unsupportedIndexes in TestBackwardsCompatibility')
print(' - Update IndexFormatTooOldException throw cases')
else:
print('\nTesting changes')
check_lucene_version_tests()
check_solr_version_tests()
print()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nReceived Ctrl-C, exiting early')
| apache-2.0 | -3,070,284,062,893,433,300 | 36.856481 | 111 | 0.678978 | false |
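The edit callbacks above rely on scriptutil.update_file, which is not part of this script; the sketch below is an approximation of the contract they appear to assume (not the real helper): the regex selects candidate lines, the callback returns None when the file is already up to date, True once it has made its change, or False to keep scanning, and the callback is responsible for buffering the matched line itself. Similarly, run, Version and merge_change are referenced but not defined here and are assumed to come from scriptutil or another revision of this script.
# Sketch of the update_file contract assumed by the callbacks above (approximate; the
# real implementation lives in dev-tools/scripts/scriptutil.py, which is not shown here).
def update_file(filename, line_re, edit):
  with open(filename, 'r') as f:
    lines = f.readlines()
  buffer = []
  changed = False
  for line in lines:
    if not changed:
      match = line_re.search(line)
      if match is not None:
        changed = edit(buffer, match, line)
        if changed is None:   # callback says the file is already up to date
          return False
        continue              # callback decided whether to buffer the matched line
    buffer.append(line)
  if changed:
    with open(filename, 'w') as f:
      f.write(''.join(buffer))
  return bool(changed)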
Azure/azure-sdk-for-python | sdk/databox/azure-mgmt-databox/azure/mgmt/databox/v2019_09_01/models/_models_py3.py | 1 | 140334 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._data_box_management_client_enums import *
class AccountCredentialDetails(msrest.serialization.Model):
"""Credential details of the account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar account_name: Name of the account.
:vartype account_name: str
:ivar data_destination_type: Data Destination Type. Possible values include: "StorageAccount",
"ManagedDisk".
:vartype data_destination_type: str or ~azure.mgmt.databox.models.DataDestinationType
:ivar account_connection_string: Connection string of the account endpoint to use the account
as a storage endpoint on the device.
:vartype account_connection_string: str
:ivar share_credential_details: Per share level unencrypted access credentials.
:vartype share_credential_details: list[~azure.mgmt.databox.models.ShareCredentialDetails]
"""
_validation = {
'account_name': {'readonly': True},
'data_destination_type': {'readonly': True},
'account_connection_string': {'readonly': True},
'share_credential_details': {'readonly': True},
}
_attribute_map = {
'account_name': {'key': 'accountName', 'type': 'str'},
'data_destination_type': {'key': 'dataDestinationType', 'type': 'str'},
'account_connection_string': {'key': 'accountConnectionString', 'type': 'str'},
'share_credential_details': {'key': 'shareCredentialDetails', 'type': '[ShareCredentialDetails]'},
}
def __init__(
self,
**kwargs
):
super(AccountCredentialDetails, self).__init__(**kwargs)
self.account_name = None
self.data_destination_type = None
self.account_connection_string = None
self.share_credential_details = None
class AddressValidationOutput(msrest.serialization.Model):
"""Output of the address validation api.
Variables are only populated by the server, and will be ignored when sending a request.
    :param validation_type: Identifies the type of validation response. Constant filled by server.
Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:ivar error: Error code and message of validation response.
:vartype error: ~azure.mgmt.databox.models.Error
:ivar validation_status: The address validation status. Possible values include: "Valid",
"Invalid", "Ambiguous".
:vartype validation_status: str or ~azure.mgmt.databox.models.AddressValidationStatus
:ivar alternate_addresses: List of alternate addresses.
:vartype alternate_addresses: list[~azure.mgmt.databox.models.ShippingAddress]
"""
_validation = {
'error': {'readonly': True},
'validation_status': {'readonly': True},
'alternate_addresses': {'readonly': True},
}
_attribute_map = {
'validation_type': {'key': 'properties.validationType', 'type': 'str'},
'error': {'key': 'properties.error', 'type': 'Error'},
'validation_status': {'key': 'properties.validationStatus', 'type': 'str'},
'alternate_addresses': {'key': 'properties.alternateAddresses', 'type': '[ShippingAddress]'},
}
def __init__(
self,
**kwargs
):
super(AddressValidationOutput, self).__init__(**kwargs)
self.validation_type = None # type: Optional[str]
self.error = None
self.validation_status = None
self.alternate_addresses = None
class ValidationInputResponse(msrest.serialization.Model):
"""Minimum properties that should be present in each individual validation response.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AddressValidationProperties, CreateOrderLimitForSubscriptionValidationResponseProperties, DataDestinationDetailsValidationResponseProperties, PreferencesValidationResponseProperties, SkuAvailabilityValidationResponseProperties, SubscriptionIsAllowedToCreateJobValidationResponseProperties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation response. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:ivar error: Error code and message of validation response.
:vartype error: ~azure.mgmt.databox.models.Error
"""
_validation = {
'validation_type': {'required': True},
'error': {'readonly': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
}
_subtype_map = {
'validation_type': {'ValidateAddress': 'AddressValidationProperties', 'ValidateCreateOrderLimit': 'CreateOrderLimitForSubscriptionValidationResponseProperties', 'ValidateDataDestinationDetails': 'DataDestinationDetailsValidationResponseProperties', 'ValidatePreferences': 'PreferencesValidationResponseProperties', 'ValidateSkuAvailability': 'SkuAvailabilityValidationResponseProperties', 'ValidateSubscriptionIsAllowedToCreateJob': 'SubscriptionIsAllowedToCreateJobValidationResponseProperties'}
}
def __init__(
self,
**kwargs
):
super(ValidationInputResponse, self).__init__(**kwargs)
self.validation_type = None # type: Optional[str]
self.error = None
class AddressValidationProperties(ValidationInputResponse):
"""The address validation output.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation response. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:ivar error: Error code and message of validation response.
:vartype error: ~azure.mgmt.databox.models.Error
:ivar validation_status: The address validation status. Possible values include: "Valid",
"Invalid", "Ambiguous".
:vartype validation_status: str or ~azure.mgmt.databox.models.AddressValidationStatus
:ivar alternate_addresses: List of alternate addresses.
:vartype alternate_addresses: list[~azure.mgmt.databox.models.ShippingAddress]
"""
_validation = {
'validation_type': {'required': True},
'error': {'readonly': True},
'validation_status': {'readonly': True},
'alternate_addresses': {'readonly': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'validation_status': {'key': 'validationStatus', 'type': 'str'},
'alternate_addresses': {'key': 'alternateAddresses', 'type': '[ShippingAddress]'},
}
def __init__(
self,
**kwargs
):
super(AddressValidationProperties, self).__init__(**kwargs)
self.validation_type = 'ValidateAddress' # type: str
self.validation_status = None
self.alternate_addresses = None
class ApplianceNetworkConfiguration(msrest.serialization.Model):
"""The Network Adapter configuration of a DataBox.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the network.
:vartype name: str
:ivar mac_address: Mac Address.
:vartype mac_address: str
"""
_validation = {
'name': {'readonly': True},
'mac_address': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'mac_address': {'key': 'macAddress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ApplianceNetworkConfiguration, self).__init__(**kwargs)
self.name = None
self.mac_address = None
class ArmBaseObject(msrest.serialization.Model):
"""Base class for all objects under resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the object.
:vartype name: str
:ivar id: Id of the object.
:vartype id: str
:ivar type: Type of the object.
:vartype type: str
"""
_validation = {
'name': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ArmBaseObject, self).__init__(**kwargs)
self.name = None
self.id = None
self.type = None
class AvailableSkuRequest(msrest.serialization.Model):
"""The filters for showing the available skus.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar transfer_type: Required. Type of the transfer. Default value: "ImportToAzure".
:vartype transfer_type: str
:param country: Required. ISO country code. Country for hardware shipment. For codes check:
https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements.
:type country: str
:param location: Required. Location for data transfer. For locations check:
https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-version=2018-01-01.
:type location: str
:param sku_names: Sku Names to filter for available skus.
:type sku_names: list[str or ~azure.mgmt.databox.models.SkuName]
"""
_validation = {
'transfer_type': {'required': True, 'constant': True},
'country': {'required': True},
'location': {'required': True},
}
_attribute_map = {
'transfer_type': {'key': 'transferType', 'type': 'str'},
'country': {'key': 'country', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'sku_names': {'key': 'skuNames', 'type': '[str]'},
}
transfer_type = "ImportToAzure"
def __init__(
self,
*,
country: str,
location: str,
sku_names: Optional[List[Union[str, "SkuName"]]] = None,
**kwargs
):
super(AvailableSkuRequest, self).__init__(**kwargs)
self.country = country
self.location = location
self.sku_names = sku_names
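# Hedged usage sketch (not generated code): how the request model above is typically
# populated. The string values and the filter are illustrative; the service operation
# that consumes the request is not part of this models file and is omitted.
def _example_available_sku_request() -> "AvailableSkuRequest":
    return AvailableSkuRequest(
        country="US",           # ISO 3166-1 alpha-2 country code for hardware shipment
        location="westus",      # Azure location for the data transfer
        sku_names=["DataBox"],  # optional filter; accepts plain strings or SkuName values
    )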
class AvailableSkusResult(msrest.serialization.Model):
"""The available skus operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of available skus.
:vartype value: list[~azure.mgmt.databox.models.SkuInformation]
:param next_link: Link for the next set of skus.
:type next_link: str
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SkuInformation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
**kwargs
):
super(AvailableSkusResult, self).__init__(**kwargs)
self.value = None
self.next_link = next_link
class CancellationReason(msrest.serialization.Model):
"""Reason for cancellation.
All required parameters must be populated in order to send to Azure.
:param reason: Required. Reason for cancellation.
:type reason: str
"""
_validation = {
'reason': {'required': True},
}
_attribute_map = {
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(
self,
*,
reason: str,
**kwargs
):
super(CancellationReason, self).__init__(**kwargs)
self.reason = reason
class CloudError(msrest.serialization.Model):
"""The error information object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code string.
:vartype code: str
:ivar message: Descriptive error information.
:vartype message: str
:param target: Error target.
:type target: str
:param details: More detailed error information.
:type details: list[~azure.mgmt.databox.models.CloudError]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudError]'},
}
def __init__(
self,
*,
target: Optional[str] = None,
details: Optional[List["CloudError"]] = None,
**kwargs
):
super(CloudError, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = target
self.details = details
class ContactDetails(msrest.serialization.Model):
"""Contact Details.
All required parameters must be populated in order to send to Azure.
:param contact_name: Required. Contact name of the person.
:type contact_name: str
:param phone: Required. Phone number of the contact person.
:type phone: str
:param phone_extension: Phone extension number of the contact person.
:type phone_extension: str
:param mobile: Mobile number of the contact person.
:type mobile: str
:param email_list: Required. List of Email-ids to be notified about job progress.
:type email_list: list[str]
:param notification_preference: Notification preference for a job stage.
:type notification_preference: list[~azure.mgmt.databox.models.NotificationPreference]
"""
_validation = {
'contact_name': {'required': True},
'phone': {'required': True},
'email_list': {'required': True},
}
_attribute_map = {
'contact_name': {'key': 'contactName', 'type': 'str'},
'phone': {'key': 'phone', 'type': 'str'},
'phone_extension': {'key': 'phoneExtension', 'type': 'str'},
'mobile': {'key': 'mobile', 'type': 'str'},
'email_list': {'key': 'emailList', 'type': '[str]'},
'notification_preference': {'key': 'notificationPreference', 'type': '[NotificationPreference]'},
}
def __init__(
self,
*,
contact_name: str,
phone: str,
email_list: List[str],
phone_extension: Optional[str] = None,
mobile: Optional[str] = None,
notification_preference: Optional[List["NotificationPreference"]] = None,
**kwargs
):
super(ContactDetails, self).__init__(**kwargs)
self.contact_name = contact_name
self.phone = phone
self.phone_extension = phone_extension
self.mobile = mobile
self.email_list = email_list
self.notification_preference = notification_preference
class CopyLogDetails(msrest.serialization.Model):
"""Details for log generated during copy.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DataBoxAccountCopyLogDetails, DataBoxDiskCopyLogDetails, DataBoxHeavyAccountCopyLogDetails.
All required parameters must be populated in order to send to Azure.
    :param copy_log_details_type: Required. Indicates the type of job details. Constant filled by
server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type copy_log_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
"""
_validation = {
'copy_log_details_type': {'required': True},
}
_attribute_map = {
'copy_log_details_type': {'key': 'copyLogDetailsType', 'type': 'str'},
}
_subtype_map = {
'copy_log_details_type': {'DataBox': 'DataBoxAccountCopyLogDetails', 'DataBoxDisk': 'DataBoxDiskCopyLogDetails', 'DataBoxHeavy': 'DataBoxHeavyAccountCopyLogDetails'}
}
def __init__(
self,
**kwargs
):
super(CopyLogDetails, self).__init__(**kwargs)
self.copy_log_details_type = None # type: Optional[str]
class CopyProgress(msrest.serialization.Model):
"""Copy progress.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar storage_account_name: Name of the storage account where the data needs to be uploaded.
:vartype storage_account_name: str
:ivar data_destination_type: Data Destination Type. Possible values include: "StorageAccount",
"ManagedDisk".
:vartype data_destination_type: str or ~azure.mgmt.databox.models.DataDestinationType
:ivar account_id: Id of the account where the data needs to be uploaded.
:vartype account_id: str
:ivar bytes_sent_to_cloud: Amount of data uploaded by the job as of now.
:vartype bytes_sent_to_cloud: long
:ivar total_bytes_to_process: Total amount of data to be processed by the job.
:vartype total_bytes_to_process: long
:ivar files_processed: Number of files processed by the job as of now.
:vartype files_processed: long
:ivar total_files_to_process: Total number of files to be processed by the job.
:vartype total_files_to_process: long
:ivar invalid_files_processed: Number of files not adhering to azure naming conventions which
were processed by automatic renaming.
:vartype invalid_files_processed: long
:ivar invalid_file_bytes_uploaded: Total amount of data not adhering to azure naming
conventions which were processed by automatic renaming.
:vartype invalid_file_bytes_uploaded: long
:ivar renamed_container_count: Number of folders not adhering to azure naming conventions which
were processed by automatic renaming.
:vartype renamed_container_count: long
:ivar files_errored_out: Number of files which could not be copied.
:vartype files_errored_out: long
"""
_validation = {
'storage_account_name': {'readonly': True},
'data_destination_type': {'readonly': True},
'account_id': {'readonly': True},
'bytes_sent_to_cloud': {'readonly': True},
'total_bytes_to_process': {'readonly': True},
'files_processed': {'readonly': True},
'total_files_to_process': {'readonly': True},
'invalid_files_processed': {'readonly': True},
'invalid_file_bytes_uploaded': {'readonly': True},
'renamed_container_count': {'readonly': True},
'files_errored_out': {'readonly': True},
}
_attribute_map = {
'storage_account_name': {'key': 'storageAccountName', 'type': 'str'},
'data_destination_type': {'key': 'dataDestinationType', 'type': 'str'},
'account_id': {'key': 'accountId', 'type': 'str'},
'bytes_sent_to_cloud': {'key': 'bytesSentToCloud', 'type': 'long'},
'total_bytes_to_process': {'key': 'totalBytesToProcess', 'type': 'long'},
'files_processed': {'key': 'filesProcessed', 'type': 'long'},
'total_files_to_process': {'key': 'totalFilesToProcess', 'type': 'long'},
'invalid_files_processed': {'key': 'invalidFilesProcessed', 'type': 'long'},
'invalid_file_bytes_uploaded': {'key': 'invalidFileBytesUploaded', 'type': 'long'},
'renamed_container_count': {'key': 'renamedContainerCount', 'type': 'long'},
'files_errored_out': {'key': 'filesErroredOut', 'type': 'long'},
}
def __init__(
self,
**kwargs
):
super(CopyProgress, self).__init__(**kwargs)
self.storage_account_name = None
self.data_destination_type = None
self.account_id = None
self.bytes_sent_to_cloud = None
self.total_bytes_to_process = None
self.files_processed = None
self.total_files_to_process = None
self.invalid_files_processed = None
self.invalid_file_bytes_uploaded = None
self.renamed_container_count = None
self.files_errored_out = None
class ValidationRequest(msrest.serialization.Model):
"""Input request for all pre job creation validation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CreateJobValidations.
All required parameters must be populated in order to send to Azure.
    :param individual_request_details: Required. List of request details containing the validationType and
its request as key and value respectively.
:type individual_request_details: list[~azure.mgmt.databox.models.ValidationInputRequest]
    :param validation_category: Required. Identifies the nature of validation. Constant filled by
server.
:type validation_category: str
"""
_validation = {
'individual_request_details': {'required': True},
'validation_category': {'required': True},
}
_attribute_map = {
'individual_request_details': {'key': 'individualRequestDetails', 'type': '[ValidationInputRequest]'},
'validation_category': {'key': 'validationCategory', 'type': 'str'},
}
_subtype_map = {
'validation_category': {'JobCreationValidation': 'CreateJobValidations'}
}
def __init__(
self,
*,
individual_request_details: List["ValidationInputRequest"],
**kwargs
):
super(ValidationRequest, self).__init__(**kwargs)
self.individual_request_details = individual_request_details
self.validation_category = None # type: Optional[str]
class CreateJobValidations(ValidationRequest):
"""It does all pre-job creation validations.
All required parameters must be populated in order to send to Azure.
    :param individual_request_details: Required. List of request details containing the validationType and
its request as key and value respectively.
:type individual_request_details: list[~azure.mgmt.databox.models.ValidationInputRequest]
    :param validation_category: Required. Identifies the nature of validation. Constant filled by
server.
:type validation_category: str
"""
_validation = {
'individual_request_details': {'required': True},
'validation_category': {'required': True},
}
_attribute_map = {
'individual_request_details': {'key': 'individualRequestDetails', 'type': '[ValidationInputRequest]'},
'validation_category': {'key': 'validationCategory', 'type': 'str'},
}
def __init__(
self,
*,
individual_request_details: List["ValidationInputRequest"],
**kwargs
):
super(CreateJobValidations, self).__init__(individual_request_details=individual_request_details, **kwargs)
self.validation_category = 'JobCreationValidation' # type: str
class ValidationInputRequest(msrest.serialization.Model):
"""Minimum fields that must be present in any type of validation request.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ValidateAddress, CreateOrderLimitForSubscriptionValidationRequest, DataDestinationDetailsValidationRequest, PreferencesValidationRequest, SkuAvailabilityValidationRequest, SubscriptionIsAllowedToCreateJobValidationRequest.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation request. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
"""
_validation = {
'validation_type': {'required': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
}
_subtype_map = {
'validation_type': {'ValidateAddress': 'ValidateAddress', 'ValidateCreateOrderLimit': 'CreateOrderLimitForSubscriptionValidationRequest', 'ValidateDataDestinationDetails': 'DataDestinationDetailsValidationRequest', 'ValidatePreferences': 'PreferencesValidationRequest', 'ValidateSkuAvailability': 'SkuAvailabilityValidationRequest', 'ValidateSubscriptionIsAllowedToCreateJob': 'SubscriptionIsAllowedToCreateJobValidationRequest'}
}
def __init__(
self,
**kwargs
):
super(ValidationInputRequest, self).__init__(**kwargs)
self.validation_type = None # type: Optional[str]
class CreateOrderLimitForSubscriptionValidationRequest(ValidationInputRequest):
"""Request to validate create order limit for current subscription.
All required parameters must be populated in order to send to Azure.
:param validation_type: Required. Identifies the type of validation request. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:param device_type: Required. Device type to be used for the job. Possible values include:
"DataBox", "DataBoxDisk", "DataBoxHeavy".
:type device_type: str or ~azure.mgmt.databox.models.SkuName
"""
_validation = {
'validation_type': {'required': True},
'device_type': {'required': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'device_type': {'key': 'deviceType', 'type': 'str'},
}
def __init__(
self,
*,
device_type: Union[str, "SkuName"],
**kwargs
):
super(CreateOrderLimitForSubscriptionValidationRequest, self).__init__(**kwargs)
self.validation_type = 'ValidateCreateOrderLimit' # type: str
self.device_type = device_type
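# Usage sketch (illustrative only; not part of the generated client surface).
# A minimal look at how the discriminator behaves for this leaf type: the
# constructor pins validation_type to 'ValidateCreateOrderLimit', and msrest's
# Model.serialize() (assumed available from the msrest base class) emits it
# under the wire name 'validationType' taken from _attribute_map.
def _example_order_limit_request_payload():
    request = CreateOrderLimitForSubscriptionValidationRequest(device_type="DataBoxDisk")
    payload = request.serialize()
    # Approximate expected shape:
    # {'validationType': 'ValidateCreateOrderLimit', 'deviceType': 'DataBoxDisk'}
    return payload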
class CreateOrderLimitForSubscriptionValidationResponseProperties(ValidationInputResponse):
"""Properties of create order limit for subscription validation response.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param validation_type: Required. Identifies the type of validation response. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:ivar error: Error code and message of validation response.
:vartype error: ~azure.mgmt.databox.models.Error
:ivar status: Create order limit validation status. Possible values include: "Valid",
"Invalid", "Skipped".
:vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
"""
_validation = {
'validation_type': {'required': True},
'error': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CreateOrderLimitForSubscriptionValidationResponseProperties, self).__init__(**kwargs)
self.validation_type = 'ValidateCreateOrderLimit' # type: str
self.status = None
class DataBoxAccountCopyLogDetails(CopyLogDetails):
"""Copy log details for a storage account of a DataBox job.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param copy_log_details_type: Required. Indicates the type of job details. Constant filled by
server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type copy_log_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:ivar account_name: Destination account name.
:vartype account_name: str
:ivar copy_log_link: Link for copy logs.
:vartype copy_log_link: str
"""
_validation = {
'copy_log_details_type': {'required': True},
'account_name': {'readonly': True},
'copy_log_link': {'readonly': True},
}
_attribute_map = {
'copy_log_details_type': {'key': 'copyLogDetailsType', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'copy_log_link': {'key': 'copyLogLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxAccountCopyLogDetails, self).__init__(**kwargs)
self.copy_log_details_type = 'DataBox' # type: str
self.account_name = None
self.copy_log_link = None
class DataBoxDiskCopyLogDetails(CopyLogDetails):
"""Copy Log Details for a disk.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param copy_log_details_type: Required. Indicates the type of job details. Constant filled by
server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type copy_log_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:ivar disk_serial_number: Disk Serial Number.
:vartype disk_serial_number: str
:ivar error_log_link: Link for copy error logs.
:vartype error_log_link: str
:ivar verbose_log_link: Link for copy verbose logs.
:vartype verbose_log_link: str
"""
_validation = {
'copy_log_details_type': {'required': True},
'disk_serial_number': {'readonly': True},
'error_log_link': {'readonly': True},
'verbose_log_link': {'readonly': True},
}
_attribute_map = {
'copy_log_details_type': {'key': 'copyLogDetailsType', 'type': 'str'},
'disk_serial_number': {'key': 'diskSerialNumber', 'type': 'str'},
'error_log_link': {'key': 'errorLogLink', 'type': 'str'},
'verbose_log_link': {'key': 'verboseLogLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxDiskCopyLogDetails, self).__init__(**kwargs)
self.copy_log_details_type = 'DataBoxDisk' # type: str
self.disk_serial_number = None
self.error_log_link = None
self.verbose_log_link = None
class DataBoxDiskCopyProgress(msrest.serialization.Model):
"""DataBox Disk Copy Progress.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar serial_number: The serial number of the disk.
:vartype serial_number: str
:ivar bytes_copied: Bytes copied during the copy of disk.
:vartype bytes_copied: long
:ivar percent_complete: Indicates the percentage completed for the copy of the disk.
:vartype percent_complete: int
:ivar status: The Status of the copy. Possible values include: "NotStarted", "InProgress",
"Completed", "CompletedWithErrors", "Failed", "NotReturned", "HardwareError",
"DeviceFormatted", "DeviceMetadataModified", "StorageAccountNotAccessible", "UnsupportedData".
:vartype status: str or ~azure.mgmt.databox.models.CopyStatus
"""
_validation = {
'serial_number': {'readonly': True},
'bytes_copied': {'readonly': True},
'percent_complete': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'serial_number': {'key': 'serialNumber', 'type': 'str'},
'bytes_copied': {'key': 'bytesCopied', 'type': 'long'},
'percent_complete': {'key': 'percentComplete', 'type': 'int'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxDiskCopyProgress, self).__init__(**kwargs)
self.serial_number = None
self.bytes_copied = None
self.percent_complete = None
self.status = None
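# Usage sketch (illustrative only; not part of the generated client surface).
# Copy progress is populated only by the service, so client code normally sees
# it after deserialization. This sketch assumes msrest's Model.deserialize
# classmethod is available on the base class and that the input dict uses the
# wire (RestAPI) key casing shown in _attribute_map.
def _example_read_disk_copy_progress(raw):
    # raw is a response-shaped dict such as:
    # {'serialNumber': 'SN-001', 'bytesCopied': 1024,
    #  'percentComplete': 10, 'status': 'InProgress'}
    progress = DataBoxDiskCopyProgress.deserialize(raw)
    return (progress.serial_number, progress.percent_complete, progress.status)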
class JobDetails(msrest.serialization.Model):
"""Job details.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DataBoxJobDetails, DataBoxDiskJobDetails, DataBoxHeavyJobDetails.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param expected_data_size_in_terabytes: The expected size of the data, which needs to be
transferred in this job, in terabytes.
:type expected_data_size_in_terabytes: int
:ivar job_stages: List of stages that run in the job.
:vartype job_stages: list[~azure.mgmt.databox.models.JobStages]
:param contact_details: Required. Contact details for notification and shipping.
:type contact_details: ~azure.mgmt.databox.models.ContactDetails
:param shipping_address: Required. Shipping address of the customer.
:type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
:ivar delivery_package: Delivery package shipping details.
:vartype delivery_package: ~azure.mgmt.databox.models.PackageShippingDetails
:ivar return_package: Return package shipping details.
:vartype return_package: ~azure.mgmt.databox.models.PackageShippingDetails
:param destination_account_details: Required. Destination account details.
:type destination_account_details: list[~azure.mgmt.databox.models.DestinationAccountDetails]
:ivar error_details: Error details for failure. This is optional.
:vartype error_details: list[~azure.mgmt.databox.models.JobErrorDetails]
:param job_details_type: Required. Indicates the type of job details. Constant filled by server.
Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type job_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:param preferences: Preferences for the order.
:type preferences: ~azure.mgmt.databox.models.Preferences
:ivar copy_log_details: List of copy log details.
:vartype copy_log_details: list[~azure.mgmt.databox.models.CopyLogDetails]
:ivar reverse_shipment_label_sas_key: Shared access key to download the return shipment label.
:vartype reverse_shipment_label_sas_key: str
:ivar chain_of_custody_sas_key: Shared access key to download the chain of custody logs.
:vartype chain_of_custody_sas_key: str
"""
_validation = {
'job_stages': {'readonly': True},
'contact_details': {'required': True},
'shipping_address': {'required': True},
'delivery_package': {'readonly': True},
'return_package': {'readonly': True},
'destination_account_details': {'required': True},
'error_details': {'readonly': True},
'job_details_type': {'required': True},
'copy_log_details': {'readonly': True},
'reverse_shipment_label_sas_key': {'readonly': True},
'chain_of_custody_sas_key': {'readonly': True},
}
_attribute_map = {
'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
'job_stages': {'key': 'jobStages', 'type': '[JobStages]'},
'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
'delivery_package': {'key': 'deliveryPackage', 'type': 'PackageShippingDetails'},
'return_package': {'key': 'returnPackage', 'type': 'PackageShippingDetails'},
'destination_account_details': {'key': 'destinationAccountDetails', 'type': '[DestinationAccountDetails]'},
'error_details': {'key': 'errorDetails', 'type': '[JobErrorDetails]'},
'job_details_type': {'key': 'jobDetailsType', 'type': 'str'},
'preferences': {'key': 'preferences', 'type': 'Preferences'},
'copy_log_details': {'key': 'copyLogDetails', 'type': '[CopyLogDetails]'},
'reverse_shipment_label_sas_key': {'key': 'reverseShipmentLabelSasKey', 'type': 'str'},
'chain_of_custody_sas_key': {'key': 'chainOfCustodySasKey', 'type': 'str'},
}
_subtype_map = {
'job_details_type': {'DataBox': 'DataBoxJobDetails', 'DataBoxDisk': 'DataBoxDiskJobDetails', 'DataBoxHeavy': 'DataBoxHeavyJobDetails'}
}
def __init__(
self,
*,
contact_details: "ContactDetails",
shipping_address: "ShippingAddress",
destination_account_details: List["DestinationAccountDetails"],
expected_data_size_in_terabytes: Optional[int] = None,
preferences: Optional["Preferences"] = None,
**kwargs
):
super(JobDetails, self).__init__(**kwargs)
self.expected_data_size_in_terabytes = expected_data_size_in_terabytes
self.job_stages = None
self.contact_details = contact_details
self.shipping_address = shipping_address
self.delivery_package = None
self.return_package = None
self.destination_account_details = destination_account_details
self.error_details = None
self.job_details_type = None # type: Optional[str]
self.preferences = preferences
self.copy_log_details = None
self.reverse_shipment_label_sas_key = None
self.chain_of_custody_sas_key = None
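# Usage sketch (illustrative only; not part of the generated client surface).
# The _subtype_map above is what lets msrest pick the concrete class during
# deserialization: a payload whose 'jobDetailsType' is 'DataBoxDisk' comes back
# as DataBoxDiskJobDetails even though deserialize() is called on the base
# class (assuming msrest's standard polymorphic handling).
def _example_polymorphic_job_details(raw_details):
    details = JobDetails.deserialize(raw_details)
    # isinstance(details, DataBoxDiskJobDetails) holds when
    # raw_details.get('jobDetailsType') == 'DataBoxDisk'
    return details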
class DataBoxDiskJobDetails(JobDetails):
"""DataBox Disk Job Details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param expected_data_size_in_terabytes: The expected size of the data, which needs to be
transferred in this job, in terabytes.
:type expected_data_size_in_terabytes: int
:ivar job_stages: List of stages that run in the job.
:vartype job_stages: list[~azure.mgmt.databox.models.JobStages]
:param contact_details: Required. Contact details for notification and shipping.
:type contact_details: ~azure.mgmt.databox.models.ContactDetails
:param shipping_address: Required. Shipping address of the customer.
:type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
:ivar delivery_package: Delivery package shipping details.
:vartype delivery_package: ~azure.mgmt.databox.models.PackageShippingDetails
:ivar return_package: Return package shipping details.
:vartype return_package: ~azure.mgmt.databox.models.PackageShippingDetails
:param destination_account_details: Required. Destination account details.
:type destination_account_details: list[~azure.mgmt.databox.models.DestinationAccountDetails]
:ivar error_details: Error details for failure. This is optional.
:vartype error_details: list[~azure.mgmt.databox.models.JobErrorDetails]
:param job_details_type: Required. Indicates the type of job details. Constant filled by server.
Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type job_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:param preferences: Preferences for the order.
:type preferences: ~azure.mgmt.databox.models.Preferences
:ivar copy_log_details: List of copy log details.
:vartype copy_log_details: list[~azure.mgmt.databox.models.CopyLogDetails]
:ivar reverse_shipment_label_sas_key: Shared access key to download the return shipment label.
:vartype reverse_shipment_label_sas_key: str
:ivar chain_of_custody_sas_key: Shared access key to download the chain of custody logs.
:vartype chain_of_custody_sas_key: str
:param preferred_disks: User preference on what size disks are needed for the job. The map is
from the disk size in TB to the count, e.g. {"2": 5} means five disks of 2 TB each. The key is a
string but will be checked against an int.
:type preferred_disks: dict[str, int]
:ivar copy_progress: Copy progress per disk.
:vartype copy_progress: list[~azure.mgmt.databox.models.DataBoxDiskCopyProgress]
:ivar disks_and_size_details: Contains the map of disk serial number to the disk size being
used for the job. Is returned only after the disks are shipped to the customer.
:vartype disks_and_size_details: dict[str, int]
:param passkey: User entered passkey for DataBox Disk job.
:type passkey: str
"""
_validation = {
'job_stages': {'readonly': True},
'contact_details': {'required': True},
'shipping_address': {'required': True},
'delivery_package': {'readonly': True},
'return_package': {'readonly': True},
'destination_account_details': {'required': True},
'error_details': {'readonly': True},
'job_details_type': {'required': True},
'copy_log_details': {'readonly': True},
'reverse_shipment_label_sas_key': {'readonly': True},
'chain_of_custody_sas_key': {'readonly': True},
'copy_progress': {'readonly': True},
'disks_and_size_details': {'readonly': True},
}
_attribute_map = {
'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
'job_stages': {'key': 'jobStages', 'type': '[JobStages]'},
'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
'delivery_package': {'key': 'deliveryPackage', 'type': 'PackageShippingDetails'},
'return_package': {'key': 'returnPackage', 'type': 'PackageShippingDetails'},
'destination_account_details': {'key': 'destinationAccountDetails', 'type': '[DestinationAccountDetails]'},
'error_details': {'key': 'errorDetails', 'type': '[JobErrorDetails]'},
'job_details_type': {'key': 'jobDetailsType', 'type': 'str'},
'preferences': {'key': 'preferences', 'type': 'Preferences'},
'copy_log_details': {'key': 'copyLogDetails', 'type': '[CopyLogDetails]'},
'reverse_shipment_label_sas_key': {'key': 'reverseShipmentLabelSasKey', 'type': 'str'},
'chain_of_custody_sas_key': {'key': 'chainOfCustodySasKey', 'type': 'str'},
'preferred_disks': {'key': 'preferredDisks', 'type': '{int}'},
'copy_progress': {'key': 'copyProgress', 'type': '[DataBoxDiskCopyProgress]'},
'disks_and_size_details': {'key': 'disksAndSizeDetails', 'type': '{int}'},
'passkey': {'key': 'passkey', 'type': 'str'},
}
def __init__(
self,
*,
contact_details: "ContactDetails",
shipping_address: "ShippingAddress",
destination_account_details: List["DestinationAccountDetails"],
expected_data_size_in_terabytes: Optional[int] = None,
preferences: Optional["Preferences"] = None,
preferred_disks: Optional[Dict[str, int]] = None,
passkey: Optional[str] = None,
**kwargs
):
super(DataBoxDiskJobDetails, self).__init__(expected_data_size_in_terabytes=expected_data_size_in_terabytes, contact_details=contact_details, shipping_address=shipping_address, destination_account_details=destination_account_details, preferences=preferences, **kwargs)
self.job_details_type = 'DataBoxDisk' # type: str
self.preferred_disks = preferred_disks
self.copy_progress = None
self.disks_and_size_details = None
self.passkey = passkey
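# Usage sketch (illustrative only; not part of the generated client surface).
# Building disk job details for an order of five 8 TB disks. ContactDetails,
# ShippingAddress and DestinationAccountDetails are models defined elsewhere in
# this module; the pre-built instances passed in here, the passkey and the
# sizes are assumptions of this sketch, not values the SDK provides.
def _example_build_disk_job_details(contact, address, destinations):
    return DataBoxDiskJobDetails(
        contact_details=contact,
        shipping_address=address,
        destination_account_details=destinations,
        expected_data_size_in_terabytes=40,
        preferred_disks={"8": 5},   # five disks of 8 TB each
        passkey="example-passkey",  # illustrative value only
    )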
class JobSecrets(msrest.serialization.Model):
"""The base class for the secrets.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DataboxJobSecrets, DataBoxDiskJobSecrets, DataBoxHeavyJobSecrets.
All required parameters must be populated in order to send to Azure.
:param job_secrets_type: Required. Used to indicate the type of job secrets object. Constant
filled by server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type job_secrets_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:param dc_access_security_code: Dc Access Security Code for Customer Managed Shipping.
:type dc_access_security_code: ~azure.mgmt.databox.models.DcAccessSecurityCode
"""
_validation = {
'job_secrets_type': {'required': True},
}
_attribute_map = {
'job_secrets_type': {'key': 'jobSecretsType', 'type': 'str'},
'dc_access_security_code': {'key': 'dcAccessSecurityCode', 'type': 'DcAccessSecurityCode'},
}
_subtype_map = {
'job_secrets_type': {'DataBox': 'DataboxJobSecrets', 'DataBoxDisk': 'DataBoxDiskJobSecrets', 'DataBoxHeavy': 'DataBoxHeavyJobSecrets'}
}
def __init__(
self,
*,
dc_access_security_code: Optional["DcAccessSecurityCode"] = None,
**kwargs
):
super(JobSecrets, self).__init__(**kwargs)
self.job_secrets_type = None # type: Optional[str]
self.dc_access_security_code = dc_access_security_code
class DataBoxDiskJobSecrets(JobSecrets):
"""The secrets related to disk job.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param job_secrets_type: Required. Used to indicate the type of job secrets object. Constant
filled by server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type job_secrets_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:param dc_access_security_code: Dc Access Security Code for Customer Managed Shipping.
:type dc_access_security_code: ~azure.mgmt.databox.models.DcAccessSecurityCode
:ivar disk_secrets: Contains the list of secrets object for that device.
:vartype disk_secrets: list[~azure.mgmt.databox.models.DiskSecret]
:ivar pass_key: PassKey for the disk Job.
:vartype pass_key: str
:ivar is_passkey_user_defined: Whether passkey was provided by user.
:vartype is_passkey_user_defined: bool
"""
_validation = {
'job_secrets_type': {'required': True},
'disk_secrets': {'readonly': True},
'pass_key': {'readonly': True},
'is_passkey_user_defined': {'readonly': True},
}
_attribute_map = {
'job_secrets_type': {'key': 'jobSecretsType', 'type': 'str'},
'dc_access_security_code': {'key': 'dcAccessSecurityCode', 'type': 'DcAccessSecurityCode'},
'disk_secrets': {'key': 'diskSecrets', 'type': '[DiskSecret]'},
'pass_key': {'key': 'passKey', 'type': 'str'},
'is_passkey_user_defined': {'key': 'isPasskeyUserDefined', 'type': 'bool'},
}
def __init__(
self,
*,
dc_access_security_code: Optional["DcAccessSecurityCode"] = None,
**kwargs
):
super(DataBoxDiskJobSecrets, self).__init__(dc_access_security_code=dc_access_security_code, **kwargs)
self.job_secrets_type = 'DataBoxDisk' # type: str
self.disk_secrets = None
self.pass_key = None
self.is_passkey_user_defined = None
class DataBoxHeavyAccountCopyLogDetails(CopyLogDetails):
"""Copy log details for a storage account for Databox heavy.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param copy_log_details_type: Required. Indicates the type of job details. Constant filled by
server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type copy_log_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:ivar account_name: Destination account name.
:vartype account_name: str
:ivar copy_log_link: Link for copy logs.
:vartype copy_log_link: list[str]
"""
_validation = {
'copy_log_details_type': {'required': True},
'account_name': {'readonly': True},
'copy_log_link': {'readonly': True},
}
_attribute_map = {
'copy_log_details_type': {'key': 'copyLogDetailsType', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'copy_log_link': {'key': 'copyLogLink', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(DataBoxHeavyAccountCopyLogDetails, self).__init__(**kwargs)
self.copy_log_details_type = 'DataBoxHeavy' # type: str
self.account_name = None
self.copy_log_link = None
class DataBoxHeavyJobDetails(JobDetails):
"""Databox Heavy Device Job Details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param expected_data_size_in_terabytes: The expected size of the data, which needs to be
transferred in this job, in terabytes.
:type expected_data_size_in_terabytes: int
:ivar job_stages: List of stages that run in the job.
:vartype job_stages: list[~azure.mgmt.databox.models.JobStages]
:param contact_details: Required. Contact details for notification and shipping.
:type contact_details: ~azure.mgmt.databox.models.ContactDetails
:param shipping_address: Required. Shipping address of the customer.
:type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
:ivar delivery_package: Delivery package shipping details.
:vartype delivery_package: ~azure.mgmt.databox.models.PackageShippingDetails
:ivar return_package: Return package shipping details.
:vartype return_package: ~azure.mgmt.databox.models.PackageShippingDetails
:param destination_account_details: Required. Destination account details.
:type destination_account_details: list[~azure.mgmt.databox.models.DestinationAccountDetails]
:ivar error_details: Error details for failure. This is optional.
:vartype error_details: list[~azure.mgmt.databox.models.JobErrorDetails]
:param job_details_type: Required. Indicates the type of job details. Constant filled by server.
Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type job_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:param preferences: Preferences for the order.
:type preferences: ~azure.mgmt.databox.models.Preferences
:ivar copy_log_details: List of copy log details.
:vartype copy_log_details: list[~azure.mgmt.databox.models.CopyLogDetails]
:ivar reverse_shipment_label_sas_key: Shared access key to download the return shipment label.
:vartype reverse_shipment_label_sas_key: str
:ivar chain_of_custody_sas_key: Shared access key to download the chain of custody logs.
:vartype chain_of_custody_sas_key: str
:ivar copy_progress: Copy progress per account.
:vartype copy_progress: list[~azure.mgmt.databox.models.CopyProgress]
:param device_password: Set Device password for unlocking Databox Heavy.
:type device_password: str
"""
_validation = {
'job_stages': {'readonly': True},
'contact_details': {'required': True},
'shipping_address': {'required': True},
'delivery_package': {'readonly': True},
'return_package': {'readonly': True},
'destination_account_details': {'required': True},
'error_details': {'readonly': True},
'job_details_type': {'required': True},
'copy_log_details': {'readonly': True},
'reverse_shipment_label_sas_key': {'readonly': True},
'chain_of_custody_sas_key': {'readonly': True},
'copy_progress': {'readonly': True},
}
_attribute_map = {
'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
'job_stages': {'key': 'jobStages', 'type': '[JobStages]'},
'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
'delivery_package': {'key': 'deliveryPackage', 'type': 'PackageShippingDetails'},
'return_package': {'key': 'returnPackage', 'type': 'PackageShippingDetails'},
'destination_account_details': {'key': 'destinationAccountDetails', 'type': '[DestinationAccountDetails]'},
'error_details': {'key': 'errorDetails', 'type': '[JobErrorDetails]'},
'job_details_type': {'key': 'jobDetailsType', 'type': 'str'},
'preferences': {'key': 'preferences', 'type': 'Preferences'},
'copy_log_details': {'key': 'copyLogDetails', 'type': '[CopyLogDetails]'},
'reverse_shipment_label_sas_key': {'key': 'reverseShipmentLabelSasKey', 'type': 'str'},
'chain_of_custody_sas_key': {'key': 'chainOfCustodySasKey', 'type': 'str'},
'copy_progress': {'key': 'copyProgress', 'type': '[CopyProgress]'},
'device_password': {'key': 'devicePassword', 'type': 'str'},
}
def __init__(
self,
*,
contact_details: "ContactDetails",
shipping_address: "ShippingAddress",
destination_account_details: List["DestinationAccountDetails"],
expected_data_size_in_terabytes: Optional[int] = None,
preferences: Optional["Preferences"] = None,
device_password: Optional[str] = None,
**kwargs
):
super(DataBoxHeavyJobDetails, self).__init__(expected_data_size_in_terabytes=expected_data_size_in_terabytes, contact_details=contact_details, shipping_address=shipping_address, destination_account_details=destination_account_details, preferences=preferences, **kwargs)
self.job_details_type = 'DataBoxHeavy' # type: str
self.copy_progress = None
self.device_password = device_password
class DataBoxHeavyJobSecrets(JobSecrets):
"""The secrets related to a databox heavy job.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param job_secrets_type: Required. Used to indicate the type of job secrets object. Constant
filled by server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type job_secrets_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:param dc_access_security_code: Dc Access Security Code for Customer Managed Shipping.
:type dc_access_security_code: ~azure.mgmt.databox.models.DcAccessSecurityCode
:ivar cabinet_pod_secrets: Contains the list of secret objects for a databox heavy job.
:vartype cabinet_pod_secrets: list[~azure.mgmt.databox.models.DataBoxHeavySecret]
"""
_validation = {
'job_secrets_type': {'required': True},
'cabinet_pod_secrets': {'readonly': True},
}
_attribute_map = {
'job_secrets_type': {'key': 'jobSecretsType', 'type': 'str'},
'dc_access_security_code': {'key': 'dcAccessSecurityCode', 'type': 'DcAccessSecurityCode'},
'cabinet_pod_secrets': {'key': 'cabinetPodSecrets', 'type': '[DataBoxHeavySecret]'},
}
def __init__(
self,
*,
dc_access_security_code: Optional["DcAccessSecurityCode"] = None,
**kwargs
):
super(DataBoxHeavyJobSecrets, self).__init__(dc_access_security_code=dc_access_security_code, **kwargs)
self.job_secrets_type = 'DataBoxHeavy' # type: str
self.cabinet_pod_secrets = None
class DataBoxHeavySecret(msrest.serialization.Model):
"""The secrets related to a databox heavy.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar device_serial_number: Serial number of the assigned device.
:vartype device_serial_number: str
:ivar device_password: Password for out of the box experience on device.
:vartype device_password: str
:ivar network_configurations: Network configuration of the appliance.
:vartype network_configurations: list[~azure.mgmt.databox.models.ApplianceNetworkConfiguration]
:ivar encoded_validation_cert_pub_key: The base 64 encoded public key to authenticate with the
device.
:vartype encoded_validation_cert_pub_key: str
:ivar account_credential_details: Per account level access credentials.
:vartype account_credential_details: list[~azure.mgmt.databox.models.AccountCredentialDetails]
"""
_validation = {
'device_serial_number': {'readonly': True},
'device_password': {'readonly': True},
'network_configurations': {'readonly': True},
'encoded_validation_cert_pub_key': {'readonly': True},
'account_credential_details': {'readonly': True},
}
_attribute_map = {
'device_serial_number': {'key': 'deviceSerialNumber', 'type': 'str'},
'device_password': {'key': 'devicePassword', 'type': 'str'},
'network_configurations': {'key': 'networkConfigurations', 'type': '[ApplianceNetworkConfiguration]'},
'encoded_validation_cert_pub_key': {'key': 'encodedValidationCertPubKey', 'type': 'str'},
'account_credential_details': {'key': 'accountCredentialDetails', 'type': '[AccountCredentialDetails]'},
}
def __init__(
self,
**kwargs
):
super(DataBoxHeavySecret, self).__init__(**kwargs)
self.device_serial_number = None
self.device_password = None
self.network_configurations = None
self.encoded_validation_cert_pub_key = None
self.account_credential_details = None
class DataBoxJobDetails(JobDetails):
"""Databox Job Details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param expected_data_size_in_terabytes: The expected size of the data, which needs to be
transferred in this job, in terabytes.
:type expected_data_size_in_terabytes: int
:ivar job_stages: List of stages that run in the job.
:vartype job_stages: list[~azure.mgmt.databox.models.JobStages]
:param contact_details: Required. Contact details for notification and shipping.
:type contact_details: ~azure.mgmt.databox.models.ContactDetails
:param shipping_address: Required. Shipping address of the customer.
:type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
:ivar delivery_package: Delivery package shipping details.
:vartype delivery_package: ~azure.mgmt.databox.models.PackageShippingDetails
:ivar return_package: Return package shipping details.
:vartype return_package: ~azure.mgmt.databox.models.PackageShippingDetails
:param destination_account_details: Required. Destination account details.
:type destination_account_details: list[~azure.mgmt.databox.models.DestinationAccountDetails]
:ivar error_details: Error details for failure. This is optional.
:vartype error_details: list[~azure.mgmt.databox.models.JobErrorDetails]
:param job_details_type: Required. Indicates the type of job details. Constant filled by server.
Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type job_details_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:param preferences: Preferences for the order.
:type preferences: ~azure.mgmt.databox.models.Preferences
:ivar copy_log_details: List of copy log details.
:vartype copy_log_details: list[~azure.mgmt.databox.models.CopyLogDetails]
:ivar reverse_shipment_label_sas_key: Shared access key to download the return shipment label.
:vartype reverse_shipment_label_sas_key: str
:ivar chain_of_custody_sas_key: Shared access key to download the chain of custody logs.
:vartype chain_of_custody_sas_key: str
:ivar copy_progress: Copy progress per storage account.
:vartype copy_progress: list[~azure.mgmt.databox.models.CopyProgress]
:param device_password: Set Device password for unlocking Databox.
:type device_password: str
"""
_validation = {
'job_stages': {'readonly': True},
'contact_details': {'required': True},
'shipping_address': {'required': True},
'delivery_package': {'readonly': True},
'return_package': {'readonly': True},
'destination_account_details': {'required': True},
'error_details': {'readonly': True},
'job_details_type': {'required': True},
'copy_log_details': {'readonly': True},
'reverse_shipment_label_sas_key': {'readonly': True},
'chain_of_custody_sas_key': {'readonly': True},
'copy_progress': {'readonly': True},
}
_attribute_map = {
'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
'job_stages': {'key': 'jobStages', 'type': '[JobStages]'},
'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
'delivery_package': {'key': 'deliveryPackage', 'type': 'PackageShippingDetails'},
'return_package': {'key': 'returnPackage', 'type': 'PackageShippingDetails'},
'destination_account_details': {'key': 'destinationAccountDetails', 'type': '[DestinationAccountDetails]'},
'error_details': {'key': 'errorDetails', 'type': '[JobErrorDetails]'},
'job_details_type': {'key': 'jobDetailsType', 'type': 'str'},
'preferences': {'key': 'preferences', 'type': 'Preferences'},
'copy_log_details': {'key': 'copyLogDetails', 'type': '[CopyLogDetails]'},
'reverse_shipment_label_sas_key': {'key': 'reverseShipmentLabelSasKey', 'type': 'str'},
'chain_of_custody_sas_key': {'key': 'chainOfCustodySasKey', 'type': 'str'},
'copy_progress': {'key': 'copyProgress', 'type': '[CopyProgress]'},
'device_password': {'key': 'devicePassword', 'type': 'str'},
}
def __init__(
self,
*,
contact_details: "ContactDetails",
shipping_address: "ShippingAddress",
destination_account_details: List["DestinationAccountDetails"],
expected_data_size_in_terabytes: Optional[int] = None,
preferences: Optional["Preferences"] = None,
device_password: Optional[str] = None,
**kwargs
):
super(DataBoxJobDetails, self).__init__(expected_data_size_in_terabytes=expected_data_size_in_terabytes, contact_details=contact_details, shipping_address=shipping_address, destination_account_details=destination_account_details, preferences=preferences, **kwargs)
self.job_details_type = 'DataBox' # type: str
self.copy_progress = None
self.device_password = device_password
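# Usage sketch (illustrative only; not part of the generated client surface).
# The DataBox variant mirrors the disk variant above but takes a device
# password instead of disk preferences. The contact, address and destination
# arguments are assumed to be pre-built model instances from elsewhere in this
# module; the password is a placeholder.
def _example_build_databox_job_details(contact, address, destinations):
    return DataBoxJobDetails(
        contact_details=contact,
        shipping_address=address,
        destination_account_details=destinations,
        device_password="example-device-password",  # illustrative value only
    )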
class DataboxJobSecrets(JobSecrets):
"""The secrets related to a databox job.
All required parameters must be populated in order to send to Azure.
:param job_secrets_type: Required. Used to indicate the type of job secrets object. Constant
filled by server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type job_secrets_type: str or ~azure.mgmt.databox.models.ClassDiscriminator
:param dc_access_security_code: Dc Access Security Code for Customer Managed Shipping.
:type dc_access_security_code: ~azure.mgmt.databox.models.DcAccessSecurityCode
:param pod_secrets: Contains the list of secret objects for a job.
:type pod_secrets: list[~azure.mgmt.databox.models.DataBoxSecret]
"""
_validation = {
'job_secrets_type': {'required': True},
}
_attribute_map = {
'job_secrets_type': {'key': 'jobSecretsType', 'type': 'str'},
'dc_access_security_code': {'key': 'dcAccessSecurityCode', 'type': 'DcAccessSecurityCode'},
'pod_secrets': {'key': 'podSecrets', 'type': '[DataBoxSecret]'},
}
def __init__(
self,
*,
dc_access_security_code: Optional["DcAccessSecurityCode"] = None,
pod_secrets: Optional[List["DataBoxSecret"]] = None,
**kwargs
):
super(DataboxJobSecrets, self).__init__(dc_access_security_code=dc_access_security_code, **kwargs)
self.job_secrets_type = 'DataBox' # type: str
self.pod_secrets = pod_secrets
class ScheduleAvailabilityRequest(msrest.serialization.Model):
"""Request body to get the availability for scheduling orders.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DataBoxScheduleAvailabilityRequest, DiskScheduleAvailabilityRequest, HeavyScheduleAvailabilityRequest.
All required parameters must be populated in order to send to Azure.
:param storage_location: Required. Location for data transfer.
For locations check: https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-
version=2018-01-01.
:type storage_location: str
:param sku_name: Required. Sku Name for which the order is to be scheduled. Constant filled by
server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type sku_name: str or ~azure.mgmt.databox.models.SkuName
"""
_validation = {
'storage_location': {'required': True},
'sku_name': {'required': True},
}
_attribute_map = {
'storage_location': {'key': 'storageLocation', 'type': 'str'},
'sku_name': {'key': 'skuName', 'type': 'str'},
}
_subtype_map = {
'sku_name': {'DataBox': 'DataBoxScheduleAvailabilityRequest', 'DataBoxDisk': 'DiskScheduleAvailabilityRequest', 'DataBoxHeavy': 'HeavyScheduleAvailabilityRequest'}
}
def __init__(
self,
*,
storage_location: str,
**kwargs
):
super(ScheduleAvailabilityRequest, self).__init__(**kwargs)
self.storage_location = storage_location
self.sku_name = None # type: Optional[str]
class DataBoxScheduleAvailabilityRequest(ScheduleAvailabilityRequest):
"""Request body to get the availability for scheduling data box orders orders.
All required parameters must be populated in order to send to Azure.
:param storage_location: Required. Location for data transfer.
For locations check: https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-
version=2018-01-01.
:type storage_location: str
:param sku_name: Required. Sku Name for which the order is to be scheduled. Constant filled by
server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type sku_name: str or ~azure.mgmt.databox.models.SkuName
"""
_validation = {
'storage_location': {'required': True},
'sku_name': {'required': True},
}
_attribute_map = {
'storage_location': {'key': 'storageLocation', 'type': 'str'},
'sku_name': {'key': 'skuName', 'type': 'str'},
}
def __init__(
self,
*,
storage_location: str,
**kwargs
):
super(DataBoxScheduleAvailabilityRequest, self).__init__(storage_location=storage_location, **kwargs)
self.sku_name = 'DataBox' # type: str
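# Usage sketch (illustrative only; not part of the generated client surface).
# Only the storage location is required here; the subclass fixes the skuName
# discriminator to 'DataBox'. 'westus' is an example region, not a value this
# module defines.
def _example_databox_schedule_availability():
    return DataBoxScheduleAvailabilityRequest(storage_location="westus")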
class DataBoxSecret(msrest.serialization.Model):
"""The secrets related to a DataBox.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar device_serial_number: Serial number of the assigned device.
:vartype device_serial_number: str
:ivar device_password: Password for out of the box experience on device.
:vartype device_password: str
:ivar network_configurations: Network configuration of the appliance.
:vartype network_configurations: list[~azure.mgmt.databox.models.ApplianceNetworkConfiguration]
:ivar encoded_validation_cert_pub_key: The base 64 encoded public key to authenticate with the
device.
:vartype encoded_validation_cert_pub_key: str
:ivar account_credential_details: Per account level access credentials.
:vartype account_credential_details: list[~azure.mgmt.databox.models.AccountCredentialDetails]
"""
_validation = {
'device_serial_number': {'readonly': True},
'device_password': {'readonly': True},
'network_configurations': {'readonly': True},
'encoded_validation_cert_pub_key': {'readonly': True},
'account_credential_details': {'readonly': True},
}
_attribute_map = {
'device_serial_number': {'key': 'deviceSerialNumber', 'type': 'str'},
'device_password': {'key': 'devicePassword', 'type': 'str'},
'network_configurations': {'key': 'networkConfigurations', 'type': '[ApplianceNetworkConfiguration]'},
'encoded_validation_cert_pub_key': {'key': 'encodedValidationCertPubKey', 'type': 'str'},
'account_credential_details': {'key': 'accountCredentialDetails', 'type': '[AccountCredentialDetails]'},
}
def __init__(
self,
**kwargs
):
super(DataBoxSecret, self).__init__(**kwargs)
self.device_serial_number = None
self.device_password = None
self.network_configurations = None
self.encoded_validation_cert_pub_key = None
self.account_credential_details = None
class DataDestinationDetailsValidationRequest(ValidationInputRequest):
"""Request to validate data destination details.
All required parameters must be populated in order to send to Azure.
:param validation_type: Required. Identifies the type of validation request. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:param destination_account_details: Required. Destination account details list.
:type destination_account_details: list[~azure.mgmt.databox.models.DestinationAccountDetails]
:param location: Required. Location of stamp or geo.
:type location: str
"""
_validation = {
'validation_type': {'required': True},
'destination_account_details': {'required': True},
'location': {'required': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'destination_account_details': {'key': 'destinationAccountDetails', 'type': '[DestinationAccountDetails]'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
*,
destination_account_details: List["DestinationAccountDetails"],
location: str,
**kwargs
):
super(DataDestinationDetailsValidationRequest, self).__init__(**kwargs)
self.validation_type = 'ValidateDataDestinationDetails' # type: str
self.destination_account_details = destination_account_details
self.location = location
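# Usage sketch (illustrative only; not part of the generated client surface).
# Validating destination details before creating a job. The destination list is
# built from DestinationStorageAccountDetails (defined later in this module);
# the ARM resource id and region below are placeholders, not real values.
def _example_validate_destination_details():
    destination = DestinationStorageAccountDetails(
        storage_account_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
                           "Microsoft.Storage/storageAccounts/<account>",
    )
    return DataDestinationDetailsValidationRequest(
        destination_account_details=[destination],
        location="westus",
    )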
class DataDestinationDetailsValidationResponseProperties(ValidationInputResponse):
"""Properties of data destination details validation response.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param validation_type: Required. Identifies the type of validation response. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:ivar error: Error code and message of validation response.
:vartype error: ~azure.mgmt.databox.models.Error
:ivar status: Data destination details validation status. Possible values include: "Valid",
"Invalid", "Skipped".
:vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
"""
_validation = {
'validation_type': {'required': True},
'error': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataDestinationDetailsValidationResponseProperties, self).__init__(**kwargs)
self.validation_type = 'ValidateDataDestinationDetails' # type: str
self.status = None
class DcAccessSecurityCode(msrest.serialization.Model):
"""Dc Access Security code for device.
:param forward_dc_access_code: Dc Access Code for dispatching from DC.
:type forward_dc_access_code: str
:param reverse_dc_access_code: Dc Access code for dropping off at DC.
:type reverse_dc_access_code: str
"""
_attribute_map = {
'forward_dc_access_code': {'key': 'forwardDcAccessCode', 'type': 'str'},
'reverse_dc_access_code': {'key': 'reverseDcAccessCode', 'type': 'str'},
}
def __init__(
self,
*,
forward_dc_access_code: Optional[str] = None,
reverse_dc_access_code: Optional[str] = None,
**kwargs
):
super(DcAccessSecurityCode, self).__init__(**kwargs)
self.forward_dc_access_code = forward_dc_access_code
self.reverse_dc_access_code = reverse_dc_access_code
class DestinationAccountDetails(msrest.serialization.Model):
"""Details of the destination storage accounts.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DestinationManagedDiskDetails, DestinationStorageAccountDetails.
All required parameters must be populated in order to send to Azure.
:param data_destination_type: Required. Data Destination Type. Constant filled by server.
Possible values include: "StorageAccount", "ManagedDisk".
:type data_destination_type: str or ~azure.mgmt.databox.models.DataDestinationType
:param account_id: Arm Id of the destination where the data has to be moved.
:type account_id: str
:param share_password: Share password to be shared by all shares in SA.
:type share_password: str
"""
_validation = {
'data_destination_type': {'required': True},
}
_attribute_map = {
'data_destination_type': {'key': 'dataDestinationType', 'type': 'str'},
'account_id': {'key': 'accountId', 'type': 'str'},
'share_password': {'key': 'sharePassword', 'type': 'str'},
}
_subtype_map = {
'data_destination_type': {'ManagedDisk': 'DestinationManagedDiskDetails', 'StorageAccount': 'DestinationStorageAccountDetails'}
}
def __init__(
self,
*,
account_id: Optional[str] = None,
share_password: Optional[str] = None,
**kwargs
):
super(DestinationAccountDetails, self).__init__(**kwargs)
self.data_destination_type = None # type: Optional[str]
self.account_id = account_id
self.share_password = share_password
class DestinationManagedDiskDetails(DestinationAccountDetails):
"""Details for the destination compute disks.
All required parameters must be populated in order to send to Azure.
:param data_destination_type: Required. Data Destination Type. Constant filled by server.
Possible values include: "StorageAccount", "ManagedDisk".
:type data_destination_type: str or ~azure.mgmt.databox.models.DataDestinationType
:param account_id: Arm Id of the destination where the data has to be moved.
:type account_id: str
:param share_password: Share password to be shared by all shares in SA.
:type share_password: str
:param resource_group_id: Required. Destination Resource Group Id where the Compute disks
should be created.
:type resource_group_id: str
:param staging_storage_account_id: Required. Arm Id of the storage account that can be used to
copy the vhd for staging.
:type staging_storage_account_id: str
"""
_validation = {
'data_destination_type': {'required': True},
'resource_group_id': {'required': True},
'staging_storage_account_id': {'required': True},
}
_attribute_map = {
'data_destination_type': {'key': 'dataDestinationType', 'type': 'str'},
'account_id': {'key': 'accountId', 'type': 'str'},
'share_password': {'key': 'sharePassword', 'type': 'str'},
'resource_group_id': {'key': 'resourceGroupId', 'type': 'str'},
'staging_storage_account_id': {'key': 'stagingStorageAccountId', 'type': 'str'},
}
def __init__(
self,
*,
resource_group_id: str,
staging_storage_account_id: str,
account_id: Optional[str] = None,
share_password: Optional[str] = None,
**kwargs
):
super(DestinationManagedDiskDetails, self).__init__(account_id=account_id, share_password=share_password, **kwargs)
self.data_destination_type = 'ManagedDisk' # type: str
self.resource_group_id = resource_group_id
self.staging_storage_account_id = staging_storage_account_id
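# Usage sketch (illustrative only; not part of the generated client surface).
# A managed-disk destination needs both the target resource group and a staging
# storage account used while copying the VHDs. The ARM ids below are
# placeholders.
def _example_managed_disk_destination():
    return DestinationManagedDiskDetails(
        resource_group_id="/subscriptions/<sub-id>/resourceGroups/<disk-rg>",
        staging_storage_account_id="/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
                                   "Microsoft.Storage/storageAccounts/<staging>",
    )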
class DestinationStorageAccountDetails(DestinationAccountDetails):
"""Details for the destination storage account.
All required parameters must be populated in order to send to Azure.
:param data_destination_type: Required. Data Destination Type. Constant filled by server.
Possible values include: "StorageAccount", "ManagedDisk".
:type data_destination_type: str or ~azure.mgmt.databox.models.DataDestinationType
:param account_id: Arm Id of the destination where the data has to be moved.
:type account_id: str
:param share_password: Share password to be shared by all shares in SA.
:type share_password: str
:param storage_account_id: Required. Destination Storage Account Arm Id.
:type storage_account_id: str
"""
_validation = {
'data_destination_type': {'required': True},
'storage_account_id': {'required': True},
}
_attribute_map = {
'data_destination_type': {'key': 'dataDestinationType', 'type': 'str'},
'account_id': {'key': 'accountId', 'type': 'str'},
'share_password': {'key': 'sharePassword', 'type': 'str'},
'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
}
def __init__(
self,
*,
storage_account_id: str,
account_id: Optional[str] = None,
share_password: Optional[str] = None,
**kwargs
):
super(DestinationStorageAccountDetails, self).__init__(account_id=account_id, share_password=share_password, **kwargs)
self.data_destination_type = 'StorageAccount' # type: str
self.storage_account_id = storage_account_id
class DestinationToServiceLocationMap(msrest.serialization.Model):
"""Map of destination location to service location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar destination_location: Location of the destination.
:vartype destination_location: str
:ivar service_location: Location of the service.
:vartype service_location: str
"""
_validation = {
'destination_location': {'readonly': True},
'service_location': {'readonly': True},
}
_attribute_map = {
'destination_location': {'key': 'destinationLocation', 'type': 'str'},
'service_location': {'key': 'serviceLocation', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DestinationToServiceLocationMap, self).__init__(**kwargs)
self.destination_location = None
self.service_location = None
class DiskScheduleAvailabilityRequest(ScheduleAvailabilityRequest):
"""Request body to get the availability for scheduling disk orders.
All required parameters must be populated in order to send to Azure.
:param storage_location: Required. Location for data transfer.
For locations check: https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-
version=2018-01-01.
:type storage_location: str
:param sku_name: Required. Sku Name for which the order is to be scheduled. Constant filled by
server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type sku_name: str or ~azure.mgmt.databox.models.SkuName
:param expected_data_size_in_terabytes: Required. The expected size of the data, which needs to
be transferred in this job, in terabytes.
:type expected_data_size_in_terabytes: int
"""
_validation = {
'storage_location': {'required': True},
'sku_name': {'required': True},
'expected_data_size_in_terabytes': {'required': True},
}
_attribute_map = {
'storage_location': {'key': 'storageLocation', 'type': 'str'},
'sku_name': {'key': 'skuName', 'type': 'str'},
'expected_data_size_in_terabytes': {'key': 'expectedDataSizeInTerabytes', 'type': 'int'},
}
def __init__(
self,
*,
storage_location: str,
expected_data_size_in_terabytes: int,
**kwargs
):
super(DiskScheduleAvailabilityRequest, self).__init__(storage_location=storage_location, **kwargs)
self.sku_name = 'DataBoxDisk' # type: str
self.expected_data_size_in_terabytes = expected_data_size_in_terabytes
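# Usage sketch (illustrative only; not part of the generated client surface).
# The disk variant additionally requires the expected data size so the service
# can work out how many disks the order needs. The region and size below are
# illustrative values.
def _example_disk_schedule_availability():
    return DiskScheduleAvailabilityRequest(
        storage_location="westus",
        expected_data_size_in_terabytes=16,
    )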
class DiskSecret(msrest.serialization.Model):
"""Contains all the secrets of a Disk.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar disk_serial_number: Serial number of the assigned disk.
:vartype disk_serial_number: str
:ivar bit_locker_key: Bit Locker key of the disk which can be used to unlock the disk to copy
data.
:vartype bit_locker_key: str
"""
_validation = {
'disk_serial_number': {'readonly': True},
'bit_locker_key': {'readonly': True},
}
_attribute_map = {
'disk_serial_number': {'key': 'diskSerialNumber', 'type': 'str'},
'bit_locker_key': {'key': 'bitLockerKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DiskSecret, self).__init__(**kwargs)
self.disk_serial_number = None
self.bit_locker_key = None
class Error(msrest.serialization.Model):
"""Top level error for the job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code that can be used to programmatically identify the error.
:vartype code: str
:ivar message: Describes the error in detail and provides debugging information.
:vartype message: str
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = None
self.message = None
class HeavyScheduleAvailabilityRequest(ScheduleAvailabilityRequest):
"""Request body to get the availability for scheduling heavy orders.
All required parameters must be populated in order to send to Azure.
:param storage_location: Required. Location for data transfer.
For locations check: https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-
version=2018-01-01.
:type storage_location: str
:param sku_name: Required. Sku Name for which the order is to be scheduled. Constant filled by
server. Possible values include: "DataBox", "DataBoxDisk", "DataBoxHeavy".
:type sku_name: str or ~azure.mgmt.databox.models.SkuName
"""
_validation = {
'storage_location': {'required': True},
'sku_name': {'required': True},
}
_attribute_map = {
'storage_location': {'key': 'storageLocation', 'type': 'str'},
'sku_name': {'key': 'skuName', 'type': 'str'},
}
def __init__(
self,
*,
storage_location: str,
**kwargs
):
super(HeavyScheduleAvailabilityRequest, self).__init__(storage_location=storage_location, **kwargs)
self.sku_name = 'DataBoxHeavy' # type: str
class JobDeliveryInfo(msrest.serialization.Model):
"""Additional delivery info.
:param scheduled_date_time: Scheduled date time.
:type scheduled_date_time: ~datetime.datetime
"""
_attribute_map = {
'scheduled_date_time': {'key': 'scheduledDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
*,
scheduled_date_time: Optional[datetime.datetime] = None,
**kwargs
):
super(JobDeliveryInfo, self).__init__(**kwargs)
self.scheduled_date_time = scheduled_date_time
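# Usage sketch (illustrative only; not part of the generated client surface).
# For a scheduled delivery the date is a datetime that msrest serializes to
# ISO 8601 per the attribute map. The 'datetime' module is already imported at
# the top of this generated module (it is referenced in the annotation above);
# the date itself is an arbitrary example.
def _example_scheduled_delivery_info():
    return JobDeliveryInfo(
        scheduled_date_time=datetime.datetime(2021, 6, 1, 9, 30),
    )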
class JobErrorDetails(msrest.serialization.Model):
"""Job Error Details for providing the information and recommended action.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error_message: Message for the error.
:vartype error_message: str
:ivar error_code: Code for the error.
:vartype error_code: int
:ivar recommended_action: Recommended action for the error.
:vartype recommended_action: str
:ivar exception_message: Contains the non localized exception message.
:vartype exception_message: str
"""
_validation = {
'error_message': {'readonly': True},
'error_code': {'readonly': True},
'recommended_action': {'readonly': True},
'exception_message': {'readonly': True},
}
_attribute_map = {
'error_message': {'key': 'errorMessage', 'type': 'str'},
'error_code': {'key': 'errorCode', 'type': 'int'},
'recommended_action': {'key': 'recommendedAction', 'type': 'str'},
'exception_message': {'key': 'exceptionMessage', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobErrorDetails, self).__init__(**kwargs)
self.error_message = None
self.error_code = None
self.recommended_action = None
self.exception_message = None
class Resource(msrest.serialization.Model):
"""Model of the Resource.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource. This will be one of the supported and
registered Azure Regions (e.g. West US, East US, Southeast Asia, etc.). The region of a
resource cannot be changed once it is created, but if an identical region is specified on
update the request will succeed.
:type location: str
:param tags: A set of tags. The list of key value pairs that describe the resource. These tags
can be used in viewing and grouping this resource (across resource groups).
:type tags: dict[str, str]
:param sku: Required. The sku type.
:type sku: ~azure.mgmt.databox.models.Sku
"""
_validation = {
'location': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
location: str,
sku: "Sku",
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.location = location
self.tags = tags
self.sku = sku
class JobResource(Resource):
"""Job Resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. The location of the resource. This will be one of the supported and
registered Azure Regions (e.g. West US, East US, Southeast Asia, etc.). The region of a
resource cannot be changed once it is created, but if an identical region is specified on
update the request will succeed.
:type location: str
:param tags: A set of tags. The list of key value pairs that describe the resource. These tags
can be used in viewing and grouping this resource (across resource groups).
:type tags: dict[str, str]
:param sku: Required. The sku type.
:type sku: ~azure.mgmt.databox.models.Sku
:ivar name: Name of the object.
:vartype name: str
:ivar id: Id of the object.
:vartype id: str
:ivar type: Type of the object.
:vartype type: str
:ivar is_cancellable: Describes whether the job is cancellable or not.
:vartype is_cancellable: bool
:ivar is_deletable: Describes whether the job is deletable or not.
:vartype is_deletable: bool
:ivar is_shipping_address_editable: Describes whether the shipping address is editable or not.
:vartype is_shipping_address_editable: bool
:ivar status: Name of the stage which is in progress. Possible values include: "DeviceOrdered",
"DevicePrepared", "Dispatched", "Delivered", "PickedUp", "AtAzureDC", "DataCopy", "Completed",
"CompletedWithErrors", "Cancelled", "Failed_IssueReportedAtCustomer",
"Failed_IssueDetectedAtAzureDC", "Aborted", "CompletedWithWarnings",
"ReadyToDispatchFromAzureDC", "ReadyToReceiveAtAzureDC".
:vartype status: str or ~azure.mgmt.databox.models.StageName
:ivar start_time: Time at which the job was started in UTC ISO 8601 format.
:vartype start_time: ~datetime.datetime
:ivar error: Top level error for the job.
:vartype error: ~azure.mgmt.databox.models.Error
:param details: Details of a job run. This field will only be sent for expand details filter.
:type details: ~azure.mgmt.databox.models.JobDetails
:ivar cancellation_reason: Reason for cancellation.
:vartype cancellation_reason: str
:param delivery_type: Delivery type of Job. Possible values include: "NonScheduled",
"Scheduled".
:type delivery_type: str or ~azure.mgmt.databox.models.JobDeliveryType
:param delivery_info: Delivery Info of Job.
:type delivery_info: ~azure.mgmt.databox.models.JobDeliveryInfo
:ivar is_cancellable_without_fee: Flag to indicate cancellation of scheduled job.
:vartype is_cancellable_without_fee: bool
"""
_validation = {
'location': {'required': True},
'sku': {'required': True},
'name': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
'is_cancellable': {'readonly': True},
'is_deletable': {'readonly': True},
'is_shipping_address_editable': {'readonly': True},
'status': {'readonly': True},
'start_time': {'readonly': True},
'error': {'readonly': True},
'cancellation_reason': {'readonly': True},
'is_cancellable_without_fee': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'is_cancellable': {'key': 'properties.isCancellable', 'type': 'bool'},
'is_deletable': {'key': 'properties.isDeletable', 'type': 'bool'},
'is_shipping_address_editable': {'key': 'properties.isShippingAddressEditable', 'type': 'bool'},
'status': {'key': 'properties.status', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'Error'},
'details': {'key': 'properties.details', 'type': 'JobDetails'},
'cancellation_reason': {'key': 'properties.cancellationReason', 'type': 'str'},
'delivery_type': {'key': 'properties.deliveryType', 'type': 'str'},
'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'JobDeliveryInfo'},
'is_cancellable_without_fee': {'key': 'properties.isCancellableWithoutFee', 'type': 'bool'},
}
def __init__(
self,
*,
location: str,
sku: "Sku",
tags: Optional[Dict[str, str]] = None,
details: Optional["JobDetails"] = None,
delivery_type: Optional[Union[str, "JobDeliveryType"]] = None,
delivery_info: Optional["JobDeliveryInfo"] = None,
**kwargs
):
super(JobResource, self).__init__(location=location, tags=tags, sku=sku, **kwargs)
self.name = None
self.id = None
self.type = None
self.is_cancellable = None
self.is_deletable = None
self.is_shipping_address_editable = None
self.status = None
self.start_time = None
self.error = None
self.details = details
self.cancellation_reason = None
self.delivery_type = delivery_type
self.delivery_info = delivery_info
self.is_cancellable_without_fee = None
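# Usage sketch (illustrative only; the location, sku and tag values below are
# assumptions, not part of the generated models):
#
#     sku = Sku(name="DataBox")
#     job = JobResource(location="westus", sku=sku, tags={"env": "demo"})
#
# Read-only fields such as ``status`` and ``start_time`` stay ``None`` locally
# and are populated by the service in responses.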
class JobResourceList(msrest.serialization.Model):
"""Job Resource Collection.
:param value: List of job resources.
:type value: list[~azure.mgmt.databox.models.JobResource]
:param next_link: Link for the next set of job resources.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[JobResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["JobResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(JobResourceList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class JobResourceUpdateParameter(msrest.serialization.Model):
"""The JobResourceUpdateParameter.
:param tags: A set of tags. The list of key value pairs that describe the resource. These tags
can be used in viewing and grouping this resource (across resource groups).
:type tags: dict[str, str]
:param details: Details of a job to be updated.
:type details: ~azure.mgmt.databox.models.UpdateJobDetails
:param destination_account_details: Destination account details.
:type destination_account_details: list[~azure.mgmt.databox.models.DestinationAccountDetails]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'details': {'key': 'properties.details', 'type': 'UpdateJobDetails'},
'destination_account_details': {'key': 'properties.destinationAccountDetails', 'type': '[DestinationAccountDetails]'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
details: Optional["UpdateJobDetails"] = None,
destination_account_details: Optional[List["DestinationAccountDetails"]] = None,
**kwargs
):
super(JobResourceUpdateParameter, self).__init__(**kwargs)
self.tags = tags
self.details = details
self.destination_account_details = destination_account_details
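# Usage sketch (illustrative only; the tag value is an assumption):
#
#     update = JobResourceUpdateParameter(
#         tags={"env": "demo"},
#         details=UpdateJobDetails(),
#     )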
class JobStages(msrest.serialization.Model):
"""Job stages.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar stage_name: Name of the job stage. Possible values include: "DeviceOrdered",
"DevicePrepared", "Dispatched", "Delivered", "PickedUp", "AtAzureDC", "DataCopy", "Completed",
"CompletedWithErrors", "Cancelled", "Failed_IssueReportedAtCustomer",
"Failed_IssueDetectedAtAzureDC", "Aborted", "CompletedWithWarnings",
"ReadyToDispatchFromAzureDC", "ReadyToReceiveAtAzureDC".
:vartype stage_name: str or ~azure.mgmt.databox.models.StageName
:ivar display_name: Display name of the job stage.
:vartype display_name: str
:ivar stage_status: Status of the job stage. Possible values include: "None", "InProgress",
"Succeeded", "Failed", "Cancelled", "Cancelling", "SucceededWithErrors".
:vartype stage_status: str or ~azure.mgmt.databox.models.StageStatus
:ivar stage_time: Time for the job stage in UTC ISO 8601 format.
:vartype stage_time: ~datetime.datetime
:ivar job_stage_details: Job Stage Details.
:vartype job_stage_details: object
:ivar error_details: Error details for the stage.
:vartype error_details: list[~azure.mgmt.databox.models.JobErrorDetails]
"""
_validation = {
'stage_name': {'readonly': True},
'display_name': {'readonly': True},
'stage_status': {'readonly': True},
'stage_time': {'readonly': True},
'job_stage_details': {'readonly': True},
'error_details': {'readonly': True},
}
_attribute_map = {
'stage_name': {'key': 'stageName', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'stage_status': {'key': 'stageStatus', 'type': 'str'},
'stage_time': {'key': 'stageTime', 'type': 'iso-8601'},
'job_stage_details': {'key': 'jobStageDetails', 'type': 'object'},
'error_details': {'key': 'errorDetails', 'type': '[JobErrorDetails]'},
}
def __init__(
self,
**kwargs
):
super(JobStages, self).__init__(**kwargs)
self.stage_name = None
self.display_name = None
self.stage_status = None
self.stage_time = None
self.job_stage_details = None
self.error_details = None
class NotificationPreference(msrest.serialization.Model):
"""Notification preference for a job stage.
All required parameters must be populated in order to send to Azure.
:param stage_name: Required. Name of the stage. Possible values include: "DevicePrepared",
"Dispatched", "Delivered", "PickedUp", "AtAzureDC", "DataCopy".
:type stage_name: str or ~azure.mgmt.databox.models.NotificationStageName
:param send_notification: Required. Notification is required or not.
:type send_notification: bool
"""
_validation = {
'stage_name': {'required': True},
'send_notification': {'required': True},
}
_attribute_map = {
'stage_name': {'key': 'stageName', 'type': 'str'},
'send_notification': {'key': 'sendNotification', 'type': 'bool'},
}
def __init__(
self,
*,
stage_name: Union[str, "NotificationStageName"],
send_notification: bool,
**kwargs
):
super(NotificationPreference, self).__init__(**kwargs)
self.stage_name = stage_name
self.send_notification = send_notification
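# Usage sketch (illustrative only; the chosen stage is an assumption):
#
#     pref = NotificationPreference(stage_name="Dispatched",
#                                   send_notification=True)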
class Operation(msrest.serialization.Model):
"""Operation entity.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the operation. Format:
{resourceProviderNamespace}/{resourceType}/{read|write|delete|action}.
:vartype name: str
:ivar display: Operation display values.
:vartype display: ~azure.mgmt.databox.models.OperationDisplay
:ivar properties: Operation properties.
:vartype properties: object
:ivar origin: Origin of the operation. Can be : user|system|user,system.
:vartype origin: str
"""
_validation = {
'name': {'readonly': True},
'display': {'readonly': True},
'properties': {'readonly': True},
'origin': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'properties': {'key': 'properties', 'type': 'object'},
'origin': {'key': 'origin', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = None
self.properties = None
self.origin = None
class OperationDisplay(msrest.serialization.Model):
"""Operation display.
:param provider: Provider name.
:type provider: str
:param resource: Resource name.
:type resource: str
:param operation: Localized name of the operation for display purpose.
:type operation: str
:param description: Localized description of the operation for display purpose.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationList(msrest.serialization.Model):
"""Operation Collection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of operations.
:vartype value: list[~azure.mgmt.databox.models.Operation]
:param next_link: Link for the next set of operations.
:type next_link: str
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
**kwargs
):
super(OperationList, self).__init__(**kwargs)
self.value = None
self.next_link = next_link
class PackageShippingDetails(msrest.serialization.Model):
"""Shipping details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar carrier_name: Name of the carrier.
:vartype carrier_name: str
:ivar tracking_id: Tracking Id of shipment.
:vartype tracking_id: str
:ivar tracking_url: Url where shipment can be tracked.
:vartype tracking_url: str
"""
_validation = {
'carrier_name': {'readonly': True},
'tracking_id': {'readonly': True},
'tracking_url': {'readonly': True},
}
_attribute_map = {
'carrier_name': {'key': 'carrierName', 'type': 'str'},
'tracking_id': {'key': 'trackingId', 'type': 'str'},
'tracking_url': {'key': 'trackingUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PackageShippingDetails, self).__init__(**kwargs)
self.carrier_name = None
self.tracking_id = None
self.tracking_url = None
class Preferences(msrest.serialization.Model):
"""Preferences related to the order.
:param preferred_data_center_region: Preferred Data Center Region.
:type preferred_data_center_region: list[str]
:param transport_preferences: Preferences related to the shipment logistics of the sku.
:type transport_preferences: ~azure.mgmt.databox.models.TransportPreferences
"""
_attribute_map = {
'preferred_data_center_region': {'key': 'preferredDataCenterRegion', 'type': '[str]'},
'transport_preferences': {'key': 'transportPreferences', 'type': 'TransportPreferences'},
}
def __init__(
self,
*,
preferred_data_center_region: Optional[List[str]] = None,
transport_preferences: Optional["TransportPreferences"] = None,
**kwargs
):
super(Preferences, self).__init__(**kwargs)
self.preferred_data_center_region = preferred_data_center_region
self.transport_preferences = transport_preferences
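# Usage sketch (illustrative only; the region and shipment type below are
# assumptions):
#
#     prefs = Preferences(
#         preferred_data_center_region=["westus"],
#         transport_preferences=TransportPreferences(
#             preferred_shipment_type="MicrosoftManaged"),
#     )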
class PreferencesValidationRequest(ValidationInputRequest):
"""Request to validate preference of transport and data center.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation request. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:param preference: Preference requested with respect to transport type and data center.
:type preference: ~azure.mgmt.databox.models.Preferences
:param device_type: Required. Device type to be used for the job. Possible values include:
"DataBox", "DataBoxDisk", "DataBoxHeavy".
:type device_type: str or ~azure.mgmt.databox.models.SkuName
"""
_validation = {
'validation_type': {'required': True},
'device_type': {'required': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'preference': {'key': 'preference', 'type': 'Preferences'},
'device_type': {'key': 'deviceType', 'type': 'str'},
}
def __init__(
self,
*,
device_type: Union[str, "SkuName"],
preference: Optional["Preferences"] = None,
**kwargs
):
super(PreferencesValidationRequest, self).__init__(**kwargs)
self.validation_type = 'ValidatePreferences' # type: str
self.preference = preference
self.device_type = device_type
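# Usage sketch (illustrative only): the ``validation_type`` discriminator is a
# constant set by ``__init__``, so callers supply only the device type and the
# preferences to validate, e.g.
#
#     req = PreferencesValidationRequest(device_type="DataBox",
#                                        preference=prefs)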
class PreferencesValidationResponseProperties(ValidationInputResponse):
"""Properties of data center and transport preference validation response.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation response. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:ivar error: Error code and message of validation response.
:vartype error: ~azure.mgmt.databox.models.Error
:ivar status: Validation status of requested data center and transport. Possible values
include: "Valid", "Invalid", "Skipped".
:vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
"""
_validation = {
'validation_type': {'required': True},
'error': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PreferencesValidationResponseProperties, self).__init__(**kwargs)
self.validation_type = 'ValidatePreferences' # type: str
self.status = None
class RegionConfigurationRequest(msrest.serialization.Model):
"""Request body to get the configuration for the region.
:param schedule_availability_request: Request body to get the availability for scheduling
orders.
:type schedule_availability_request: ~azure.mgmt.databox.models.ScheduleAvailabilityRequest
:param transport_availability_request: Request body to get the transport availability for given
sku.
:type transport_availability_request: ~azure.mgmt.databox.models.TransportAvailabilityRequest
"""
_attribute_map = {
'schedule_availability_request': {'key': 'scheduleAvailabilityRequest', 'type': 'ScheduleAvailabilityRequest'},
'transport_availability_request': {'key': 'transportAvailabilityRequest', 'type': 'TransportAvailabilityRequest'},
}
def __init__(
self,
*,
schedule_availability_request: Optional["ScheduleAvailabilityRequest"] = None,
transport_availability_request: Optional["TransportAvailabilityRequest"] = None,
**kwargs
):
super(RegionConfigurationRequest, self).__init__(**kwargs)
self.schedule_availability_request = schedule_availability_request
self.transport_availability_request = transport_availability_request
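# Usage sketch (illustrative only; the storage location and sku choice are
# assumptions):
#
#     req = RegionConfigurationRequest(
#         schedule_availability_request=HeavyScheduleAvailabilityRequest(
#             storage_location="westus"),
#         transport_availability_request=TransportAvailabilityRequest(
#             sku_name="DataBoxHeavy"),
#     )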
class RegionConfigurationResponse(msrest.serialization.Model):
"""Configuration response specific to a region.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar schedule_availability_response: Schedule availability for given sku in a region.
:vartype schedule_availability_response:
~azure.mgmt.databox.models.ScheduleAvailabilityResponse
:ivar transport_availability_response: Transport options available for given sku in a region.
:vartype transport_availability_response:
~azure.mgmt.databox.models.TransportAvailabilityResponse
"""
_validation = {
'schedule_availability_response': {'readonly': True},
'transport_availability_response': {'readonly': True},
}
_attribute_map = {
'schedule_availability_response': {'key': 'scheduleAvailabilityResponse', 'type': 'ScheduleAvailabilityResponse'},
'transport_availability_response': {'key': 'transportAvailabilityResponse', 'type': 'TransportAvailabilityResponse'},
}
def __init__(
self,
**kwargs
):
super(RegionConfigurationResponse, self).__init__(**kwargs)
self.schedule_availability_response = None
self.transport_availability_response = None
class ScheduleAvailabilityResponse(msrest.serialization.Model):
"""Schedule availability response for given sku in a region.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar available_dates: List of dates available to schedule.
:vartype available_dates: list[~datetime.datetime]
"""
_validation = {
'available_dates': {'readonly': True},
}
_attribute_map = {
'available_dates': {'key': 'availableDates', 'type': '[iso-8601]'},
}
def __init__(
self,
**kwargs
):
super(ScheduleAvailabilityResponse, self).__init__(**kwargs)
self.available_dates = None
class ShareCredentialDetails(msrest.serialization.Model):
"""Credential details of the shares in account.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar share_name: Name of the share.
:vartype share_name: str
:ivar share_type: Type of the share. Possible values include: "UnknownType", "HCS",
"BlockBlob", "PageBlob", "AzureFile", "ManagedDisk".
:vartype share_type: str or ~azure.mgmt.databox.models.ShareDestinationFormatType
:ivar user_name: User name for the share.
:vartype user_name: str
:ivar password: Password for the share.
:vartype password: str
:ivar supported_access_protocols: Access protocols supported on the device.
:vartype supported_access_protocols: list[str or ~azure.mgmt.databox.models.AccessProtocol]
"""
_validation = {
'share_name': {'readonly': True},
'share_type': {'readonly': True},
'user_name': {'readonly': True},
'password': {'readonly': True},
'supported_access_protocols': {'readonly': True},
}
_attribute_map = {
'share_name': {'key': 'shareName', 'type': 'str'},
'share_type': {'key': 'shareType', 'type': 'str'},
'user_name': {'key': 'userName', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'supported_access_protocols': {'key': 'supportedAccessProtocols', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(ShareCredentialDetails, self).__init__(**kwargs)
self.share_name = None
self.share_type = None
self.user_name = None
self.password = None
self.supported_access_protocols = None
class ShipmentPickUpRequest(msrest.serialization.Model):
"""Shipment pick up request details.
All required parameters must be populated in order to send to Azure.
    :param start_time: Required. Minimum date after which the pick up should commence; this must
     be in the local time of the pick up area.
:type start_time: ~datetime.datetime
    :param end_time: Required. Maximum date before which the pick up should commence; this must
     be in the local time of the pick up area.
:type end_time: ~datetime.datetime
    :param shipment_location: Required. Shipment location in the pickup place, e.g. the front desk.
:type shipment_location: str
"""
_validation = {
'start_time': {'required': True},
'end_time': {'required': True},
'shipment_location': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'shipment_location': {'key': 'shipmentLocation', 'type': 'str'},
}
def __init__(
self,
*,
start_time: datetime.datetime,
end_time: datetime.datetime,
shipment_location: str,
**kwargs
):
super(ShipmentPickUpRequest, self).__init__(**kwargs)
self.start_time = start_time
self.end_time = end_time
self.shipment_location = shipment_location
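# Usage sketch (illustrative only; the pick up window and location below are
# assumptions):
#
#     window = ShipmentPickUpRequest(
#         start_time=datetime.datetime(2020, 1, 6, 9, 0),
#         end_time=datetime.datetime(2020, 1, 10, 17, 0),
#         shipment_location="Front desk",
#     )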
class ShipmentPickUpResponse(msrest.serialization.Model):
"""Shipment pick up response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar confirmation_number: Confirmation number for the pick up request.
:vartype confirmation_number: str
    :ivar ready_by_time: Time by which the shipment should be ready for pick up; this is in the
     local time of the pick up area.
:vartype ready_by_time: ~datetime.datetime
"""
_validation = {
'confirmation_number': {'readonly': True},
'ready_by_time': {'readonly': True},
}
_attribute_map = {
'confirmation_number': {'key': 'confirmationNumber', 'type': 'str'},
'ready_by_time': {'key': 'readyByTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ShipmentPickUpResponse, self).__init__(**kwargs)
self.confirmation_number = None
self.ready_by_time = None
class ShippingAddress(msrest.serialization.Model):
"""Shipping address where customer wishes to receive the device.
All required parameters must be populated in order to send to Azure.
:param street_address1: Required. Street Address line 1.
:type street_address1: str
:param street_address2: Street Address line 2.
:type street_address2: str
:param street_address3: Street Address line 3.
:type street_address3: str
:param city: Name of the City.
:type city: str
:param state_or_province: Name of the State or Province.
:type state_or_province: str
:param country: Required. Name of the Country.
:type country: str
:param postal_code: Required. Postal code.
:type postal_code: str
:param zip_extended_code: Extended Zip Code.
:type zip_extended_code: str
:param company_name: Name of the company.
:type company_name: str
:param address_type: Type of address. Possible values include: "None", "Residential",
"Commercial".
:type address_type: str or ~azure.mgmt.databox.models.AddressType
"""
_validation = {
'street_address1': {'required': True},
'country': {'required': True},
'postal_code': {'required': True},
}
_attribute_map = {
'street_address1': {'key': 'streetAddress1', 'type': 'str'},
'street_address2': {'key': 'streetAddress2', 'type': 'str'},
'street_address3': {'key': 'streetAddress3', 'type': 'str'},
'city': {'key': 'city', 'type': 'str'},
'state_or_province': {'key': 'stateOrProvince', 'type': 'str'},
'country': {'key': 'country', 'type': 'str'},
'postal_code': {'key': 'postalCode', 'type': 'str'},
'zip_extended_code': {'key': 'zipExtendedCode', 'type': 'str'},
'company_name': {'key': 'companyName', 'type': 'str'},
'address_type': {'key': 'addressType', 'type': 'str'},
}
def __init__(
self,
*,
street_address1: str,
country: str,
postal_code: str,
street_address2: Optional[str] = None,
street_address3: Optional[str] = None,
city: Optional[str] = None,
state_or_province: Optional[str] = None,
zip_extended_code: Optional[str] = None,
company_name: Optional[str] = None,
address_type: Optional[Union[str, "AddressType"]] = None,
**kwargs
):
super(ShippingAddress, self).__init__(**kwargs)
self.street_address1 = street_address1
self.street_address2 = street_address2
self.street_address3 = street_address3
self.city = city
self.state_or_province = state_or_province
self.country = country
self.postal_code = postal_code
self.zip_extended_code = zip_extended_code
self.company_name = company_name
self.address_type = address_type
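# Usage sketch (illustrative only; the address below is fictional):
#
#     address = ShippingAddress(
#         street_address1="1 Example Way",
#         city="Redmond",
#         state_or_province="WA",
#         country="US",
#         postal_code="98052",
#         address_type="Commercial",
#     )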
class Sku(msrest.serialization.Model):
"""The Sku.
All required parameters must be populated in order to send to Azure.
:param name: Required. The sku name. Possible values include: "DataBox", "DataBoxDisk",
"DataBoxHeavy".
:type name: str or ~azure.mgmt.databox.models.SkuName
:param display_name: The display name of the sku.
:type display_name: str
:param family: The sku family.
:type family: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
}
def __init__(
self,
*,
name: Union[str, "SkuName"],
display_name: Optional[str] = None,
family: Optional[str] = None,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.family = family
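# Usage sketch (illustrative only): only ``name`` is required; ``display_name``
# and ``family`` are optional metadata, e.g.
#
#     sku = Sku(name="DataBoxDisk")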
class SkuAvailabilityValidationRequest(ValidationInputRequest):
"""Request to validate sku availability.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation request. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:param device_type: Required. Device type to be used for the job. Possible values include:
"DataBox", "DataBoxDisk", "DataBoxHeavy".
:type device_type: str or ~azure.mgmt.databox.models.SkuName
:ivar transfer_type: Required. Type of the transfer. Default value: "ImportToAzure".
:vartype transfer_type: str
:param country: Required. ISO country code. Country for hardware shipment. For codes check:
https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements.
:type country: str
:param location: Required. Location for data transfer. For locations check:
https://management.azure.com/subscriptions/SUBSCRIPTIONID/locations?api-version=2018-01-01.
:type location: str
"""
_validation = {
'validation_type': {'required': True},
'device_type': {'required': True},
'transfer_type': {'required': True, 'constant': True},
'country': {'required': True},
'location': {'required': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'device_type': {'key': 'deviceType', 'type': 'str'},
'transfer_type': {'key': 'transferType', 'type': 'str'},
'country': {'key': 'country', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
}
transfer_type = "ImportToAzure"
def __init__(
self,
*,
device_type: Union[str, "SkuName"],
country: str,
location: str,
**kwargs
):
super(SkuAvailabilityValidationRequest, self).__init__(**kwargs)
self.validation_type = 'ValidateSkuAvailability' # type: str
self.device_type = device_type
self.country = country
self.location = location
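# Usage sketch (illustrative only; the country and location values are
# assumptions). ``transfer_type`` is a class-level constant ("ImportToAzure")
# and is not passed to the constructor:
#
#     req = SkuAvailabilityValidationRequest(
#         device_type="DataBox", country="US", location="westus")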
class SkuAvailabilityValidationResponseProperties(ValidationInputResponse):
"""Properties of sku availability validation response.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation response. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:ivar error: Error code and message of validation response.
:vartype error: ~azure.mgmt.databox.models.Error
:ivar status: Sku availability validation status. Possible values include: "Valid", "Invalid",
"Skipped".
:vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
"""
_validation = {
'validation_type': {'required': True},
'error': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SkuAvailabilityValidationResponseProperties, self).__init__(**kwargs)
self.validation_type = 'ValidateSkuAvailability' # type: str
self.status = None
class SkuCapacity(msrest.serialization.Model):
"""Capacity of the sku.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar usable: Usable capacity in TB.
:vartype usable: str
:ivar maximum: Maximum capacity in TB.
:vartype maximum: str
"""
_validation = {
'usable': {'readonly': True},
'maximum': {'readonly': True},
}
_attribute_map = {
'usable': {'key': 'usable', 'type': 'str'},
'maximum': {'key': 'maximum', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SkuCapacity, self).__init__(**kwargs)
self.usable = None
self.maximum = None
class SkuCost(msrest.serialization.Model):
"""Describes metadata for retrieving price info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar meter_id: Meter id of the Sku.
:vartype meter_id: str
:ivar meter_type: The type of the meter.
:vartype meter_type: str
"""
_validation = {
'meter_id': {'readonly': True},
'meter_type': {'readonly': True},
}
_attribute_map = {
'meter_id': {'key': 'meterId', 'type': 'str'},
'meter_type': {'key': 'meterType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SkuCost, self).__init__(**kwargs)
self.meter_id = None
self.meter_type = None
class SkuInformation(msrest.serialization.Model):
"""Information of the sku.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar sku: The Sku.
:vartype sku: ~azure.mgmt.databox.models.Sku
:ivar enabled: The sku is enabled or not.
:vartype enabled: bool
:ivar destination_to_service_location_map: The map of destination location to service location.
:vartype destination_to_service_location_map:
list[~azure.mgmt.databox.models.DestinationToServiceLocationMap]
:ivar capacity: Capacity of the Sku.
:vartype capacity: ~azure.mgmt.databox.models.SkuCapacity
:ivar costs: Cost of the Sku.
:vartype costs: list[~azure.mgmt.databox.models.SkuCost]
:ivar api_versions: Api versions that support this Sku.
:vartype api_versions: list[str]
:ivar disabled_reason: Reason why the Sku is disabled. Possible values include: "None",
"Country", "Region", "Feature", "OfferType", "NoSubscriptionInfo".
:vartype disabled_reason: str or ~azure.mgmt.databox.models.SkuDisabledReason
:ivar disabled_reason_message: Message for why the Sku is disabled.
:vartype disabled_reason_message: str
:ivar required_feature: Required feature to access the sku.
:vartype required_feature: str
"""
_validation = {
'sku': {'readonly': True},
'enabled': {'readonly': True},
'destination_to_service_location_map': {'readonly': True},
'capacity': {'readonly': True},
'costs': {'readonly': True},
'api_versions': {'readonly': True},
'disabled_reason': {'readonly': True},
'disabled_reason_message': {'readonly': True},
'required_feature': {'readonly': True},
}
_attribute_map = {
'sku': {'key': 'sku', 'type': 'Sku'},
'enabled': {'key': 'enabled', 'type': 'bool'},
'destination_to_service_location_map': {'key': 'properties.destinationToServiceLocationMap', 'type': '[DestinationToServiceLocationMap]'},
'capacity': {'key': 'properties.capacity', 'type': 'SkuCapacity'},
'costs': {'key': 'properties.costs', 'type': '[SkuCost]'},
'api_versions': {'key': 'properties.apiVersions', 'type': '[str]'},
'disabled_reason': {'key': 'properties.disabledReason', 'type': 'str'},
'disabled_reason_message': {'key': 'properties.disabledReasonMessage', 'type': 'str'},
'required_feature': {'key': 'properties.requiredFeature', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SkuInformation, self).__init__(**kwargs)
self.sku = None
self.enabled = None
self.destination_to_service_location_map = None
self.capacity = None
self.costs = None
self.api_versions = None
self.disabled_reason = None
self.disabled_reason_message = None
self.required_feature = None
class SubscriptionIsAllowedToCreateJobValidationRequest(ValidationInputRequest):
"""Request to validate subscription permission to create jobs.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation request. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
"""
_validation = {
'validation_type': {'required': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SubscriptionIsAllowedToCreateJobValidationRequest, self).__init__(**kwargs)
self.validation_type = 'ValidateSubscriptionIsAllowedToCreateJob' # type: str
class SubscriptionIsAllowedToCreateJobValidationResponseProperties(ValidationInputResponse):
"""Properties of subscription permission to create job validation response.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation response. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:ivar error: Error code and message of validation response.
:vartype error: ~azure.mgmt.databox.models.Error
:ivar status: Validation status of subscription permission to create job. Possible values
include: "Valid", "Invalid", "Skipped".
:vartype status: str or ~azure.mgmt.databox.models.ValidationStatus
"""
_validation = {
'validation_type': {'required': True},
'error': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SubscriptionIsAllowedToCreateJobValidationResponseProperties, self).__init__(**kwargs)
self.validation_type = 'ValidateSubscriptionIsAllowedToCreateJob' # type: str
self.status = None
class TransportAvailabilityDetails(msrest.serialization.Model):
"""Transport options availability details for given region.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar shipment_type: Transport Shipment Type supported for given region. Possible values
include: "CustomerManaged", "MicrosoftManaged".
:vartype shipment_type: str or ~azure.mgmt.databox.models.TransportShipmentTypes
"""
_validation = {
'shipment_type': {'readonly': True},
}
_attribute_map = {
'shipment_type': {'key': 'shipmentType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TransportAvailabilityDetails, self).__init__(**kwargs)
self.shipment_type = None
class TransportAvailabilityRequest(msrest.serialization.Model):
"""Request body to get the transport availability for given sku.
:param sku_name: Type of the device. Possible values include: "DataBox", "DataBoxDisk",
"DataBoxHeavy".
:type sku_name: str or ~azure.mgmt.databox.models.SkuName
"""
_attribute_map = {
'sku_name': {'key': 'skuName', 'type': 'str'},
}
def __init__(
self,
*,
sku_name: Optional[Union[str, "SkuName"]] = None,
**kwargs
):
super(TransportAvailabilityRequest, self).__init__(**kwargs)
self.sku_name = sku_name
class TransportAvailabilityResponse(msrest.serialization.Model):
"""Transport options available for given sku in a region.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar transport_availability_details: List of transport availability details for given region.
:vartype transport_availability_details:
list[~azure.mgmt.databox.models.TransportAvailabilityDetails]
"""
_validation = {
'transport_availability_details': {'readonly': True},
}
_attribute_map = {
'transport_availability_details': {'key': 'transportAvailabilityDetails', 'type': '[TransportAvailabilityDetails]'},
}
def __init__(
self,
**kwargs
):
super(TransportAvailabilityResponse, self).__init__(**kwargs)
self.transport_availability_details = None
class TransportPreferences(msrest.serialization.Model):
"""Preferences related to the shipment logistics of the sku.
All required parameters must be populated in order to send to Azure.
:param preferred_shipment_type: Required. Indicates Shipment Logistics type that the customer
preferred. Possible values include: "CustomerManaged", "MicrosoftManaged".
:type preferred_shipment_type: str or ~azure.mgmt.databox.models.TransportShipmentTypes
"""
_validation = {
'preferred_shipment_type': {'required': True},
}
_attribute_map = {
'preferred_shipment_type': {'key': 'preferredShipmentType', 'type': 'str'},
}
def __init__(
self,
*,
preferred_shipment_type: Union[str, "TransportShipmentTypes"],
**kwargs
):
super(TransportPreferences, self).__init__(**kwargs)
self.preferred_shipment_type = preferred_shipment_type
class UnencryptedCredentials(msrest.serialization.Model):
"""Unencrypted credentials for accessing device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_name: Name of the job.
:vartype job_name: str
:ivar job_secrets: Secrets related to this job.
:vartype job_secrets: ~azure.mgmt.databox.models.JobSecrets
"""
_validation = {
'job_name': {'readonly': True},
'job_secrets': {'readonly': True},
}
_attribute_map = {
'job_name': {'key': 'jobName', 'type': 'str'},
'job_secrets': {'key': 'jobSecrets', 'type': 'JobSecrets'},
}
def __init__(
self,
**kwargs
):
super(UnencryptedCredentials, self).__init__(**kwargs)
self.job_name = None
self.job_secrets = None
class UnencryptedCredentialsList(msrest.serialization.Model):
"""List of unencrypted credentials for accessing device.
:param value: List of unencrypted credentials.
:type value: list[~azure.mgmt.databox.models.UnencryptedCredentials]
:param next_link: Link for the next set of unencrypted credentials.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[UnencryptedCredentials]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["UnencryptedCredentials"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(UnencryptedCredentialsList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class UpdateJobDetails(msrest.serialization.Model):
"""Job details for update.
:param contact_details: Contact details for notification and shipping.
:type contact_details: ~azure.mgmt.databox.models.ContactDetails
:param shipping_address: Shipping address of the customer.
:type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
"""
_attribute_map = {
'contact_details': {'key': 'contactDetails', 'type': 'ContactDetails'},
'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
}
def __init__(
self,
*,
contact_details: Optional["ContactDetails"] = None,
shipping_address: Optional["ShippingAddress"] = None,
**kwargs
):
super(UpdateJobDetails, self).__init__(**kwargs)
self.contact_details = contact_details
self.shipping_address = shipping_address
class ValidateAddress(ValidationInputRequest):
"""The requirements to validate customer address where the device needs to be shipped.
All required parameters must be populated in order to send to Azure.
    :param validation_type: Required. Identifies the type of validation request. Constant filled by
server. Possible values include: "ValidateAddress", "ValidateDataDestinationDetails",
"ValidateSubscriptionIsAllowedToCreateJob", "ValidatePreferences", "ValidateCreateOrderLimit",
"ValidateSkuAvailability".
:type validation_type: str or ~azure.mgmt.databox.models.ValidationInputDiscriminator
:param shipping_address: Required. Shipping address of the customer.
:type shipping_address: ~azure.mgmt.databox.models.ShippingAddress
:param device_type: Required. Device type to be used for the job. Possible values include:
"DataBox", "DataBoxDisk", "DataBoxHeavy".
:type device_type: str or ~azure.mgmt.databox.models.SkuName
:param transport_preferences: Preferences related to the shipment logistics of the sku.
:type transport_preferences: ~azure.mgmt.databox.models.TransportPreferences
"""
_validation = {
'validation_type': {'required': True},
'shipping_address': {'required': True},
'device_type': {'required': True},
}
_attribute_map = {
'validation_type': {'key': 'validationType', 'type': 'str'},
'shipping_address': {'key': 'shippingAddress', 'type': 'ShippingAddress'},
'device_type': {'key': 'deviceType', 'type': 'str'},
'transport_preferences': {'key': 'transportPreferences', 'type': 'TransportPreferences'},
}
def __init__(
self,
*,
shipping_address: "ShippingAddress",
device_type: Union[str, "SkuName"],
transport_preferences: Optional["TransportPreferences"] = None,
**kwargs
):
super(ValidateAddress, self).__init__(**kwargs)
self.validation_type = 'ValidateAddress' # type: str
self.shipping_address = shipping_address
self.device_type = device_type
self.transport_preferences = transport_preferences
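# Usage sketch (illustrative only; ``address`` refers to the fictional
# ShippingAddress example above):
#
#     check = ValidateAddress(shipping_address=address, device_type="DataBox")
#
# The ``validation_type`` discriminator is fixed to "ValidateAddress" by the
# constructor.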
class ValidationResponse(msrest.serialization.Model):
"""Response of pre job creation validations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar status: Overall validation status. Possible values include: "AllValidToProceed",
"InputsRevisitRequired", "CertainInputValidationsSkipped".
:vartype status: str or ~azure.mgmt.databox.models.OverallValidationStatus
:ivar individual_response_details: List of response details contain validationType and its
response as key and value respectively.
:vartype individual_response_details: list[~azure.mgmt.databox.models.ValidationInputResponse]
"""
_validation = {
'status': {'readonly': True},
'individual_response_details': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'properties.status', 'type': 'str'},
'individual_response_details': {'key': 'properties.individualResponseDetails', 'type': '[ValidationInputResponse]'},
}
def __init__(
self,
**kwargs
):
super(ValidationResponse, self).__init__(**kwargs)
self.status = None
self.individual_response_details = None
| mit | -3,939,076,954,756,208,600 | 38.867614 | 504 | 0.656142 | false |
ssarangi/numba | numba/typing/listdecl.py | 3 | 6524 | from __future__ import absolute_import, print_function
import random
import numpy as np
from .. import types
from .templates import (ConcreteTemplate, AbstractTemplate, AttributeTemplate,
CallableTemplate, Registry, signature, bound_function,
make_callable_template)
# Ensure list is typed as a collection as well
from . import collections
registry = Registry()
builtin = registry.register
builtin_global = registry.register_global
builtin_attr = registry.register_attr
class ListBuiltin(AbstractTemplate):
key = list
def generic(self, args, kws):
assert not kws
if args:
iterable, = args
if isinstance(iterable, types.IterableType):
dtype = iterable.iterator_type.yield_type
return signature(types.List(dtype), iterable)
builtin_global(list, types.Function(ListBuiltin))
class SortedBuiltin(CallableTemplate):
key = sorted
def generic(self):
def typer(iterable, reverse=None):
if not isinstance(iterable, types.IterableType):
return
if (reverse is not None and
not isinstance(reverse, types.Boolean)):
return
return types.List(iterable.iterator_type.yield_type)
return typer
builtin_global(sorted, types.Function(SortedBuiltin))
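# Illustrative sketch (assumption: called from jitted code; not part of this
# module). With the templates above registered, a function such as
#
#     @njit
#     def smallest(n):
#         return sorted(list(range(n)))[0]
#
# is typed via ListBuiltin (``list(range(n))`` -> List of the iterator's yield
# type) and SortedBuiltin (``sorted`` of an iterable -> List of its yield type).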
@builtin_attr
class ListAttribute(AttributeTemplate):
key = types.List
# NOTE: some of these should be Sequence / MutableSequence methods
@bound_function("list.append")
def resolve_append(self, list, args, kws):
item, = args
assert not kws
unified = self.context.unify_pairs(list.dtype, item)
sig = signature(types.none, unified)
sig.recvr = list.copy(dtype=unified)
return sig
@bound_function("list.clear")
def resolve_clear(self, list, args, kws):
assert not args
assert not kws
return signature(types.none)
@bound_function("list.copy")
def resolve_copy(self, list, args, kws):
assert not args
assert not kws
return signature(list)
@bound_function("list.count")
def resolve_count(self, list, args, kws):
item, = args
assert not kws
return signature(types.intp, list.dtype)
@bound_function("list.extend")
def resolve_extend(self, list, args, kws):
iterable, = args
assert not kws
if not isinstance(iterable, types.IterableType):
return
dtype = iterable.iterator_type.yield_type
unified = self.context.unify_pairs(list.dtype, dtype)
sig = signature(types.none, iterable)
sig.recvr = list.copy(dtype=unified)
return sig
@bound_function("list.index")
def resolve_index(self, list, args, kws):
assert not kws
if len(args) == 1:
return signature(types.intp, list.dtype)
elif len(args) == 2:
if isinstance(args[1], types.Integer):
return signature(types.intp, list.dtype, types.intp)
elif len(args) == 3:
if (isinstance(args[1], types.Integer)
and isinstance(args[2], types.Integer)):
return signature(types.intp, list.dtype, types.intp, types.intp)
@bound_function("list.insert")
def resolve_insert(self, list, args, kws):
idx, item = args
assert not kws
if isinstance(idx, types.Integer):
unified = self.context.unify_pairs(list.dtype, item)
sig = signature(types.none, types.intp, unified)
sig.recvr = list.copy(dtype=unified)
return sig
@bound_function("list.pop")
def resolve_pop(self, list, args, kws):
assert not kws
if not args:
return signature(list.dtype)
else:
idx, = args
if isinstance(idx, types.Integer):
return signature(list.dtype, types.intp)
@bound_function("list.remove")
def resolve_remove(self, list, args, kws):
assert not kws
if len(args) == 1:
return signature(types.none, list.dtype)
@bound_function("list.reverse")
def resolve_reverse(self, list, args, kws):
assert not args
assert not kws
return signature(types.none)
def resolve_sort(self, list):
def typer(reverse=None):
if (reverse is not None and
not isinstance(reverse, types.Boolean)):
return
return types.none
return types.BoundFunction(make_callable_template(key="list.sort",
typer=typer,
recvr=list),
list)
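# Illustrative sketch (assumption: called from jitted code; not part of this
# module). The bound-function templates above type the usual list methods, e.g.
#
#     @njit
#     def collect(n):
#         out = [0]
#         for i in range(n):
#             out.append(i)
#         out.sort()
#         return out.pop()
#
# ``append`` and ``insert`` return signatures whose receiver carries the
# unified dtype, which is how mixed-type appends are reconciled during typing.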
@builtin
class AddList(AbstractTemplate):
key = "+"
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if isinstance(a, types.List) and isinstance(b, types.List):
unified = self.context.unify_pairs(a, b)
return signature(unified, a, b)
@builtin
class InplaceAddList(AbstractTemplate):
key = "+="
def generic(self, args, kws):
if len(args) == 2:
a, b = args
if isinstance(a, types.List) and isinstance(b, types.List):
if self.context.can_convert(b.dtype, a.dtype):
return signature(a, a, b)
@builtin
class MulList(AbstractTemplate):
key = "*"
def generic(self, args, kws):
a, b = args
if isinstance(a, types.List) and isinstance(b, types.Integer):
return signature(a, a, types.intp)
@builtin
class InplaceMulList(MulList):
key = "*="
class ListCompare(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if isinstance(lhs, types.List) and isinstance(rhs, types.List):
# Check element-wise comparability
res = self.context.resolve_function_type(self.key,
(lhs.dtype, rhs.dtype), {})
if res is not None:
return signature(types.boolean, lhs, rhs)
@builtin
class ListEq(ListCompare):
key = '=='
@builtin
class ListNe(ListCompare):
key = '!='
@builtin
class ListLt(ListCompare):
key = '<'
@builtin
class ListLe(ListCompare):
key = '<='
@builtin
class ListGt(ListCompare):
key = '>'
@builtin
class ListGe(ListCompare):
key = '>='
| bsd-2-clause | -5,609,010,772,477,457,000 | 27.489083 | 80 | 0.587523 | false |
ladybug-analysis-tools/ladybug-core | tests/psychrometrics_test.py | 1 | 11148 | # coding=utf-8
from ladybug.psychrometrics import humid_ratio_from_db_rh, enthalpy_from_db_hr, \
wet_bulb_from_db_rh, dew_point_from_db_rh, rel_humid_from_db_hr, \
rel_humid_from_db_enth, rel_humid_from_db_dpt, rel_humid_from_db_wb, \
dew_point_from_db_hr, dew_point_from_db_enth, dew_point_from_db_wb, \
db_temp_from_enth_hr, db_temp_from_rh_hr, db_temp_and_hr_from_wb_rh, \
dew_point_from_db_rh_fast, wet_bulb_from_db_rh_fast, wet_bulb_from_db_hr
import pytest
def test_humid_ratio_from_db_rh():
"""Test the accuracy of the humid_ratio_from_db_rh function."""
assert humid_ratio_from_db_rh(30, 0) == pytest.approx(0, rel=1e-3)
assert humid_ratio_from_db_rh(30, 50) == pytest.approx(0.013314, rel=1e-3)
assert humid_ratio_from_db_rh(30, 100) == pytest.approx(0.02721, rel=1e-3)
assert humid_ratio_from_db_rh(20, 0) == pytest.approx(0, rel=1e-3)
assert humid_ratio_from_db_rh(20, 50) == pytest.approx(0.00726, rel=1e-3)
assert humid_ratio_from_db_rh(20, 100) == pytest.approx(0.014698, rel=1e-3)
assert humid_ratio_from_db_rh(-20, 0) == pytest.approx(0, rel=1e-3)
assert humid_ratio_from_db_rh(-20, 50) == pytest.approx(0.0003173, rel=1e-3)
assert humid_ratio_from_db_rh(-20, 100) == pytest.approx(0.00063508, rel=1e-3)
def test_enthalpy_from_db_hr():
"""Test the accuracy of the enthalpy_from_db_hr function."""
assert enthalpy_from_db_hr(30, 0) == pytest.approx(30.18, rel=1e-3)
assert enthalpy_from_db_hr(30, 0.0133) == pytest.approx(64.18544, rel=1e-3)
assert enthalpy_from_db_hr(30, 0.02721) == pytest.approx(99.750528, rel=1e-3)
assert enthalpy_from_db_hr(20, 0) == pytest.approx(20.12, rel=1e-3)
assert enthalpy_from_db_hr(20, 0.00726) == pytest.approx(38.547332, rel=1e-3)
assert enthalpy_from_db_hr(20, 0.01469) == pytest.approx(57.406158, rel=1e-3)
assert enthalpy_from_db_hr(-20, 0) == pytest.approx(0, rel=1e-3)
assert enthalpy_from_db_hr(-20, 0.00031738) == pytest.approx(0, rel=1e-3)
assert enthalpy_from_db_hr(-20, 0.000635) == pytest.approx(0, rel=1e-3)
assert enthalpy_from_db_hr(-20, 0, -273.15) == pytest.approx(254.66889, rel=1e-3)
assert enthalpy_from_db_hr(-20, 0.00031738, -273.15) == pytest.approx(255.6121, rel=1e-3)
assert enthalpy_from_db_hr(-20, 0.000635, -273.15) == pytest.approx(256.556, rel=1e-3)
def test_dew_point_from_db_rh():
"""Test the accuracy of the dew_point_from_db_rh function."""
assert dew_point_from_db_rh(30, 0) == pytest.approx(-273.15, rel=1e-3)
assert dew_point_from_db_rh(30, 50) == pytest.approx(18.4466, rel=1e-3)
assert dew_point_from_db_rh(30, 100) == pytest.approx(30, rel=1e-3)
assert dew_point_from_db_rh(20, 0) == pytest.approx(-273.15, rel=1e-3)
assert dew_point_from_db_rh(20, 50) == pytest.approx(9.27239, rel=1e-3)
assert dew_point_from_db_rh(20, 100) == pytest.approx(20, rel=1e-3)
assert dew_point_from_db_rh(-20, 0) == pytest.approx(-273.15, rel=1e-3)
assert dew_point_from_db_rh(-20, 50) == pytest.approx(-27.0217, rel=1e-3)
assert dew_point_from_db_rh(-20, 100) == pytest.approx(-20, rel=1e-3)
assert dew_point_from_db_rh(180, 10) == pytest.approx(99.6844, rel=1e-3)
assert dew_point_from_db_rh(180, 50) == pytest.approx(151.9373, rel=1e-3)
assert dew_point_from_db_rh(180, 100) == pytest.approx(180, rel=1e-3)
assert dew_point_from_db_rh(-80, 10) == pytest.approx(-93.065214, rel=1e-3)
assert dew_point_from_db_rh(-80, 50) == pytest.approx(-84.125, rel=1e-3)
assert dew_point_from_db_rh(-80, 100) == pytest.approx(-80, rel=1e-3)
def test_wet_bulb_from_db_rh():
"""Test the accuracy of the wet_bulb_from_db_rh function."""
assert wet_bulb_from_db_rh(30, 0) == pytest.approx(10.49804, rel=1e-3)
assert wet_bulb_from_db_rh(30, 50) == pytest.approx(22.011934, rel=1e-3)
assert wet_bulb_from_db_rh(30, 100) == pytest.approx(30.0, rel=1e-3)
assert wet_bulb_from_db_rh(20, 0) == pytest.approx(5.865, rel=1e-3)
assert wet_bulb_from_db_rh(20, 50) == pytest.approx(13.7562, rel=1e-3)
assert wet_bulb_from_db_rh(20, 100) == pytest.approx(20, rel=1e-3)
assert wet_bulb_from_db_rh(-20, 0) == pytest.approx(-21.5142, rel=1e-3)
assert wet_bulb_from_db_rh(-20, 50) == pytest.approx(-20.7405, rel=1e-3)
assert wet_bulb_from_db_rh(-20, 100) == pytest.approx(-20, rel=1e-3)
def test_wet_bulb_from_db_hr():
"""Test the accuracy of the wet_bulb_from_db_hr function."""
assert wet_bulb_from_db_hr(30, 0.01) == pytest.approx(19.622532, rel=1e-3)
assert wet_bulb_from_db_hr(20, 0.005) == pytest.approx(11.54350508, rel=1e-3)
def test_rel_humid_from_db_hr():
"""Test the accuracy of the rel_humid_from_db_hr function."""
assert rel_humid_from_db_hr(30, 0) == pytest.approx(0, rel=1e-2)
assert rel_humid_from_db_hr(30, 0.0133) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_hr(30, 0.02721) == pytest.approx(100, rel=1e-2)
assert rel_humid_from_db_hr(20, 0) == pytest.approx(0, rel=1e-2)
assert rel_humid_from_db_hr(20, 0.00726) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_hr(20, 0.01469) == pytest.approx(100, rel=1e-2)
assert rel_humid_from_db_hr(-20, 0) == pytest.approx(0, rel=1e-2)
assert rel_humid_from_db_hr(-20, 0.00031738) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_hr(-20, 0.000635) == pytest.approx(100, rel=1e-2)
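def test_humid_ratio_rel_humid_round_trip():
    """Added illustrative check (not part of the original suite): humidity
    ratio and relative humidity should round-trip for a moderate state."""
    hr = humid_ratio_from_db_rh(25, 40)
    assert rel_humid_from_db_hr(25, hr) == pytest.approx(40, rel=1e-2)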
def test_rel_humid_from_db_enth():
"""Test the accuracy of the rel_humid_from_db_enth function."""
assert rel_humid_from_db_enth(30, 30.18) == pytest.approx(0, rel=1e-2)
assert rel_humid_from_db_enth(30, 64.18544) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_enth(30, 99.750528) == pytest.approx(100, rel=1e-2)
assert rel_humid_from_db_enth(20, 20.12) == pytest.approx(0, rel=1e-2)
assert rel_humid_from_db_enth(20, 38.547332) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_enth(20, 57.406158) == pytest.approx(100, rel=1e-2)
assert rel_humid_from_db_enth(-20, 0) > 100
assert rel_humid_from_db_enth(-20, 255.6121, reference_temp=-273.15) == pytest.approx(50., rel=1e-2)
assert rel_humid_from_db_enth(-20, 256.556, reference_temp=-273.15) == pytest.approx(100., rel=1e-2)
def test_rel_humid_from_db_dpt():
"""Test the accuracy of the rel_humid_from_db_dpt function."""
assert rel_humid_from_db_dpt(30, 18.45805) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_dpt(30, 30) == pytest.approx(100, rel=1e-2)
assert rel_humid_from_db_dpt(20, 9.270086) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_dpt(20, 20) == pytest.approx(100, rel=1e-2)
assert rel_humid_from_db_dpt(-20, -27.0215503) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_dpt(-20, -20) == pytest.approx(100, rel=1e-2)
def test_rel_humid_from_db_wb():
"""Test the accuracy of the rel_humid_from_db_wb function."""
assert rel_humid_from_db_wb(30, 10.4980) < 1
assert rel_humid_from_db_wb(30, 22.01193) == pytest.approx(50, rel=1e-2)
assert rel_humid_from_db_wb(30, 30.0) == pytest.approx(100, rel=1e-2)
assert rel_humid_from_db_wb(20, 5.8649597) < 1
assert rel_humid_from_db_wb(20, 13.756197) == pytest.approx(50, rel=1e-1)
assert rel_humid_from_db_wb(20, 20) == pytest.approx(100, rel=1e-2)
assert rel_humid_from_db_wb(-20, -21.51420288086) < 1
assert 45 < rel_humid_from_db_wb(-20, -20.74057642) < 55
assert rel_humid_from_db_wb(-20, -20) == pytest.approx(100, rel=1e-2)
def test_dew_point_from_db_hr():
"""Test the accuracy of the dew_point_from_db_hr function."""
assert dew_point_from_db_hr(30, 0.015) == pytest.approx(20.330675, rel=1e-3)
assert dew_point_from_db_hr(20, 0.01) == pytest.approx(14.0418, rel=1e-3)
assert dew_point_from_db_hr(-20, 0.0003) == pytest.approx(-27.5661, rel=1e-3)
def test_dew_point_from_db_enth():
"""Test the accuracy of the dew_point_from_db_enth function."""
assert dew_point_from_db_enth(30, 64.18544) == pytest.approx(18.43351, rel=1e-2)
assert dew_point_from_db_enth(20, 38.547332) == pytest.approx(9.2678, rel=1e-2)
assert dew_point_from_db_enth(-20, 0) == -20
assert dew_point_from_db_enth(-20, 255.6121, reference_temp=-273.15) == pytest.approx(-27.01307, rel=1e-2)
def test_dew_point_from_db_wb():
"""Test the accuracy of the dew_point_from_db_wb function."""
assert dew_point_from_db_wb(30, 22.144) == pytest.approx(18.593726, rel=1e-3)
assert dew_point_from_db_wb(20, 13.88) == pytest.approx(9.35052249, rel=1e-3)
assert dew_point_from_db_wb(-20, -20.84) == pytest.approx(-29.78065, rel=1e-3)
def test_db_temp_from_enth_hr():
"""Test the accuracy of the db_temp_from_enth_hr function."""
assert db_temp_from_enth_hr(60, 0.015) == pytest.approx(21.74775, rel=1e-3)
assert db_temp_from_enth_hr(60, 0.01) == pytest.approx(34.1499, rel=1e-3)
assert db_temp_from_enth_hr(30, 0.005) == pytest.approx(17.23136, rel=1e-3)
def test_db_temp_from_rh_hr():
"""Test the accuracy of the db_temp_from_rh_hr function."""
assert db_temp_from_rh_hr(100, 0.3) == pytest.approx(71.365, rel=1e-3)
def test_db_temp_and_hr_from_wb_rh():
"""Test the accuracy of the db_temp_and_hr_from_wb_rh function."""
t, hr = db_temp_and_hr_from_wb_rh(20, 100)
assert t == pytest.approx(20.0, rel=1e-3)
assert hr == pytest.approx(0.01469, rel=1e-3)
t, hr = db_temp_and_hr_from_wb_rh(20, 0)
assert t == pytest.approx(53.04558, rel=1e-3)
assert hr == pytest.approx(0.0, rel=1e-3)
def test_dew_point_from_db_rh_fast():
"""Test the accuracy of the dew_point_from_db_rh_fast function."""
assert dew_point_from_db_rh_fast(30, 0) == pytest.approx(-273.15, rel=1e-3)
assert dew_point_from_db_rh_fast(30, 50) == pytest.approx(18.45805, rel=1e-3)
assert dew_point_from_db_rh_fast(30, 100) == pytest.approx(30, rel=1e-3)
assert dew_point_from_db_rh_fast(20, 0) == pytest.approx(-273.15, rel=1e-3)
assert dew_point_from_db_rh_fast(20, 50) == pytest.approx(9.270086, rel=1e-3)
assert dew_point_from_db_rh_fast(20, 100) == pytest.approx(20, rel=1e-3)
assert dew_point_from_db_rh_fast(-20, 0) == pytest.approx(-273.15, rel=1e-3)
assert dew_point_from_db_rh_fast(-20, 50) == pytest.approx(-27.76753, rel=1e-3)
assert dew_point_from_db_rh_fast(-20, 100) == pytest.approx(-20, rel=1e-3)
def test_wet_bulb_from_db_rh_fast():
"""Test the accuracy of the wet_bulb_from_db_rh_fast function."""
assert wet_bulb_from_db_rh_fast(30, 0) == pytest.approx(10.871, rel=1e-3)
assert wet_bulb_from_db_rh_fast(30, 50) == pytest.approx(22.144, rel=1e-3)
assert wet_bulb_from_db_rh_fast(30, 100) == pytest.approx(29.0, rel=1e-3)
assert wet_bulb_from_db_rh_fast(20, 0) == pytest.approx(6.07, rel=1e-3)
assert wet_bulb_from_db_rh_fast(20, 50) == pytest.approx(13.88, rel=1e-3)
assert wet_bulb_from_db_rh_fast(20, 100) == pytest.approx(20, rel=1e-3)
assert wet_bulb_from_db_rh_fast(-20, 0) == pytest.approx(-21.69, rel=1e-3)
assert wet_bulb_from_db_rh_fast(-20, 50) == pytest.approx(-20.84, rel=1e-3)
assert wet_bulb_from_db_rh_fast(-20, 100) == pytest.approx(-20, rel=1e-3)
| gpl-3.0 | 5,913,900,713,481,346,000 | 55.877551 | 110 | 0.658055 | false |
DCSaunders/tensorflow | tensorflow/contrib/layers/python/layers/regularizers.py | 21 | 6968 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regularizers for use with layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.platform import tf_logging as logging
__all__ = ['l1_regularizer',
'l2_regularizer',
'l1_l2_regularizer',
'sum_regularizer',
'apply_regularization']
def l1_regularizer(scale, scope=None):
"""Returns a function that can be used to apply L1 regularization to weights.
L1 regularization encourages sparsity.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
    A function with signature `l1(weights)` that applies L1 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def l1(weights, name=None):
"""Applies L1 regularization to weights."""
with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
my_scale = ops.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return standard_ops.mul(
my_scale,
standard_ops.reduce_sum(standard_ops.abs(weights)),
name=name)
return l1
def l2_regularizer(scale, scope=None):
"""Returns a function that can be used to apply L2 regularization to weights.
Small values of L2 can help prevent overfitting the training data.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
A function with signature `l2(weights)` that applies L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % (scale,))
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def l2(weights):
"""Applies l2 regularization to weights."""
with ops.name_scope(scope, 'l2_regularizer', [weights]) as name:
my_scale = ops.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return standard_ops.mul(my_scale, nn.l2_loss(weights), name=name)
return l2
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
"""Returns a function that can be used to apply L1 L2 regularizations.
Args:
scale_l1: A scalar multiplier `Tensor` for L1 regularization.
scale_l2: A scalar multiplier `Tensor` for L2 regularization.
scope: An optional scope name.
Returns:
A function with signature `l1_l2(weights)` that applies a weighted sum of
L1 L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
scope = scope or 'l1_l2_regularizer'
return sum_regularizer([l1_regularizer(scale_l1),
l2_regularizer(scale_l2)],
scope=scope)
def sum_regularizer(regularizer_list, scope=None):
"""Returns a function that applies the sum of multiple regularizers.
Args:
regularizer_list: A list of regularizers to apply.
scope: An optional scope name
Returns:
A function with signature `sum_reg(weights)` that applies the
sum of all the input regularizers.
"""
regularizer_list = [reg for reg in regularizer_list if reg is not None]
if not regularizer_list:
return None
def sum_reg(weights):
"""Applies the sum of all the input regularizers."""
with ops.name_scope(scope, 'sum_regularizer', [weights]) as name:
regularizer_tensors = [reg(weights) for reg in regularizer_list]
return math_ops.add_n(regularizer_tensors, name=name)
return sum_reg
def apply_regularization(regularizer, weights_list=None):
"""Returns the summed penalty by applying `regularizer` to the `weights_list`.
Adding a regularization penalty over the layer weights and embedding weights
can help prevent overfitting the training data. Regularization over layer
biases is less common/useful, but assuming proper data preprocessing/mean
subtraction, it usually shouldn't hurt much either.
Args:
regularizer: A function that takes a single `Tensor` argument and returns
a scalar `Tensor` output.
weights_list: List of weights `Tensors` or `Variables` to apply
`regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
`None`.
Returns:
A scalar representing the overall regularization penalty.
Raises:
ValueError: If `regularizer` does not return a scalar output, or if we find
no weights.
"""
if not weights_list:
weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
if not weights_list:
raise ValueError('No weights to regularize.')
with ops.name_scope('get_regularization_penalty',
values=weights_list) as scope:
penalties = [regularizer(w) for w in weights_list]
penalties = [
p if p is not None else constant_op.constant(0.0) for p in penalties
]
for p in penalties:
if p.get_shape().ndims != 0:
raise ValueError('regularizer must return a scalar Tensor instead of a '
'Tensor with rank %d.' % p.get_shape().ndims)
summed_penalty = math_ops.add_n(penalties, name=scope)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
return summed_penalty
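# Hedged usage sketch (not part of this module): combining the regularizers above
# over a couple of weight tensors. The shapes and scale values below are
# illustrative assumptions, not values taken from the library.
#
#   import tensorflow as tf
#   from tensorflow.contrib.layers import (l1_regularizer, l2_regularizer,
#                                           sum_regularizer, apply_regularization)
#
#   weights = [tf.Variable(tf.ones([3, 3])), tf.Variable(tf.ones([3]))]
#   reg = sum_regularizer([l1_regularizer(0.01), l2_regularizer(0.001)])
#   penalty = apply_regularization(reg, weights_list=weights)  # scalar Tensor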
| apache-2.0 | 3,572,463,907,771,781,600 | 34.55102 | 80 | 0.671929 | false |
linlife/Python | lin_jumper/author_agent/paramiko-1.10.1/paramiko/util.py | 8 | 9621 | # Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Useful functions used by the rest of paramiko.
"""
from __future__ import generators
import array
from binascii import hexlify, unhexlify
import errno
import sys
import struct
import traceback
import threading
from paramiko.common import *
from paramiko.config import SSHConfig
# Change by RogerB - python < 2.3 doesn't have enumerate so we implement it
if sys.version_info < (2,3):
class enumerate:
def __init__ (self, sequence):
self.sequence = sequence
def __iter__ (self):
count = 0
for item in self.sequence:
yield (count, item)
count += 1
def inflate_long(s, always_positive=False):
"turns a normalized byte string into a long-int (adapted from Crypto.Util.number)"
out = 0L
negative = 0
if not always_positive and (len(s) > 0) and (ord(s[0]) >= 0x80):
negative = 1
if len(s) % 4:
filler = '\x00'
if negative:
filler = '\xff'
s = filler * (4 - len(s) % 4) + s
for i in range(0, len(s), 4):
out = (out << 32) + struct.unpack('>I', s[i:i+4])[0]
if negative:
out -= (1L << (8 * len(s)))
return out
def deflate_long(n, add_sign_padding=True):
"turns a long-int into a normalized byte string (adapted from Crypto.Util.number)"
# after much testing, this algorithm was deemed to be the fastest
s = ''
n = long(n)
while (n != 0) and (n != -1):
s = struct.pack('>I', n & 0xffffffffL) + s
n = n >> 32
# strip off leading zeros, FFs
for i in enumerate(s):
if (n == 0) and (i[1] != '\000'):
break
if (n == -1) and (i[1] != '\xff'):
break
else:
# degenerate case, n was either 0 or -1
i = (0,)
if n == 0:
s = '\000'
else:
s = '\xff'
s = s[i[0]:]
if add_sign_padding:
if (n == 0) and (ord(s[0]) >= 0x80):
s = '\x00' + s
if (n == -1) and (ord(s[0]) < 0x80):
s = '\xff' + s
return s
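# Illustrative round trip between the two helpers above (the byte values are
# just an example): deflate_long(258) yields '\x01\x02', and inflate_long maps
# it back to the integer.
#
#   assert deflate_long(258) == '\x01\x02'
#   assert inflate_long('\x01\x02') == 258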
def format_binary_weird(data):
out = ''
for i in enumerate(data):
out += '%02X' % ord(i[1])
if i[0] % 2:
out += ' '
if i[0] % 16 == 15:
out += '\n'
return out
def format_binary(data, prefix=''):
x = 0
out = []
while len(data) > x + 16:
out.append(format_binary_line(data[x:x+16]))
x += 16
if x < len(data):
out.append(format_binary_line(data[x:]))
return [prefix + x for x in out]
def format_binary_line(data):
left = ' '.join(['%02X' % ord(c) for c in data])
right = ''.join([('.%c..' % c)[(ord(c)+63)//95] for c in data])
return '%-50s %s' % (left, right)
def hexify(s):
return hexlify(s).upper()
def unhexify(s):
return unhexlify(s)
def safe_string(s):
out = ''
for c in s:
if (ord(c) >= 32) and (ord(c) <= 127):
out += c
else:
out += '%%%02X' % ord(c)
return out
# ''.join([['%%%02X' % ord(c), c][(ord(c) >= 32) and (ord(c) <= 127)] for c in s])
def bit_length(n):
norm = deflate_long(n, 0)
hbyte = ord(norm[0])
if hbyte == 0:
return 1
bitlen = len(norm) * 8
while not (hbyte & 0x80):
hbyte <<= 1
bitlen -= 1
return bitlen
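# Worked examples for bit_length (values chosen only for illustration):
#
#   assert bit_length(255) == 8    # 0xFF needs eight bits
#   assert bit_length(256) == 9    # 0x100 needs nine bits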
def tb_strings():
return ''.join(traceback.format_exception(*sys.exc_info())).split('\n')
def generate_key_bytes(hashclass, salt, key, nbytes):
"""
Given a password, passphrase, or other human-source key, scramble it
through a secure hash into some keyworthy bytes. This specific algorithm
is used for encrypting/decrypting private key files.
@param hashclass: class from L{Crypto.Hash} that can be used as a secure
hashing function (like C{MD5} or C{SHA}).
@type hashclass: L{Crypto.Hash}
@param salt: data to salt the hash with.
@type salt: string
@param key: human-entered password or passphrase.
@type key: string
@param nbytes: number of bytes to generate.
@type nbytes: int
@return: key data
@rtype: string
"""
keydata = ''
digest = ''
if len(salt) > 8:
salt = salt[:8]
while nbytes > 0:
hash_obj = hashclass.new()
if len(digest) > 0:
hash_obj.update(digest)
hash_obj.update(key)
hash_obj.update(salt)
digest = hash_obj.digest()
size = min(nbytes, len(digest))
keydata += digest[:size]
nbytes -= size
return keydata
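# Hedged example of the key-derivation helper above, using PyCrypto's MD5 hash.
# The salt and passphrase literals are placeholders, not values used anywhere
# in paramiko.
#
#   from Crypto.Hash import MD5
#   key = generate_key_bytes(MD5, '\x01\x02\x03\x04\x05\x06\x07\x08', 'secret', 32)
#   assert len(key) == 32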
def load_host_keys(filename):
"""
Read a file of known SSH host keys, in the format used by openssh, and
return a compound dict of C{hostname -> keytype ->} L{PKey <paramiko.pkey.PKey>}.
The hostname may be an IP address or DNS name. The keytype will be either
C{"ssh-rsa"} or C{"ssh-dss"}.
This type of file unfortunately doesn't exist on Windows, but on posix,
it will usually be stored in C{os.path.expanduser("~/.ssh/known_hosts")}.
Since 1.5.3, this is just a wrapper around L{HostKeys}.
@param filename: name of the file to read host keys from
@type filename: str
@return: dict of host keys, indexed by hostname and then keytype
@rtype: dict(hostname, dict(keytype, L{PKey <paramiko.pkey.PKey>}))
"""
from paramiko.hostkeys import HostKeys
return HostKeys(filename)
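# Hedged usage sketch (the path is an assumption about a typical posix setup):
#
#   import os
#   known = load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))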
def parse_ssh_config(file_obj):
"""
Provided only as a backward-compatible wrapper around L{SSHConfig}.
"""
config = SSHConfig()
config.parse(file_obj)
return config
def lookup_ssh_host_config(hostname, config):
"""
Provided only as a backward-compatible wrapper around L{SSHConfig}.
"""
return config.lookup(hostname)
def mod_inverse(x, m):
# it's crazy how small python can make this function.
u1, u2, u3 = 1, 0, m
v1, v2, v3 = 0, 1, x
while v3 > 0:
q = u3 // v3
u1, v1 = v1, u1 - v1 * q
u2, v2 = v2, u2 - v2 * q
u3, v3 = v3, u3 - v3 * q
if u2 < 0:
u2 += m
return u2
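# Worked example for mod_inverse (numbers chosen only for illustration):
# 3 * 5 == 15 == 2 * 7 + 1, so 5 is the inverse of 3 modulo 7.
#
#   assert mod_inverse(3, 7) == 5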
_g_thread_ids = {}
_g_thread_counter = 0
_g_thread_lock = threading.Lock()
def get_thread_id():
global _g_thread_ids, _g_thread_counter, _g_thread_lock
tid = id(threading.currentThread())
try:
return _g_thread_ids[tid]
except KeyError:
_g_thread_lock.acquire()
try:
_g_thread_counter += 1
ret = _g_thread_ids[tid] = _g_thread_counter
finally:
_g_thread_lock.release()
return ret
def log_to_file(filename, level=DEBUG):
"send paramiko logs to a logfile, if they're not already going somewhere"
l = logging.getLogger("paramiko")
if len(l.handlers) > 0:
return
l.setLevel(level)
f = open(filename, 'w')
lh = logging.StreamHandler(f)
lh.setFormatter(logging.Formatter('%(levelname)-.3s [%(asctime)s.%(msecs)03d] thr=%(_threadid)-3d %(name)s: %(message)s',
'%Y%m%d-%H:%M:%S'))
l.addHandler(lh)
# make only one filter object, so it doesn't get applied more than once
class PFilter (object):
def filter(self, record):
record._threadid = get_thread_id()
return True
_pfilter = PFilter()
def get_logger(name):
l = logging.getLogger(name)
l.addFilter(_pfilter)
return l
def retry_on_signal(function):
"""Retries function until it doesn't raise an EINTR error"""
while True:
try:
return function()
except EnvironmentError, e:
if e.errno != errno.EINTR:
raise
class Counter (object):
"""Stateful counter for CTR mode crypto"""
def __init__(self, nbits, initial_value=1L, overflow=0L):
self.blocksize = nbits / 8
self.overflow = overflow
# start with value - 1 so we don't have to store intermediate values when counting
# could the iv be 0?
if initial_value == 0:
self.value = array.array('c', '\xFF' * self.blocksize)
else:
x = deflate_long(initial_value - 1, add_sign_padding=False)
self.value = array.array('c', '\x00' * (self.blocksize - len(x)) + x)
def __call__(self):
"""Increament the counter and return the new value"""
i = self.blocksize - 1
while i > -1:
c = self.value[i] = chr((ord(self.value[i]) + 1) % 256)
if c != '\x00':
return self.value.tostring()
i -= 1
# counter reset
x = deflate_long(self.overflow, add_sign_padding=False)
self.value = array.array('c', '\x00' * (self.blocksize - len(x)) + x)
return self.value.tostring()
def new(cls, nbits, initial_value=1L, overflow=0L):
return cls(nbits, initial_value=initial_value, overflow=overflow)
new = classmethod(new)
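# Hedged illustration of the CTR counter above (block size and start value are
# arbitrary): a 128-bit counter starting at 1 returns 16-byte strings that
# increment by one on each call.
#
#   ctr = Counter.new(128)
#   assert ctr() == '\x00' * 15 + '\x01'
#   assert ctr() == '\x00' * 15 + '\x02'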
| apache-2.0 | -2,917,215,853,014,974,000 | 29.935691 | 125 | 0.583515 | false |
ibrahimgunduz34/SpiffWorkflow | tests/SpiffWorkflow/storage/DictionarySerializerTest.py | 3 | 3771 | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from __future__ import division
import sys, unittest, re, os
dirname = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(dirname, '..', '..', '..'))
from SpiffWorkflow.storage import DictionarySerializer
from .SerializerTest import SerializerTest, SerializeEveryPatternTest
from SpiffWorkflow import Workflow
import uuid
class DictionarySerializerTest(SerializerTest):
CORRELATE = DictionarySerializer
def setUp(self):
SerializerTest.setUp(self)
self.serializer = DictionarySerializer()
self.serial_type = dict
def compareSerialization(self, item1, item2, exclude_dynamic=False, exclude_items=[]):
if exclude_dynamic:
if 'last_state_change' not in exclude_items:
exclude_items.append('last_state_change')
if 'last_task' not in exclude_items:
exclude_items.append('last_task')
if uuid.UUID not in exclude_items:
exclude_items.append(uuid.UUID)
if isinstance(item1, dict):
if not isinstance(item2, dict):
raise Exception(": companion item is not a dict (is a " + str(type(item2)) + "): " + str(item1) + " v " + str(item2))
for key, value in item1.items():
if key not in item2:
raise Exception("Missing Key: " + key + " (in 1, not 2)")
if key in exclude_items:
continue
try:
self.compareSerialization(value, item2[key], exclude_dynamic=exclude_dynamic, exclude_items=exclude_items)
except Exception as e:
raise Exception(key + '/' + str(e))
for key, _ in item2.items():
if key not in item1:
raise Exception("Missing Key: " + key + " (in 2, not 1)")
elif isinstance(item1, list):
if not isinstance(item2, list):
raise Exception(": companion item is not a list (is a " + str(type(item2)) + ")")
if not len(item1) == len(item2):
raise Exception(": companion list is not the same length: " + str(len(item1)) + " v " + str(len(item2)))
for i, listitem in enumerate(item1):
try:
self.compareSerialization(listitem, item2[i], exclude_dynamic=exclude_dynamic, exclude_items=exclude_items)
except Exception as e:
raise Exception('[' + str(i) + ']/' + str(e))
elif isinstance(item1, Workflow):
raise Exception("Item is a Workflow")
else:
if type(item1) != type(item2):
raise Exception(": companion item is not the same type (is a " + str(type(item2)) + "): " + str(item1) + " v " + str(item2))
if type(item1) in exclude_items:
return
if item1 != item2:
raise Exception("Unequal: " + repr(item1) \
+ " vs " + repr(item2))
def testConstructor(self):
DictionarySerializer()
class DictionarySerializeEveryPatternTest(SerializeEveryPatternTest):
def setUp(self):
super(DictionarySerializeEveryPatternTest, self).setUp()
self.serializerTestClass = DictionarySerializerTest(methodName='testConstructor')
self.serializerTestClass.setUp()
def suite():
tests = unittest.defaultTestLoader.loadTestsFromTestCase(DictionarySerializerTest)
tests.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(DictionarySerializeEveryPatternTest))
return tests
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| lgpl-3.0 | 1,582,650,164,624,637,000 | 40.9 | 140 | 0.592946 | false |
nexiles/odoo | addons/website_blog/controllers/main.py | 16 | 16374 | # -*- coding: utf-8 -*-
import datetime
import werkzeug
from openerp import tools
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
class QueryURL(object):
def __init__(self, path='', path_args=None, **args):
self.path = path
self.args = args
self.path_args = set(path_args or [])
def __call__(self, path=None, path_args=None, **kw):
path = path or self.path
for k, v in self.args.items():
kw.setdefault(k, v)
path_args = set(path_args or []).union(self.path_args)
paths, fragments = [], []
for key, value in kw.items():
if value and key in path_args:
if isinstance(value, browse_record):
paths.append((key, slug(value)))
else:
paths.append((key, value))
elif value:
if isinstance(value, list) or isinstance(value, set):
fragments.append(werkzeug.url_encode([(key, item) for item in value]))
else:
fragments.append(werkzeug.url_encode([(key, value)]))
for key, value in paths:
path += '/' + key + '/%s' % value
if fragments:
path += '?' + '&'.join(fragments)
return path
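# Hedged illustration of QueryURL (the arguments are made-up values): keys listed
# in ``path_args`` become path components, everything else is appended as a
# query string.
#
#   url = QueryURL('/blog', ['blog', 'tag'], date_begin='2014-01-01')
#   url(tag=3)  # -> '/blog/tag/3?date_begin=2014-01-01'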
class WebsiteBlog(http.Controller):
_blog_post_per_page = 20
_post_comment_per_page = 10
def nav_list(self):
blog_post_obj = request.registry['blog.post']
groups = blog_post_obj.read_group(
request.cr, request.uid, [], ['name', 'create_date'],
groupby="create_date", orderby="create_date desc", context=request.context)
for group in groups:
begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
return groups
@http.route([
'/blog',
'/blog/page/<int:page>',
], type='http', auth="public", website=True)
def blogs(self, page=1, **post):
cr, uid, context = request.cr, request.uid, request.context
blog_obj = request.registry['blog.post']
total = blog_obj.search(cr, uid, [], count=True, context=context)
pager = request.website.pager(
url='/blog',
total=total,
page=page,
step=self._blog_post_per_page,
)
post_ids = blog_obj.search(cr, uid, [], offset=(page-1)*self._blog_post_per_page, limit=self._blog_post_per_page, context=context)
posts = blog_obj.browse(cr, uid, post_ids, context=context)
blog_url = QueryURL('', ['blog', 'tag'])
return request.website.render("website_blog.latest_blogs", {
'posts': posts,
'pager': pager,
'blog_url': blog_url,
})
@http.route([
'/blog/<model("blog.blog"):blog>',
'/blog/<model("blog.blog"):blog>/page/<int:page>',
'/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>',
'/blog/<model("blog.blog"):blog>/tag/<model("blog.tag"):tag>/page/<int:page>',
], type='http', auth="public", website=True)
def blog(self, blog=None, tag=None, page=1, **opt):
""" Prepare all values to display the blog.
:return dict values: values for the templates, containing
- 'blog': current blog
- 'blogs': all blogs for navigation
- 'pager': pager of posts
- 'tag': current tag
- 'tags': all tags, for navigation
- 'nav_list': a dict [year][month] for archives navigation
- 'date': date_begin optional parameter, used in archives navigation
- 'blog_url': help object to create URLs
"""
date_begin, date_end = opt.get('date_begin'), opt.get('date_end')
cr, uid, context = request.cr, request.uid, request.context
blog_post_obj = request.registry['blog.post']
blog_obj = request.registry['blog.blog']
blog_ids = blog_obj.search(cr, uid, [], order="create_date asc", context=context)
blogs = blog_obj.browse(cr, uid, blog_ids, context=context)
domain = []
if blog:
domain += [('blog_id', '=', blog.id)]
if tag:
domain += [('tag_ids', 'in', tag.id)]
if date_begin and date_end:
domain += [("create_date", ">=", date_begin), ("create_date", "<=", date_end)]
blog_url = QueryURL('', ['blog', 'tag'], blog=blog, tag=tag, date_begin=date_begin, date_end=date_end)
post_url = QueryURL('', ['blogpost'], tag_id=tag and tag.id or None, date_begin=date_begin, date_end=date_end)
blog_post_ids = blog_post_obj.search(cr, uid, domain, order="create_date desc", context=context)
blog_posts = blog_post_obj.browse(cr, uid, blog_post_ids, context=context)
pager = request.website.pager(
url=blog_url(),
total=len(blog_posts),
page=page,
step=self._blog_post_per_page,
)
pager_begin = (page - 1) * self._blog_post_per_page
pager_end = page * self._blog_post_per_page
blog_posts = blog_posts[pager_begin:pager_end]
tags = blog.all_tags()[blog.id]
values = {
'blog': blog,
'blogs': blogs,
'tags': tags,
'tag': tag,
'blog_posts': blog_posts,
'pager': pager,
'nav_list': self.nav_list(),
'blog_url': blog_url,
'post_url': post_url,
'date': date_begin,
}
response = request.website.render("website_blog.blog_post_short", values)
return response
@http.route([
'''/blog/<model("blog.blog"):blog>/post/<model("blog.post", "[('blog_id','=',blog[0])]"):blog_post>''',
], type='http', auth="public", website=True)
def blog_post(self, blog, blog_post, tag_id=None, page=1, enable_editor=None, **post):
""" Prepare all values to display the blog.
:return dict values: values for the templates, containing
- 'blog_post': browse of the current post
- 'blog': browse of the current blog
- 'blogs': list of browse records of blogs
- 'tag': current tag, if tag_id in parameters
- 'tags': all tags, for tag-based navigation
- 'pager': a pager on the comments
- 'nav_list': a dict [year][month] for archives navigation
- 'next_post': next blog post, to direct the user towards the next interesting post
"""
cr, uid, context = request.cr, request.uid, request.context
tag_obj = request.registry['blog.tag']
blog_post_obj = request.registry['blog.post']
date_begin, date_end = post.get('date_begin'), post.get('date_end')
pager_url = "/blogpost/%s" % blog_post.id
pager = request.website.pager(
url=pager_url,
total=len(blog_post.website_message_ids),
page=page,
step=self._post_comment_per_page,
scope=7
)
pager_begin = (page - 1) * self._post_comment_per_page
pager_end = page * self._post_comment_per_page
comments = blog_post.website_message_ids[pager_begin:pager_end]
tag = None
if tag_id:
tag = request.registry['blog.tag'].browse(request.cr, request.uid, int(tag_id), context=request.context)
post_url = QueryURL('', ['blogpost'], blogpost=blog_post, tag_id=tag_id, date_begin=date_begin, date_end=date_end)
blog_url = QueryURL('', ['blog', 'tag'], blog=blog_post.blog_id, tag=tag, date_begin=date_begin, date_end=date_end)
if not blog_post.blog_id.id == blog.id:
return request.redirect("/blog/%s/post/%s" % (slug(blog_post.blog_id), slug(blog_post)))
tags = tag_obj.browse(cr, uid, tag_obj.search(cr, uid, [], context=context), context=context)
# Find next Post
visited_blogs = request.httprequest.cookies.get('visited_blogs') or ''
visited_ids = filter(None, visited_blogs.split(','))
visited_ids = map(lambda x: int(x), visited_ids)
if blog_post.id not in visited_ids:
visited_ids.append(blog_post.id)
next_post_id = blog_post_obj.search(cr, uid, [
('id', 'not in', visited_ids),
], order='ranking desc', limit=1, context=context)
if not next_post_id:
next_post_id = blog_post_obj.search(cr, uid, [('id', '!=', blog.id)], order='ranking desc', limit=1, context=context)
next_post = next_post_id and blog_post_obj.browse(cr, uid, next_post_id[0], context=context) or False
values = {
'tags': tags,
'tag': tag,
'blog': blog,
'blog_post': blog_post,
'main_object': blog_post,
'nav_list': self.nav_list(),
'enable_editor': enable_editor,
'next_post': next_post,
'date': date_begin,
'post_url': post_url,
'blog_url': blog_url,
'pager': pager,
'comments': comments,
}
response = request.website.render("website_blog.blog_post_complete", values)
response.set_cookie('visited_blogs', ','.join(map(str, visited_ids)))
request.session[request.session_id] = request.session.get(request.session_id, [])
if not (blog_post.id in request.session[request.session_id]):
request.session[request.session_id].append(blog_post.id)
# Increase counter
blog_post_obj.write(cr, SUPERUSER_ID, [blog_post.id], {
'visits': blog_post.visits+1,
},context=context)
return response
def _blog_post_message(self, user, blog_post_id=0, **post):
cr, uid, context = request.cr, request.uid, request.context
blog_post = request.registry['blog.post']
partner_obj = request.registry['res.partner']
if uid != request.website.user_id.id:
partner_ids = [user.partner_id.id]
else:
partner_ids = blog_post._find_partner_from_emails(
cr, SUPERUSER_ID, 0, [post.get('email')], context=context)
if not partner_ids or not partner_ids[0]:
partner_ids = [partner_obj.create(cr, SUPERUSER_ID, {'name': post.get('name'), 'email': post.get('email')}, context=context)]
message_id = blog_post.message_post(
cr, SUPERUSER_ID, int(blog_post_id),
body=post.get('comment'),
type='comment',
subtype='mt_comment',
author_id=partner_ids[0],
path=post.get('path', False),
context=context)
return message_id
@http.route(['/blogpost/comment'], type='http', auth="public", methods=['POST'], website=True)
def blog_post_comment(self, blog_post_id=0, **post):
cr, uid, context = request.cr, request.uid, request.context
if post.get('comment'):
user = request.registry['res.users'].browse(cr, uid, uid, context=context)
blog_post = request.registry['blog.post']
blog_post.check_access_rights(cr, uid, 'read')
self._blog_post_message(user, blog_post_id, **post)
return werkzeug.utils.redirect(request.httprequest.referrer + "#comments")
def _get_discussion_detail(self, ids, publish=False, **post):
cr, uid, context = request.cr, request.uid, request.context
values = []
mail_obj = request.registry.get('mail.message')
for message in mail_obj.browse(cr, SUPERUSER_ID, ids, context=context):
values.append({
"id": message.id,
"author_name": message.author_id.name,
"author_image": message.author_id.image and \
("data:image/png;base64,%s" % message.author_id.image) or \
'/website_blog/static/src/img/anonymous.png',
"date": message.date,
'body': html2plaintext(message.body),
'website_published' : message.website_published,
'publish' : publish,
})
return values
@http.route(['/blogpost/post_discussion'], type='json', auth="public", website=True)
def post_discussion(self, blog_post_id, **post):
cr, uid, context = request.cr, request.uid, request.context
publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
user = request.registry['res.users'].browse(cr, uid, uid, context=context)
id = self._blog_post_message(user, blog_post_id, **post)
return self._get_discussion_detail([id], publish, **post)
@http.route('/blogpost/new', type='http', auth="public", website=True)
def blog_post_create(self, blog_id, **post):
cr, uid, context = request.cr, request.uid, request.context
new_blog_post_id = request.registry['blog.post'].create(cr, uid, {
'blog_id': blog_id,
'name': _("Blog Post Title"),
'subtitle': _("Subtitle"),
'content': '',
'website_published': False,
}, context=context)
new_blog_post = request.registry['blog.post'].browse(cr, uid, new_blog_post_id, context=context)
return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(new_blog_post.blog_id), slug(new_blog_post)))
@http.route('/blogpost/duplicate', type='http', auth="public", website=True)
def blog_post_copy(self, blog_post_id, **post):
""" Duplicate a blog.
:param blog_post_id: id of the blog post currently browsed.
:return redirect to the new blog created
"""
cr, uid, context = request.cr, request.uid, request.context
create_context = dict(context, mail_create_nosubscribe=True)
nid = request.registry['blog.post'].copy(cr, uid, blog_post_id, {}, context=create_context)
new_blog_post = request.registry['blog.post'].browse(cr, uid, nid, context=context)
post = request.registry['blog.post'].browse(cr, uid, nid, context)
return werkzeug.utils.redirect("/blog/%s/post/%s?enable_editor=1" % (slug(post.blog_id), slug(new_blog_post)))
@http.route('/blogpost/get_discussion/', type='json', auth="public", website=True)
def discussion(self, post_id=0, path=None, count=False, **post):
cr, uid, context = request.cr, request.uid, request.context
mail_obj = request.registry.get('mail.message')
domain = [('res_id', '=', int(post_id)), ('model', '=', 'blog.post'), ('path', '=', path)]
        # check that the current user belongs to the website publisher group
publish = request.registry['res.users'].has_group(cr, uid, 'base.group_website_publisher')
if not publish:
domain.append(('website_published', '=', True))
ids = mail_obj.search(cr, SUPERUSER_ID, domain, count=count)
if count:
return ids
return self._get_discussion_detail(ids, publish, **post)
@http.route('/blogpost/get_discussions/', type='json', auth="public", website=True)
def discussions(self, post_id=0, paths=None, count=False, **post):
ret = []
for path in paths:
result = self.discussion(post_id=post_id, path=path, count=count, **post)
ret.append({"path": path, "val": result})
return ret
@http.route('/blogpost/change_background', type='json', auth="public", website=True)
def change_bg(self, post_id=0, image=None, **post):
if not post_id:
return False
return request.registry['blog.post'].write(request.cr, request.uid, [int(post_id)], {'background_image': image}, request.context)
@http.route('/blog/get_user/', type='json', auth="public", website=True)
def get_user(self, **post):
return [False if request.session.uid else True]
| agpl-3.0 | -4,335,421,845,214,459,000 | 44.73743 | 141 | 0.583669 | false |
haojunyu/numpy | numpy/polynomial/laguerre.py | 75 | 55335 | """
Objects for dealing with Laguerre series.
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `lagdomain` -- Laguerre series default domain, [0, 1].
- `lagzero` -- Laguerre series that evaluates identically to 0.
- `lagone` -- Laguerre series that evaluates identically to 1.
- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
- `lagval` -- evaluate a Laguerre series at given points.
- `lagval2d` -- evaluate a 2D Laguerre series at given points.
- `lagval3d` -- evaluate a 3D Laguerre series at given points.
- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product.
- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product.
Calculus
--------
- `lagder` -- differentiate a Laguerre series.
- `lagint` -- integrate a Laguerre series.
Misc Functions
--------------
- `lagfromroots` -- create a Laguerre series with specified roots.
- `lagroots` -- find the roots of a Laguerre series.
- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
- `lagvander2d` -- Vandermonde-like matrix for 2D power series.
- `lagvander3d` -- Vandermonde-like matrix for 3D power series.
- `laggauss` -- Gauss-Laguerre quadrature, points and weights.
- `lagweight` -- Laguerre weight function.
- `lagcompanion` -- symmetrized companion matrix in Laguerre form.
- `lagfit` -- least-squares fit returning a Laguerre series.
- `lagtrim` -- trim leading coefficients from a Laguerre series.
- `lagline` -- Laguerre series of given straight line.
- `lag2poly` -- convert a Laguerre series to a polynomial.
- `poly2lag` -- convert a polynomial to a Laguerre series.
Classes
-------
- `Laguerre` -- A Laguerre series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd',
'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder',
'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander',
'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d',
'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion',
'laggauss', 'lagweight']
lagtrim = pu.trimcoef
def poly2lag(pol):
"""
poly2lag(pol)
Convert a polynomial to a Laguerre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Laguerre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Laguerre
series.
See Also
--------
lag2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import poly2lag
>>> poly2lag(np.arange(4))
array([ 23., -63., 58., -18.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = lagadd(lagmulx(res), pol[i])
return res
def lag2poly(c):
"""
Convert a Laguerre series to a polynomial.
Convert an array representing the coefficients of a Laguerre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Laguerre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2lag
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import lag2poly
>>> lag2poly([ 23., -63., 58., -18.])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
return polyadd(c0, polysub(c1, polymulx(c1)))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Laguerre
lagdomain = np.array([0, 1])
# Laguerre coefficients representing zero.
lagzero = np.array([0])
# Laguerre coefficients representing one.
lagone = np.array([1])
# Laguerre coefficients representing the identity x.
lagx = np.array([1, -1])
def lagline(off, scl):
"""
Laguerre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Laguerre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.laguerre import lagline, lagval
>>> lagval(0,lagline(3, 2))
3.0
>>> lagval(1,lagline(3, 2))
5.0
"""
if scl != 0:
return np.array([off + scl, -scl])
else:
return np.array([off])
def lagfromroots(roots):
"""
Generate a Laguerre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Laguerre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Laguerre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, chebfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> from numpy.polynomial.laguerre import lagfromroots, lagval
>>> coef = lagfromroots((-1, 0, 1))
>>> lagval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = lagfromroots((-1j, 1j))
>>> lagval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [lagline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [lagmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = lagmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def lagadd(c1, c2):
"""
Add one Laguerre series to another.
Returns the sum of two Laguerre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Laguerre series of their sum.
See Also
--------
lagsub, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Laguerre series
is a Laguerre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagadd
>>> lagadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagsub(c1, c2):
"""
Subtract one Laguerre series from another.
Returns the difference of two Laguerre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their difference.
See Also
--------
lagadd, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Laguerre
series is a Laguerre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagsub
>>> lagsub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagmulx(c):
"""Multiply a Laguerre series by x.
Multiply the Laguerre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Laguerre
polynomials in the form
.. math::
xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.laguerre import lagmulx
>>> lagmulx([1, 2, 3])
array([ -1., -1., 11., -9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]
prd[1] = -c[0]
for i in range(1, len(c)):
prd[i + 1] = -c[i]*(i + 1)
prd[i] += c[i]*(2*i + 1)
prd[i - 1] -= c[i]*i
return prd
def lagmul(c1, c2):
"""
Multiply one Laguerre series by another.
Returns the product of two Laguerre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their product.
See Also
--------
lagadd, lagsub, lagdiv, lagpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Laguerre polynomial basis set. Thus, to express
the product as a Laguerre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagmul
>>> lagmul([1, 2, 3], [0, 1, 2])
array([ 8., -13., 38., -51., 36.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
return lagadd(c0, lagsub(c1, lagmulx(c1)))
def lagdiv(c1, c2):
"""
Divide one Laguerre series by another.
Returns the quotient-with-remainder of two Laguerre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Laguerre series coefficients representing the quotient and
remainder.
See Also
--------
lagadd, lagsub, lagmul, lagpow
Notes
-----
In general, the (polynomial) division of one Laguerre series by another
results in quotient and remainder terms that are not in the Laguerre
polynomial basis set. Thus, to express these results as a Laguerre
series, it is necessary to "reproject" the results onto the Laguerre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagdiv
>>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 1.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = lagmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def lagpow(c, pow, maxpower=16):
"""Raise a Laguerre series to a power.
Returns the Laguerre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Laguerre series of power.
See Also
--------
lagadd, lagsub, lagmul, lagdiv
Examples
--------
>>> from numpy.polynomial.laguerre import lagpow
>>> lagpow([1, 2, 3], 2)
array([ 14., -16., 56., -72., 54.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = lagmul(prd, c)
return prd
def lagder(c, m=1, scl=1, axis=0):
"""
Differentiate a Laguerre series.
Returns the Laguerre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Laguerre series of the derivative.
See Also
--------
lagint
Notes
-----
In general, the result of differentiating a Laguerre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagder
>>> lagder([ 1., 1., 1., -3.])
array([ 1., 2., 3.])
>>> lagder([ 1., 0., 0., -4., 3.], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 1, -1):
der[j - 1] = -c[j]
c[j - 1] += c[j]
der[0] = -c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Laguerre series.
Returns the Laguerre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Laguerre series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
lagder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagint
>>> lagint([1,2,3])
array([ 1., 1., 1., -3.])
>>> lagint([1,2,3], m=2)
array([ 1., 0., 0., -4., 3.])
>>> lagint([1,2,3], k=1)
array([ 2., 1., 1., -3.])
>>> lagint([1,2,3], lbnd=-1)
array([ 11.5, 1. , 1. , -3. ])
>>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
array([ 11.16666667, -5. , -3. , 2. ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]
tmp[1] = -c[0]
for j in range(1, n):
tmp[j] += c[j]
tmp[j + 1] = -c[j]
tmp[0] += k[i] - lagval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def lagval(x, c, tensor=True):
"""
Evaluate a Laguerre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
lagval2d, laggrid2d, lagval3d, laggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.laguerre import lagval
>>> coef = [1,2,3]
>>> lagval(1, coef)
-0.5
>>> lagval([[1,2],[3,4]], coef)
array([[-0.5, -4. ],
[-4.5, -2. ]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*((2*nd - 1) - x))/nd
return c0 + c1*(1 - x)
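# Illustrative usage sketch (added for clarity; not part of the original
# source). It shows how ``tensor`` affects the output shape of ``lagval``
# when the coefficient array is multidimensional. The helper name is
# hypothetical.
def _example_lagval_tensor():
    c = np.ones((3, 2))                      # two series, one per column of c
    x = np.array([0.0, 1.0, 2.0])
    full = lagval(x, c, tensor=True)         # shape (2, 3): each column at each x
    paired = lagval(x[:2], c, tensor=False)  # shape (2,): column j at x[j]
    return full, paired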
def lagval2d(x, y, c):
"""
Evaluate a 2-D Laguerre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
lagval, laggrid2d, lagval3d, laggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = lagval(x, c)
c = lagval(y, c, tensor=False)
return c
def laggrid2d(x, y, c):
"""
Evaluate a 2-D Laguerre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the two dimensional Laguerre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
lagval, lagval2d, lagval3d, laggrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = lagval(x, c)
c = lagval(y, c)
return c
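# Illustrative usage sketch (added for clarity; not part of the original
# source). A grid evaluation returns one value per (a, b) pair, so the
# result has shape x.shape + y.shape. The helper name is hypothetical.
def _example_laggrid2d_shape():
    x = np.linspace(0.0, 1.0, 3)
    y = np.linspace(0.0, 2.0, 4)
    c = np.ones((2, 2))                    # c[i, j] multiplies L_i(a)*L_j(b)
    vals = laggrid2d(x, y, c)              # shape (3, 4)
    return vals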
def lagval3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, laggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = lagval(x, c)
c = lagval(y, c, tensor=False)
c = lagval(z, c, tensor=False)
return c
def laggrid3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, lagval3d
Notes
-----
.. versionadded::1.7.0
"""
c = lagval(x, c)
c = lagval(y, c)
c = lagval(z, c)
return c
def lagvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Laguerre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
``lagval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Laguerre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Laguerre polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.laguerre import lagvander
>>> x = np.array([0, 1, 2])
>>> lagvander(x, 3)
array([[ 1. , 1. , 1. , 1. ],
[ 1. , 0. , -0.5 , -0.66666667],
[ 1. , -1. , -1. , -0.33333333]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = 1 - x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
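# Illustrative usage sketch (added for clarity; not part of the original
# source). It checks the documented equivalence between the
# pseudo-Vandermonde matrix and direct evaluation: np.dot(lagvander(x, n), c)
# agrees with lagval(x, c) up to roundoff. The helper name is hypothetical.
def _example_lagvander_equivalence():
    x = np.linspace(0.0, 3.0, 5)
    c = np.array([1.0, 2.0, 3.0])          # degree-2 series
    v = lagvander(x, 2)                    # shape (5, 3)
    return np.allclose(np.dot(v, c), lagval(x, c))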
def lagvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Laguerre polynomials.
If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    lagvander, lagvander3d, lagval2d, lagval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = lagvander(x, degx)
vy = lagvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def lagvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Laguerre polynomials.
If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    lagvander, lagvander2d, lagval2d, lagval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = lagvander(x, degx)
vy = lagvander(y, degy)
vz = lagvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def lagfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Laguerre series to data.
Return the coefficients of a Laguerre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Laguerre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, polyfit, hermfit, hermefit
lagval : Evaluates a Laguerre series.
lagvander : pseudo Vandermonde matrix of Laguerre series.
lagweight : Laguerre weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Laguerre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Laguerre series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `lagweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.laguerre import lagfit, lagval
>>> x = np.linspace(0, 10)
>>> err = np.random.randn(len(x))/10
>>> y = lagval(x, [1, 2, 3]) + err
>>> lagfit(x, y, 2)
array([ 0.96971004, 2.00193749, 3.00288744])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = lagvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
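# Illustrative usage sketch (added for clarity; not part of the original
# source). It follows the weighting scheme described in the Notes of
# ``lagfit`` for data of the form sqrt(w(x)) * p(x), where w(x) = exp(-x) is
# the Laguerre weight. The helper name is hypothetical.
def _example_lagfit_weighted():
    x = np.linspace(0.0, 10.0, 50)
    sw = np.sqrt(lagweight(x))             # sqrt of the weight function
    y = sw * lagval(x, [1.0, 2.0, 3.0])    # data resembling sqrt(w(x)) * p(x)
    coef = lagfit(x, y / sw, 2, w=sw)      # should be close to [1, 2, 3]
    return coef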
def lagcompanion(c):
"""
Return the companion matrix of c.
The usual companion matrix of the Laguerre polynomials is already
symmetric when `c` is a basis Laguerre polynomial, so no scaling is
applied.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[1 + c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
top = mat.reshape(-1)[1::n+1]
mid = mat.reshape(-1)[0::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = -np.arange(1, n)
mid[...] = 2.*np.arange(n) + 1.
bot[...] = top
mat[:, -1] += (c[:-1]/c[-1])*n
return mat
def lagroots(c):
"""
Compute the roots of a Laguerre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, chebroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Laguerre series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.laguerre import lagroots, lagfromroots
>>> coef = lagfromroots([0, 1, 2])
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([1 + c[0]/c[1]])
m = lagcompanion(c)
r = la.eigvals(m)
r.sort()
return r
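# Illustrative usage sketch (added for clarity; not part of the original
# source). As suggested in the Notes of ``lagroots``, root estimates can be
# polished with a Newton step using ``lagval`` and ``lagder``. The helper
# name is hypothetical and assumes the derivative is nonzero at the roots.
def _example_polish_lagroots(c):
    r = lagroots(c)
    r = r - lagval(r, c)/lagval(r, lagder(c))  # one Newton iteration
    return r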
def laggauss(deg):
"""
Gauss-Laguerre quadrature.
Computes the sample points and weights for Gauss-Laguerre quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[0, \infty]`
with the weight function :math:`f(x) = \exp(-x)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
    The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = lagcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = lagval(x, c)
df = lagval(x, lagder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = lagval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# scale w to get the right value, 1 in this case
w /= w.sum()
return x, w
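# Illustrative usage sketch (added for clarity; not part of the original
# source). The Gauss-Laguerre rule approximates integral_0^inf exp(-x)*f(x) dx
# by sum(w * f(x)); with f(x) = x the exact value is 1. The helper name is
# hypothetical.
def _example_laggauss_integral():
    x, w = laggauss(5)
    return np.sum(w * x)                   # should be close to 1.0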
def lagweight(x):
"""Weight function of the Laguerre polynomials.
    The weight function is :math:`\exp(-x)` and the interval of integration
    is :math:`[0, \infty]`. The Laguerre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = np.exp(-x)
return w
#
# Laguerre series class
#
class Laguerre(ABCPolyBase):
"""A Laguerre series class.
The Laguerre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
        Laguerre coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [0, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [0, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(lagadd)
_sub = staticmethod(lagsub)
_mul = staticmethod(lagmul)
_div = staticmethod(lagdiv)
_pow = staticmethod(lagpow)
_val = staticmethod(lagval)
_int = staticmethod(lagint)
_der = staticmethod(lagder)
_fit = staticmethod(lagfit)
_line = staticmethod(lagline)
_roots = staticmethod(lagroots)
_fromroots = staticmethod(lagfromroots)
# Virtual properties
nickname = 'lag'
domain = np.array(lagdomain)
window = np.array(lagdomain)
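# Illustrative usage sketch (added for clarity; not part of the original
# source). It shows basic arithmetic, evaluation, and calculus with the
# convenience class defined above. The helper name is hypothetical.
def _example_laguerre_class():
    p = Laguerre([1, 2, 3])                # 1*L_0 + 2*L_1 + 3*L_2
    q = p + p                              # series arithmetic via the class
    return q(0.0), p.deriv(), p.integ()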
| bsd-3-clause | -970,609,223,875,002,500 | 30.087079 | 79 | 0.592193 | false |
OpenAcademy-OpenStack/nova-scheduler | nova/virt/hyperv/vmutils.py | 1 | 24595 | # Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V.
"""
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(alexpilotti): Move the exceptions to a separate module
# TODO(alexpilotti): Add more domain exceptions
class HyperVException(exception.NovaException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
# TODO(alexpilotti): Add a storage exception base class
class VHDResizeException(HyperVException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
class VMUtils(object):
# These constants can be overridden by inherited classes
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
_DISK_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
_DVD_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
_IDE_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
_IDE_DVD_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
_SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
_VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
_RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
_STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
def __init__(self, host='.'):
self._enabled_states_map = dict((v, k) for k, v in
self._vm_power_states_map.iteritems())
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
vm_names = [v.ElementName for v in
self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vm_names
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
#See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
enabled_state = self._enabled_states_map[si.EnabledState]
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
if dynamic_memory_ratio > 1:
mem_settings.DynamicMemoryEnabled = True
# Must be a multiple of 2
reserved_mem = min(
long(max_mem / dynamic_memory_ratio) >> 1 << 1,
max_mem)
else:
mem_settings.DynamicMemoryEnabled = False
reserved_mem = max_mem
mem_settings.Reservation = reserved_mem
# Start with the minimum memory
mem_settings.VirtualQuantity = reserved_mem
self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
procsetting.LimitProcessorFeatures = limit_cpu_features
self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vm = self._lookup_vm_check(vm_name)
vmsetting = self._get_vm_setting_data(vm)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
if not self._conn.Msvm_VirtualSystemManagementService():
msg = _("The Windows account running nova-compute on this Hyper-V"
" host doesn't have the required permissions to create or"
" operate the virtual machine.")
raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug(_('Creating VM %s'), vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug(_('Setting memory for vm %s'), vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug(_('Set vCPUs for vm %s'), vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = vm_name
(job_path,
ret_val) = vs_man_svc.DefineVirtualSystem([], None,
vs_gs_data.GetText_(1))[1:]
self.check_ret_val(ret_val, job_path)
return self._lookup_vm_check(vm_name)
def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
return [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)][0].path_()
def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks_count(self, scsi_controller_path):
volumes = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"Parent = '%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE,
'parent':
scsi_controller_path.replace("'", "''")})
return len(volumes)
def _get_new_setting_data(self, class_name):
return self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
def _get_new_resource_setting_data(self, resource_sub_type,
class_name=None):
if class_name is None:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
return self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"InstanceID LIKE '%%\\Default'" %
{"class_name": class_name,
"res_sub_type": resource_sub_type})[0]
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
"""Create an IDE drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
if drive_type == constants.IDE_DISK:
res_sub_type = self._DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._DVD_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
#Set the IDE ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
#Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.IDE_DISK:
res_sub_type = self._IDE_DISK_RES_SUB_TYPE
elif drive_type == constants.IDE_DVD:
res_sub_type = self._IDE_DVD_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
#Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
#Add the new vhd object as a virtual hard disk to the vm.
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
vm = self._lookup_vm_check(vm_name)
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_CTRL_RES_SUB_TYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.Address = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
nic_data.Connection = [vswitch_conn_data]
vm = self._lookup_vm_check(vm_name)
self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
return self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
"""Create a (synthetic) nic and attach it to the vm."""
#Create a new nic
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
#Configure the nic
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
#Add the new nic to the vm
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
#Invalid state for current operation (32775) typically means that
#the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug(_("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
disk_resources = [r for r in rasds
if r.ResourceSubType in
[self._IDE_DISK_RES_SUB_TYPE,
self._IDE_DVD_RES_SUB_TYPE]]
volume_resources = [r for r in rasds
if r.ResourceSubType ==
self._PHYS_DISK_RES_SUB_TYPE]
volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
volume_drives.append(drive_path)
disk_files = []
for disk_resource in disk_resources:
disk_files.extend(
[c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
#Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=[0]):
if ret_val == constants.WMI_JOB_STATUS_STARTED:
return self._wait_for_job(job_path)
elif ret_val not in success_values:
raise HyperVException(_('Operation failed with return value: %s')
% ret_val)
def _wait_for_job(self, job_path):
"""Poll WMI job state and wait for completion."""
job = self._get_wmi_obj(job_path)
while job.JobState == constants.WMI_JOB_STATE_RUNNING:
time.sleep(0.1)
job = self._get_wmi_obj(job_path)
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
job_state = job.JobState
if job.path().Class == "Msvm_ConcreteJob":
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(err_sum_desc)s - %(err_desc)s - "
"Error code: %(err_code)d") %
{'job_state': job_state,
'err_sum_desc': err_sum_desc,
'err_desc': err_desc,
'err_code': err_code})
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(error)s") %
{'job_state': job_state,
'error': error})
else:
raise HyperVException(_("WMI job failed with status "
"%d. No error "
"description available") %
job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"),
{'desc': desc, 'elap': elap})
return job
def _get_wmi_obj(self, path):
return wmi.WMI(moniker=path.replace('\\', '/'))
def _clone_wmi_obj(self, wmi_class, wmi_obj):
"""Clone a WMI object."""
cl = getattr(self._conn, wmi_class) # get the class
newinst = cl.new()
#Copy the properties from the original.
for prop in wmi_obj._properties:
if prop == "VirtualSystemIdentifiers":
strguid = []
strguid.append(str(uuid.uuid4()))
newinst.Properties_.Item(prop).Value = strguid
else:
prop_value = wmi_obj.Properties_.Item(prop).Value
newinst.Properties_.Item(prop).Value = prop_value
return newinst
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
ComputerSystem=vm_path)
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
vm_path)
self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val,
snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path):
vm = self._lookup_vm_check(vm_name)
physical_disk = self._get_mounted_disk_resource_from_path(disk_path)
if physical_disk:
self._remove_virt_resource(physical_disk, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path):
physical_disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE})
for physical_disk in physical_disks:
if physical_disk.HostResource:
if physical_disk.HostResource[0].lower() == disk_path.lower():
return physical_disk
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
"WHERE DriveNumber=" +
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._PHYS_DISK_RES_SUB_TYPE,
"parent":
controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def enable_vm_metrics_collection(self, vm_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))
| apache-2.0 | 4,676,667,965,965,371,000 | 41.25945 | 79 | 0.568937 | false |
winhamwr/selenium | py/selenium/webdriver/common/desired_capabilities.py | 6 | 2073 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DesiredCapabilities(object):
FIREFOX = {
"browserName": "firefox",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
INTERNETEXPLORER = {
"browserName": "internet explorer",
"version": "",
"platform": "WINDOWS",
"javascriptEnabled": True,
}
CHROME = {
"browserName": "chrome",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
OPERA = {
"browserName": "opera",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
SAFARI = {
"browserName": "safari",
"version": "5",
"platform": "MAC",
"javascriptEnabled": True,
}
HTMLUNIT = {
"browserName": "htmlunit",
"version": "",
"platform": "ANY",
}
HTMLUNITWITHJS = {
"browserName": "htmlunit",
"version": "firefox",
"platform": "ANY",
"javascriptEnabled": True,
}
IPHONE = {
"browserName": "iPhone",
"version": "",
"platform": "MAC",
"javascriptEnabled": True,
}
IPAD = {
"browserName": "iPad",
"version": "",
"platform": "MAC",
"javascriptEnabled": True,
}
ANDROID = {
"browserName": "android",
"version": "",
"platform": "ANDROID",
"javascriptEnabled": True,
}
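# Illustrative usage sketch (added for clarity; not part of the original
# source). The capability presets are plain dicts shared at class level, so
# copy one before customizing it. The helper name and override value are
# hypothetical.
def _example_custom_capabilities():
    caps = dict(DesiredCapabilities.FIREFOX)
    caps['platform'] = 'LINUX'
    return caps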
| apache-2.0 | -3,696,197,680,889,022,000 | 23.388235 | 74 | 0.557164 | false |
zibraproject/zika-pipeline | scripts/vcftagprimersites.py | 1 | 1475 | #!/usr/bin/env python
import vcf
import sys
import subprocess
import csv
from collections import defaultdict
def read_bed_file(fn):
bedfile = []
with open(fn) as csvfile:
reader = csv.reader(csvfile, dialect='excel-tab')
for row in reader:
bedrow = {}
bedrow['Primer_ID'] = row[3]
if len(row) >= 6:
# new style bed
bedrow['direction'] = row[5]
elif len(row) == 5:
# old style without directory
if 'LEFT' in row[3]:
bedrow['direction'] = '+'
elif 'RIGHT' in row[3]:
bedrow['direction'] = '-'
else:
print >>sys.stderr, "Malformed BED file!"
raise SystemExit
if bedrow['direction'] == '+':
bedrow['end'] = int(row[2])
bedrow['start'] = int(row[1])
else:
bedrow['end'] = int(row[1])
bedrow['start'] = int(row[2])
bedfile.append(bedrow)
return bedfile
def overlaps(coords, pos):
for v in coords:
if pos >= v['start'] and pos <= v['end']:
return v
return False
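# Illustrative usage sketch (added for clarity; not part of the original
# script). It shows how the two helpers above combine: parse the BED file
# once, then look up the primer covering a position. The helper name and
# arguments are hypothetical.
def _example_tag_position(bed_fn, pos):
    bedfile = read_bed_file(bed_fn)
    hit = overlaps(bedfile, pos)
    if hit:
        return hit['Primer_ID']
    return None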
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print >>sys.stderr, "usage: vcftagprimersites.py <primers.bed> <in.vcf>"
        raise SystemExit
    bedfile = read_bed_file(sys.argv[1])
    vcf_reader = vcf.Reader(filename=sys.argv[2])
    vcf_writer = vcf.Writer(sys.stdout, vcf_reader)
    for record in vcf_reader:
        v = overlaps(bedfile, record.POS)
        if v:
            record.INFO['PRIMER'] = v['Primer_ID']
        # PP = list(record.INFO)
        # record.INFO = {}
        # record.INFO['PP'] = PP
        # record.INFO['DEPTH'] = depths[record.CHROM][record.POS]
        vcf_writer.write_record(record)
| mit | -4,945,425,300,933,682,000 | 22.046875 | 57 | 0.621017 | false |
marionleborgne/nupic.research | tests/extended_temporal_memory/etm_apical_tiebreak_sequences_cpp_test.py | 1 | 3265 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run the apical tiebreak sequence tests on the C++ ExtendedTemporalMemory.
"""
import unittest
from nupic.bindings.experimental import ExtendedTemporalMemory
from htmresearch.support.shared_tests.apical_tiebreak_sequences_test_base import (
ApicalTiebreakSequencesTestBase)
class ExtendedTMCPP_ApicalTiebreakSequencesTests(ApicalTiebreakSequencesTestBase,
unittest.TestCase):
"""
Run the apical tiebreak sequence tests on the C++ ExtendedTemporalMemory.
"""
def constructTM(self, columnCount, apicalInputSize, cellsPerColumn,
initialPermanence, connectedPermanence, minThreshold,
sampleSize, permanenceIncrement, permanenceDecrement,
predictedSegmentDecrement, activationThreshold, seed):
params = {
"apicalInputDimensions": (apicalInputSize,),
"columnDimensions": (columnCount,),
"cellsPerColumn": cellsPerColumn,
"initialPermanence": initialPermanence,
"connectedPermanence": connectedPermanence,
"minThreshold": minThreshold,
"maxNewSynapseCount": sampleSize,
"permanenceIncrement": permanenceIncrement,
"permanenceDecrement": permanenceDecrement,
"predictedSegmentDecrement": predictedSegmentDecrement,
"activationThreshold": activationThreshold,
"seed": seed,
"learnOnOneCell": False,
"formInternalBasalConnections": True,
}
self.tm = ExtendedTemporalMemory(**params)
def compute(self, activeColumns, apicalInput, learn):
activeColumns = sorted(activeColumns)
apicalInput = sorted(apicalInput)
# Use depolarizeCells + activateCells rather than tm.compute so that
# getPredictiveCells returns predictions for the current timestep.
self.tm.depolarizeCells(activeCellsExternalApical=apicalInput,
learn=learn)
self.tm.activateCells(activeColumns,
reinforceCandidatesExternalApical=apicalInput,
growthCandidatesExternalApical=apicalInput,
learn=learn)
def reset(self):
self.tm.reset()
def getActiveCells(self):
return self.tm.getActiveCells()
def getPredictedCells(self):
return self.tm.getPredictiveCells()
| agpl-3.0 | 8,968,199,195,047,110,000 | 35.685393 | 82 | 0.685145 | false |
Neuroglycerin/hail-seizure | testing/test_testing.py | 1 | 31374 | #!/usr/bin/env python3
import random
import copy
import numpy as np
import sklearn.pipeline
import json
import unittest
import glob
import warnings
import subprocess
import os
import python.utils as utils
import csv
import h5py
class testHDF5parsing(unittest.TestCase):
'''
Unittests for the function that parses the matlab HDF5s
'''
@classmethod
def setUpClass(cls):
cls.settings_fh = 'test_settings.json'
cls.settings = utils.get_settings(cls.settings_fh)
cls.all_subjects = set(['Dog_1',
'Dog_2',
'Dog_3',
'Dog_4',
'Dog_5',
'Patient_1',
'Patient_2'])
cls.all_types = set(['preictal',
'test',
'pseudointerictal',
'interictal',
'pseudopreictal'])
cls.malformed_feat = 'malformed_feat'
cls.malformed_file = os.path.join(cls.settings['TRAIN_DATA_PATH'],
"{0}{1}.h5".format(
cls.malformed_feat,
cls.settings['VERSION']))
cls.malformed_feat = h5py.File(cls.malformed_file, 'w')
cls.malformed_feat.create_dataset('malfie', (10,10))
cls.malformed_feat.close()
def test_ioerror_warning(self):
'''
Assert a non-existent file correctly raises warning
'''
non_existent_feat = 'fake_feat'
h5_file_name = os.path.join(self.settings['TRAIN_DATA_PATH'],
"{0}{1}.h5".format(non_existent_feat,
self.settings['VERSION']))
with warnings.catch_warnings(record=True) as w:
dummy = utils.parse_matlab_HDF5(non_existent_feat,
self.settings)
self.assertEqual(len(w), 1, msg="Check there is one and only "
"one warning raised")
self.assertIs(w[-1].category, UserWarning, msg="Check that "
"warning raised "
"is a UserWarning ")
self.assertEqual(str(w[-1].message),
"{0} does not exist (or is not "
"readable)".format(h5_file_name), msg="Check the "
"warning is "
"the correct "
"format ")
def test_parse_error_warning(self):
'''
Assert malformed HDF5 raises proper warning
'''
malformed_feat = 'malformed_feat'
h5_file_name = os.path.join(self.settings['TRAIN_DATA_PATH'],
"{0}{1}.h5".format(malformed_feat,
self.settings['VERSION']))
with warnings.catch_warnings(record=True) as w:
dummy = utils.parse_matlab_HDF5(malformed_feat,
self.settings)
self.assertEqual(len(w), 1, msg="Check one and only one error "
"raised")
self.assertIs(w[-1].category, UserWarning, msg="Check error is "
"UserWarning")
self.assertEqual(str(w[-1].message), "Unable to "
"parse {0}".format(\
h5_file_name),
msg="Check the warning raised is correct format")
def test_hdf5_parse(self):
'''
Very basic check of parsed object structure containing minimum 3 dicts
'''
parsed_HDF5 = utils.parse_matlab_HDF5(self.settings['FEATURES'][0],
self.settings)
subjects = set(parsed_HDF5.keys())
self.assertEqual(subjects,
self.all_subjects,
msg="Check that parsed HDF5 contains all subjects")
typs = set(parsed_HDF5['Dog_1'].keys())
self.assertEqual(typs,
self.all_types,
msg="Check that all ictypes are in parsed file by "
"checking the ictypes under Dog_1")
num_interictal_dog1_segs = len(\
parsed_HDF5['Dog_1']['interictal'].keys())
self.assertEqual(num_interictal_dog1_segs,
480,
msg="Check there is the correct number of segments in "
"parsed HDF5s by checking dog1 interictal")
@classmethod
def tearDownClass(cls):
os.unlink(cls.malformed_file)
class testBatchParallel(unittest.TestCase):
'''
Class to test the batch parallel script as well as consistency in output
of train.py and predict.py when run on the same data multiple times
'''
@classmethod
def setUpClass(cls):
cls.settings_fh_1 = os.path.join('batch_test', 'batch_test1.json')
cls.settings_fh_2 = os.path.join('batch_test', 'batch_test2.json')
cls.settings_1 = utils.get_settings(cls.settings_fh_1)
cls.settings_2 = utils.get_settings(cls.settings_fh_2)
cls.NULL = open(os.devnull, 'w')
cls.proc = subprocess.call(['./parallel_batch_run.py',
'-s', 'batch_test'],
stdout=cls.NULL,
stderr=cls.NULL)
def test_model_output_and_consistency(self):
self.models = glob.glob(os.path.join(self.settings_1['MODEL_PATH'],
'batch*'))
self.assertEqual(len(self.models), 14)
target_models_1 = glob.glob(os.path.join(self.settings_1['MODEL_PATH'],
self.settings_1['RUN_NAME']+\
'_model_for_*.model'))
target_models_2 = glob.glob(os.path.join(self.settings_2['MODEL_PATH'],
self.settings_2['RUN_NAME']+\
'_model_for_*.model'))
target_models = target_models_1 + target_models_2
self.assertEqual(set(self.models), set(target_models))
random_subj = random.choice(self.settings_2['SUBJECTS'])
model_1 = os.path.join(\
self.settings_1['MODEL_PATH'],
'batch_test1_model_for_{0}'
'_using_{1}_feats.model'.format(random_subj,
self.settings_1['VERSION']))
model_2 = os.path.join(\
self.settings_2['MODEL_PATH'],
'batch_test2_model_for_{0}'
'_using_{1}_feats.model'.format(random_subj,
self.settings_2['VERSION']))
# binaries different sizes - unsure why
#self.assertEqual(os.stat(model_1).st_size,
# os.stat(model_2).st_size)
for f in self.models:
os.unlink(f)
def test_submission_output_and_consistency(self):
self.outputs = glob.glob(os.path.join(self.settings_2['SUBMISSION_PATH'],
'batch*'))
self.assertEqual(len(self.outputs), 2)
with open(self.outputs[0], 'r') as output_1_fh:
output_1 = set(output_1_fh.read().split('\n'))
with open(self.outputs[1], 'r') as output_2_fh:
output_2 = set(output_2_fh.read().split('\n'))
self.assertEqual(output_1, output_2)
for f in self.outputs:
os.unlink(f)
def test_auc_score_output_and_consistency(self):
self.AUC_scores = glob.glob(os.path.join(self.settings_2['AUC_SCORE_PATH'],
'AUC_scores.csv'))
self.assertEqual(len(self.AUC_scores), 1)
with open(self.AUC_scores[0], 'r') as auc_csv_fh:
lines = [line.split('\t') for line in auc_csv_fh.readlines()]
self.assertEqual(lines[1][1:], lines[2][1:])
self.assertEqual(len(lines), 3)
for f in self.AUC_scores:
os.unlink(f)
class testTrain(unittest.TestCase):
'''
Unittests for the train.py script
'''
@classmethod
def setUpClass(cls):
cls.settings_fh = 'test_train.json'
cls.settings = utils.get_settings(cls.settings_fh)
f = open('stdout_tmp', 'w')
cls.proc = subprocess.call(['./train.py',
'-s', 'test_train.json'],
stdout=f)
f.close()
with open('stdout_tmp', 'r') as f:
cls.stdout = f.read()
cls.model_files = glob.glob(os.path.join(cls.settings['MODEL_PATH'],
"{0}_model_for_*_using_{1}_feats.model".format(\
cls.settings['RUN_NAME'],
cls.settings['VERSION'])))
def test_train_stdout(self):
'''
Test stdout prints correct number of AUC scores
'''
# count the number of AUC scores printed to stdout
# and assert this is 8 (7 subjects and 1 overall)
AUC_score_count = self.stdout.count('AUC')
self.assertEqual(AUC_score_count,
8,
msg="Check that train prints 8 AUC scores to stdout")
def test_train_AUC_csv_out(self):
'''
Test the current writing of AUC scores to output csv
'''
AUC_csv = os.path.join(self.settings['AUC_SCORE_PATH'], 'AUC_scores.csv')
# check AUC csv exists or has been created
self.assertTrue(os.path.exists(AUC_csv))
# check there is only one csv that has been created
self.assertEqual(len(glob.glob(os.path.join(\
self.settings['AUC_SCORE_PATH'], '*.csv'))),
1)
        # read it and check that it contains two lines with 8 tabs each
with open(AUC_csv, 'r') as AUC_csv_fh:
lines = [line for line in AUC_csv_fh.readlines()]
# check first line contains header information
target_header = "\t".join([''] + list(self.settings['SUBJECTS']))\
+ '\tall\n'
self.assertTrue(lines[0] == target_header)
self.assertEqual(lines[1].count('\t'), 8)
self.assertEqual(lines[1][:10], 'test_train')
def test_model_number(self):
'''
Test correct number of models are generated
'''
# get number of models
self.assertEqual(len(self.model_files),
7,
msg="Check that 7 models are written out to model_path dir")
def test_model_size_correct(self):
'''
Test if one of the serialised models is roughly correct size
'''
# randomly pick an output model
output_model = random.choice(self.model_files)
        # get file size and assert it is between 1 kB and 100 MB
output_model_stats = os.stat(output_model)
output_model_size = output_model_stats.st_size
self.assertTrue(1000 < output_model_size < 100000000,
msg="Check that randomly picked model ({0}) is between 1 "
"and 100M".format(output_model))
def test_model_can_be_read(self):
'''
Check whether a model can be read
'''
output_model = random.choice(self.settings['SUBJECTS'])
parsed_model = utils.read_trained_model(output_model, self.settings)
self.assertIsInstance(parsed_model,
sklearn.pipeline.Pipeline,
msg="Check that randomly picked model ({0}) is "
"the correct sklearn obj type".format(output_model))
@classmethod
def tearDownClass(cls):
'''
Remove generated model files and stdout output temp file
'''
for f in cls.model_files:
os.unlink(f)
os.unlink(os.path.join(cls.settings['AUC_SCORE_PATH'], 'AUC_scores.csv'))
os.unlink('stdout_tmp')
class testPredict(unittest.TestCase):
'''
Unittests for the predict.py script to ensure output is valid
'''
@classmethod
def setUpClass(cls):
cls.settings_fh = 'test_predict.json'
cls.settings = utils.get_settings(cls.settings_fh)
cls.NULL = open(os.devnull, 'w')
cls.proc = subprocess.call(['./predict.py',
'-s', 'test_predict.json'],
stdout=cls.NULL,
stderr=cls.NULL)
cls.output_file = glob.glob(os.path.join(cls.settings['SUBMISSION_PATH'],
"*.csv"))
def test_file_output(self):
'''
Test whether a file was actually outputted
'''
# Check whether there is only one output in submission path
self.assertEqual(len(self.output_file), 1, msg="Check only one csv is "
"output to output path")
self.assertEqual(self.output_file[0],
os.path.join(self.settings['SUBMISSION_PATH'],
'{0}_submission_using_{1}_feats'
'.csv'.format(self.settings['RUN_NAME'],
self.settings['VERSION'])),
msg="Checking that the output csv has the right "
"abspath and filename")
def test_csv_valid(self):
'''
Test whether file is a csv of the right dimensions
'''
# parse submission csv into list of lists with csv reader
with open(self.output_file[0], 'r') as csv_out_file:
parsed_contents = [row for row \
in csv.reader(csv_out_file, delimiter=',')]
# assert csv has right number of rows
self.assertEqual(len(parsed_contents),
3936,
msg="Check that output csv has 3936 rows "
"(3935 test segments + header)")
# assert all rows have 2 cols
for row in parsed_contents:
self.assertEqual(len(row),
2,
msg="Check that output csv only has 2 cols")
@classmethod
def tearDownClass(cls):
'''
Close /dev/null filehandle and remove any csv output
'''
cls.NULL.close()
for f in cls.output_file:
if f!='.placeholder':
os.unlink(f)
class testDataAssembler(unittest.TestCase):
'''
Unittests for DataAssembler - class which builds training and test data
'''
@classmethod
def setUpClass(cls):
cls.settings_fh = 'test_data_assembler.json'
cls.settings = utils.get_settings(cls.settings_fh)
cls.subjects = cls.settings['SUBJECTS']
cls.features = cls.settings['FEATURES']
cls.data = utils.get_data(cls.settings)
with open('../segmentMetadata.json', 'r') as f:
cls.metadata = json.load(f)
cls.ictyps = cls.settings['DATA_TYPES']
cls.segment_counts = {'Dog_1': {'preictal': 24,
'pseudopreictal': 20,
'interictal': 480,
'pseudointerictal': 400,
'test': 502},
'Dog_2': {'preictal': 42,
'pseudopreictal': 35,
'interictal': 500,
'pseudointerictal': 416,
'test': 1000},
'Dog_3': {'preictal': 72,
'pseudopreictal': 60,
'interictal': 1440,
'pseudointerictal': 1200,
'test': 907},
'Dog_4': {'preictal': 97,
'pseudopreictal': 80,
'interictal': 804,
'pseudointerictal': 670,
'test': 990},
'Dog_5': {'preictal': 30,
'pseudopreictal': 25,
'interictal': 450,
'pseudointerictal': 375,
'test': 191},
'Patient_1': {'preictal': 18,
'pseudopreictal': 15,
'interictal': 50,
'pseudointerictal': 41,
'test': 195},
'Patient_2': {'preictal': 18,
'pseudopreictal': 15,
'interictal': 42,
'pseudointerictal': 35,
'test': 150}}
cls.feature_length = {'Dog_1': 16,
'Dog_2': 16,
'Dog_3': 16,
'Dog_4': 16,
'Dog_5': 15,
'Patient_1': 15,
'Patient_2': 24}
cls.ictyp_mapping = {'preictal': 1,
'interictal': 0,
'pseudopreictal': 1,
'pseudointerictal': 0}
def setUp(self):
self.DataAssemblerInstance = utils.DataAssembler(self.settings,
self.data,
self.metadata)
def test_build_test(self):
'''
For subjs ensure build_test outputs correct X matrix
'''
for subj in self.subjects:
X = self.DataAssemblerInstance.build_test(subj)
target_X_shape = (self.segment_counts[subj]['test'],
self.feature_length[subj] * len(self.features))
self.assertIsInstance(X,
np.ndarray,
msg="Check that for subj {0} "
"X is an array".format(subj))
            self.assertEqual(X.shape, target_X_shape,
                             msg="Check that for subj {0} "
                                 "X is the right shape".format(subj))
self.assertTrue(X[:,
:self.feature_length[subj]].all() == 0,
msg="Check that for subj {0} "
"X is generated with 0 first then 1 "
"afterwards".format(subj))
self.assertTrue(X[:,
self.feature_length[subj]:\
self.feature_length[subj]*2].all() == 1,
msg="Check that for subj {0} "
"X is generated with 0 first then 1 "
"afterwards".format(subj))
def test_build_training(self):
'''
For all subjects ensure that build_training outputs correct X, y
'''
for subj in self.subjects:
X, y = self.DataAssemblerInstance.build_training(subj)
self.assertIsInstance(X,
np.ndarray,
msg="Check that for subj {0} "
"X is an array".format(subj))
target_X_shape = (self.segment_counts[subj]['interictal'] +
self.segment_counts[subj]['preictal'] +
self.segment_counts[subj]['pseudointerictal'] +
self.segment_counts[subj]['pseudopreictal'],
self.feature_length[subj] * len(self.features))
            self.assertEqual(X.shape, target_X_shape,
                             msg="Check that for subj {0} "
                                 "X is the right shape".format(subj))
self.assertTrue(X[:,
:self.feature_length[subj]].all() == 0,
msg="Check that for subj {0} "
"X is generated with 0 first then 1 "
"afterwards".format(subj))
self.assertTrue(X[:,
self.feature_length[subj]:\
self.feature_length[subj]*2].all() == 1,
msg="Check that for subj {0} "
"X is generated with 0 first then 1 "
"afterwards".format(subj))
            self.assertIsInstance(y,
                                  np.ndarray,
                                  msg="Check that for subj {0} "
                                      "y is an array".format(subj))
target_y_shape = (self.segment_counts[subj]['interictal'] +
self.segment_counts[subj]['preictal'] +
self.segment_counts[subj]['pseudointerictal'] +
self.segment_counts[subj]['pseudopreictal'], )
            self.assertEqual(y.shape, target_y_shape,
                             msg="Check that for subj {0} "
                                 "y is the right shape".format(subj))
def test__build_y(self):
'''
For each subj and ictyp make sure the returned y vector is correct
'''
for subj in self.subjects:
for ictyp in ['preictal', 'interictal']:
y = self.DataAssemblerInstance._build_y(subj, ictyp)
                self.assertIsInstance(y,
                                      np.ndarray,
                                      msg="Check that y for subj {0} and "
                                          "ictyp {1} is a numpy array".format(subj,
                                                                              ictyp))
                self.assertEqual(y.shape[0],
                                 self.segment_counts[subj][ictyp],
                                 msg="Check that y has the right length for "
                                     "subj {0} and ictyp {1}".format(subj, ictyp))
                self.assertTrue(all(y == self.ictyp_mapping[ictyp]),
                                msg="Check that y has the right values for "
                                    "subj {0} and ictyp {1}".format(subj, ictyp))
def test__build_y_error_on_test(self):
'''
Test whether _build_y throws error if run on 'test' data
'''
subj = random.choice(self.subjects)
self.assertRaises(ValueError,
self.DataAssemblerInstance._build_y,
subj,
'test')
def test__build_X(self):
'''
        For each subj and ictyp, check that _build_X builds X correctly
'''
for subj in self.subjects:
for ictyp in self.ictyps:
X, index = self.DataAssemblerInstance._build_X(subj, ictyp)
self.assertIsInstance(X,
np.ndarray,
msg="Check that for subj {0} and ictyp {1} "
"X is an array".format(subj, ictyp))
                self.assertEqual(X.shape,
                                 (self.segment_counts[subj][ictyp],
                                  self.feature_length[subj]*2),
                                 msg="Check that for subj {0} and ictyp {1} "
                                     "X is the right shape".format(subj,
                                                                   ictyp))
def test__build_X_ordering(self):
'''
Check order of the feature input is preserved by _build_X
'''
ictyp = random.choice(self.ictyps)
subj = random.choice(self.subjects)
X, index = self.DataAssemblerInstance._build_X(subj, ictyp)
self.assertTrue(X[:,
:self.feature_length[subj]].all() == 0,
msg="Check that for random subj {0} and ictyp {1} "
"when reading an all 0 then all 1 feature in order "
"X is generated with 0 first then 1 "
"afterwards".format(subj, ictyp))
self.assertTrue(X[:,
self.feature_length[subj]:\
self.feature_length[subj]*2].all() == 1,
msg="Check that for random subj {0} and ictyp {1} "
"when reading an all 0 then all 1 feature in order "
"X is generated with 0 first then 1 "
"afterwards".format(subj, ictyp))
def test__build_X_feature_index(self):
'''
Check feature index is correctly made by _build_X
'''
ictyp = random.choice(self.ictyps)
subj = random.choice(self.subjects)
X, feature_index = self.DataAssemblerInstance._build_X(subj, ictyp)
self.assertEqual((feature_index[0], feature_index[-1]),
self.features,
msg="Check that for random subj {0} and ictyp {1} "
"feature index is same order as features "
"are in settings".format(subj, ictyp))
def test__assemble_feature_pseudo(self):
'''
        Check _assemble_feature returns the correct X for a random subj, feature and pseudo ictyp
'''
ictyp = random.choice(['pseudopreictal', 'pseudointerictal'])
subj = random.choice(self.subjects)
feature = random.choice(self.features)
X_part = self.DataAssemblerInstance._assemble_feature(subj,
feature,
ictyp)
self.assertIsInstance(X_part,
np.ndarray,
msg="Check that for random subj {0} and ictyp {1} "
"X_part is an array".format(subj, ictyp))
self.assertEqual(X_part.shape, (self.segment_counts[subj][ictyp],
self.feature_length[subj]),
msg="Check that for random subj {0} and ictyp {1} "
"X_part is correct shape".format(subj, ictyp))
def test__assemble_feature_non_pseudo(self):
'''
        Check _assemble_feature returns the correct X for a random subj, feature and ictyp
'''
ictyp = random.choice(['test', 'preictal', 'interictal'])
subj = random.choice(self.subjects)
feature = random.choice(self.features)
X_part = self.DataAssemblerInstance._assemble_feature(subj,
feature,
ictyp)
self.assertIsInstance(X_part,
np.ndarray,
msg="Check that for random subj {0} and ictyp {1} "
"X_part is an array".format(subj, ictyp))
self.assertEqual(X_part.shape, (self.segment_counts[subj][ictyp],
self.feature_length[subj]),
msg="Check that for random subj {0} and ictyp {1} "
"X_part is correct shape".format(subj, ictyp))
def test__parse_segment_names(self):
'''
Check parse segment names
- is a dict containing all the subjects
- For a random subject:
* it contains a dict
* a dict of a size equal to the summed segment count for all ictyps
of that subject
'''
segment_names = self.DataAssemblerInstance._parse_segment_names()
self.assertEqual(set(segment_names.keys()), set(self.subjects))
subj = random.choice(self.subjects)
self.assertIsInstance(segment_names[subj], dict)
subj_segment_number = sum([self.segment_counts[subj][ictyp] \
for ictyp in self.ictyps])
length_of_class_segment_names = sum([len(segment_names[subj][ictyp]) \
for ictyp in self.ictyps])
self.assertEqual(length_of_class_segment_names, subj_segment_number,
msg="{0} subj used".format(subj))
def test_init_with_pseudo(self):
'''
Check that pseudo flag is true when init with settings including pseudo
'''
test_settings = copy.deepcopy(self.settings)
test_settings['DATA_TYPES'] = ['test', 'interictal', 'preictal',
'pseudointerictal', 'pseudopreictal']
DataAssemblerInstance = utils.DataAssembler(test_settings,
self.data,
self.metadata)
self.assertTrue(DataAssemblerInstance.include_pseudo)
def test_init_without_pseudo(self):
'''
        Check that pseudo flag is false when init with settings excluding pseudo
'''
test_settings = copy.deepcopy(self.settings)
test_settings['DATA_TYPES'] = ['test', 'interictal', 'preictal']
DataAssemblerInstance = utils.DataAssembler(test_settings,
self.data,
self.metadata)
self.assertFalse(DataAssemblerInstance.include_pseudo)
def test_init(self):
'''
Test that the class inits correctly for all params (apart from pseudoflag)
'''
self.assertEqual(self.DataAssemblerInstance.settings, self.settings)
self.assertEqual(self.DataAssemblerInstance.data, self.data)
self.assertEqual(self.DataAssemblerInstance.metadata, self.metadata)
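# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): a minimal
# end-to-end driver for the DataAssembler API exercised above. It only uses
# calls that appear in these tests (get_settings, get_data, DataAssembler,
# build_training, build_test); the settings file name and the choice of the
# first subject are assumptions made for the example.
def _example_assemble_data(settings_fh='test_data_assembler.json'):
    settings = utils.get_settings(settings_fh)
    data = utils.get_data(settings)
    with open('../segmentMetadata.json', 'r') as f:
        metadata = json.load(f)
    assembler = utils.DataAssembler(settings, data, metadata)
    subj = settings['SUBJECTS'][0]
    # build_training returns the feature matrix and label vector for training
    X_train, y_train = assembler.build_training(subj)
    # build_test returns only the feature matrix for the unlabelled segments
    X_test = assembler.build_test(subj)
    return X_train, y_train, X_test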
if __name__=='__main__':
unittest.main()
| apache-2.0 | -6,922,376,345,681,972,000 | 40.065445 | 90 | 0.460349 | false |
shootstar/novatest | nova/tests/api/openstack/compute/contrib/test_cloudpipe.py | 4 | 7117 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.config import cfg
from nova.api.openstack.compute.contrib import cloudpipe
from nova.api.openstack import wsgi
from nova.compute import utils as compute_utils
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_network
from nova.tests import matchers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
def fake_vpn_instance():
return {
'id': 7, 'image_ref': CONF.vpn_image_id, 'vm_state': 'active',
'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
'uuid': 7777, 'project_id': 'other',
}
def compute_api_get_all_empty(context, search_opts=None):
return []
def compute_api_get_all(context, search_opts=None):
return [fake_vpn_instance()]
def utils_vpn_ping(addr, port, timeout=0.05, session_id=None):
return True
class CloudpipeTest(test.TestCase):
def setUp(self):
super(CloudpipeTest, self).setUp()
self.controller = cloudpipe.CloudpipeController()
self.stubs.Set(self.controller.compute_api, "get_all",
compute_api_get_all_empty)
self.stubs.Set(utils, 'vpn_ping', utils_vpn_ping)
def test_cloudpipe_list_no_network(self):
def fake_get_nw_info_for_instance(instance):
return {}
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
fake_get_nw_info_for_instance)
self.stubs.Set(self.controller.compute_api, "get_all",
compute_api_get_all)
req = fakes.HTTPRequest.blank('/v2/fake/os-cloudpipe')
res_dict = self.controller.index(req)
response = {'cloudpipes': [{'project_id': 'other',
'instance_id': 7777,
'created_at': '1981-10-20T00:00:00Z'}]}
self.assertEqual(res_dict, response)
def test_cloudpipe_list(self):
def network_api_get(context, network_id):
self.assertEqual(context.project_id, 'other')
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
def fake_get_nw_info_for_instance(instance):
return fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
self.stubs.Set(compute_utils, "get_nw_info_for_instance",
fake_get_nw_info_for_instance)
self.stubs.Set(self.controller.network_api, "get",
network_api_get)
self.stubs.Set(self.controller.compute_api, "get_all",
compute_api_get_all)
req = fakes.HTTPRequest.blank('/v2/fake/os-cloudpipe')
res_dict = self.controller.index(req)
response = {'cloudpipes': [{'project_id': 'other',
'internal_ip': '192.168.1.100',
'public_ip': '127.0.0.1',
'public_port': 22,
'state': 'running',
'instance_id': 7777,
'created_at': '1981-10-20T00:00:00Z'}]}
self.assertThat(res_dict, matchers.DictMatches(response))
def test_cloudpipe_create(self):
def launch_vpn_instance(context):
return ([fake_vpn_instance()], 'fake-reservation')
self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
launch_vpn_instance)
body = {'cloudpipe': {'project_id': 1}}
req = fakes.HTTPRequest.blank('/v2/fake/os-cloudpipe')
res_dict = self.controller.create(req, body)
response = {'instance_id': 7777}
self.assertEqual(res_dict, response)
def test_cloudpipe_create_already_running(self):
def launch_vpn_instance(*args, **kwargs):
self.fail("Method should not have been called")
self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
launch_vpn_instance)
self.stubs.Set(self.controller.compute_api, "get_all",
compute_api_get_all)
body = {'cloudpipe': {'project_id': 1}}
req = fakes.HTTPRequest.blank('/v2/fake/os-cloudpipe')
res_dict = self.controller.create(req, body)
response = {'instance_id': 7777}
self.assertEqual(res_dict, response)
class CloudpipesXMLSerializerTest(test.TestCase):
def test_default_serializer(self):
serializer = cloudpipe.CloudpipeTemplate()
exemplar = dict(cloudpipe=dict(instance_id='1234-1234-1234-1234'))
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('cloudpipe', tree.tag)
for child in tree:
self.assertTrue(child.tag in exemplar['cloudpipe'])
self.assertEqual(child.text, exemplar['cloudpipe'][child.tag])
def test_index_serializer(self):
serializer = cloudpipe.CloudpipesTemplate()
exemplar = dict(cloudpipes=[
dict(
project_id='1234',
public_ip='1.2.3.4',
public_port='321',
instance_id='1234-1234-1234-1234',
created_at=timeutils.isotime(),
state='running'),
dict(
project_id='4321',
public_ip='4.3.2.1',
public_port='123',
state='pending')])
text = serializer.serialize(exemplar)
tree = etree.fromstring(text)
self.assertEqual('cloudpipes', tree.tag)
self.assertEqual(len(exemplar['cloudpipes']), len(tree))
for idx, cl_pipe in enumerate(tree):
kp_data = exemplar['cloudpipes'][idx]
for child in cl_pipe:
self.assertTrue(child.tag in kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
deserializer = wsgi.XMLDeserializer()
exemplar = dict(cloudpipe=dict(project_id='4321'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<cloudpipe><project_id>4321</project_id></cloudpipe>')
result = deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
| apache-2.0 | -4,627,176,863,973,704,000 | 39.4375 | 78 | 0.586202 | false |
sony/nnabla | python/src/nnabla/utils/cli/train.py | 1 | 30077 | # Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import range
from collections import OrderedDict
from contextlib2 import ExitStack # Backport from python3
import glob
import numpy as np
import os
import time
import zipfile
import nnabla as nn
from nnabla.logger import logger
from nnabla import available_contexts
from nnabla.parameter import save_parameters
from nnabla.utils.progress import configure_progress, progress
import nnabla.utils.callback as callback
from nnabla.utils.cli.utility import let_data_to_variable
from nnabla.utils.cli.utility import measure_cpu_gpu_instant_load
from nnabla.utils.cli.utility import get_cpu_gpu_average_load
from nnabla.utils.cli.utility import save_optimizer_states
from nnabla.utils.cli.utility import NodeTimeInfoCollector
from nnabla.utils.cli.utility import load_train_state
from nnabla.utils.cli.utility import str_to_num
from nnabla.utils.cli.utility import lms_scheduler
from nnabla.utils.nnp_format import nnp_version
from nnabla.utils.communicator_util import current_communicator, single_or_rankzero
import nnabla.utils.load as load
from nnabla.config import nnabla_config
try:
_OPTIMIZER_CHECKPOINT_INTERVAL = int(
nnabla_config.get('CHECKPOINT', 'optimizer_interval'))
except Exception:
_OPTIMIZER_CHECKPOINT_INTERVAL = 5
_save_parameter_info = {}
nodeTimeCollector = NodeTimeInfoCollector()
def _all_reduce(comm, var, division, inplace):
comm.all_reduce(var, division=division, inplace=inplace)
def _save_parameters(args, suffix, epoch, train_config, force=False):
global _save_parameter_info
if suffix not in _save_parameter_info:
_save_parameter_info[suffix] = {}
_save_parameter_info[suffix]['epoch'] = 0
_save_parameter_info[suffix]['time'] = 0
current_time = time.time()
timediff = current_time - _save_parameter_info[suffix]['time']
epochdiff = epoch - _save_parameter_info[suffix]['epoch']
globname = os.path.join(args.outdir, 'results_{}_*.nnp'.format(suffix))
exists = glob.glob(globname)
base = os.path.join(args.outdir, 'results_{}_{}'.format(suffix, epoch))
base_candidate = callback.result_base(base, suffix, args.outdir)
if base_candidate is None:
if suffix is None or suffix == 'best':
base = os.path.join(args.outdir, 'results')
else:
base = base_candidate
filename = base + '.nnp'
if force or (not os.path.exists(filename) and (timediff > 180.0 or epochdiff > 10)):
# Remove existing nnp before saving new file.
for exist in exists:
os.unlink(exist)
version_filename = base + '_version.txt'
with open(version_filename, 'w') as file:
file.write('{}\n'.format(nnp_version()))
param_filename = f'{base}_param{nnabla_config.get("MISC", "nnp_param_format")}'
save_parameters(param_filename)
need_save_opti = train_config.optimizers and epoch % _OPTIMIZER_CHECKPOINT_INTERVAL == 0
if need_save_opti:
opti_filenames = save_optimizer_states(
base, '.h5', train_config)
with zipfile.ZipFile(filename, 'w') as nnp:
nnp.write(version_filename, 'nnp_version.txt')
nnp.write(_save_parameter_info['config'], os.path.basename(
_save_parameter_info['config']))
nnp.write(param_filename, f'parameter{nnabla_config.get("MISC", "nnp_param_format")}')
if need_save_opti:
for f in opti_filenames:
nnp.write(f, f[len(base) + 1:])
os.unlink(version_filename)
os.unlink(param_filename)
if need_save_opti:
for f in opti_filenames:
os.unlink(f)
_save_parameter_info[suffix]['epoch'] = epoch
_save_parameter_info[suffix]['time'] = current_time
callback.save_train_snapshot()
def _update(iter, config, cost, scheduler):
comm = current_communicator()
loaded_data = {}
is_first_optimizer = True
def _sum_cost():
if comm:
# logger.log(99, "Calc cost with communicator")
var = [nn.NdArray()]
var[0].data = cost.sum_iteration
_all_reduce(comm, var, division=False, inplace=True)
cost.sum_epoch += var[0].data
cost.num_iteration += comm.size
else:
cost.sum_epoch += cost.sum_iteration
cost.num_iteration += 1
def _get_reserved_variable(shape, reserved_variable_name, iter, iter_per_epoch, max_epoch):
if reserved_variable_name == "%iter":
value = iter
elif reserved_variable_name == "%max_iter":
value = max_epoch * iter_per_epoch
elif reserved_variable_name == "%epoch":
value = iter // iter_per_epoch
elif reserved_variable_name == "%epochf":
value = iter * 1.0 / iter_per_epoch
elif reserved_variable_name == "%max_epoch":
value = max_epoch
elif reserved_variable_name == "%progress":
value = (iter * 1.0 / iter_per_epoch) / max_epoch
else:
raise ValueError("Unknown reserved variable {}".format(
reserved_variable_name))
return value
for opt in config.optimizers.values():
o = opt.optimizer
if (o.start_iter == 0 or iter + 1 >= o.start_iter) and (o.end_iter == 0 or iter + 1 <= o.end_iter):
# Load dataset
data = OrderedDict()
for di in opt.data_iterators:
if di not in loaded_data:
loaded_data[di] = di.next()
data.update(zip(di.variables, loaded_data[di]))
for v, d in o.dataset_assign.items():
# TODO: here we consume a bit more memory for loading all edge nodes to cuda
# dest_context = config.global_config.default_context if not o.forward_sequence or v not in o.forward_sequence[
# 0].inputs else None
dest_context = config.global_config.default_context
if d not in data and d[0] == "%":
value = _get_reserved_variable(
v.variable_instance.shape, d, iter, config.training_config.iter_per_epoch, config.training_config.max_epoch)
v.variable_instance.data.fill(value)
elif d in data:
let_data_to_variable(v.variable_instance, data[
d], ctx=dest_context,
data_name=d, variable_name=v.name)
else:
raise ValueError('Variable "{}" is not found in dataset "{}", optimizer "{}"'.format(
d, ', '.join(o.data_iterators.keys()), o.name))
# Generate data
for v, generator in o.generator_assign.items():
# TODO: here we consume a bit more memory for loading all edge nodes to cuda
# dest_context = config.global_config.default_context if not o.forward_sequence or v not in o.forward_sequence[
# 0].inputs else None
dest_context = config.global_config.default_context
let_data_to_variable(v.variable_instance,
data=generator(v.variable_instance.d.shape), ctx=dest_context,
variable_name=v.name)
# Monitor loss before forward to prepare input data while processing on
# GPU
if cost.variables:
for l in cost.variables:
cost.sum_iteration += np.mean(l.variable_instance.d)
# l.variable_instance.data.zero()
if is_first_optimizer:
is_first_optimizer = False
_sum_cost()
if single_or_rankzero():
progress("Training : cost={0:0.6f}".format(cost.sum_iteration),
(iter % config.training_config.iter_per_epoch) * 1.0 / config.training_config.iter_per_epoch)
cost.sum_iteration = 0.0
with scheduler:
# This size is omitted if multiple variables in loss_variables
# otherwise, this size is omitted since passing through F.sink()
l_size = o.loss_variables[0].variable_instance.size
with nodeTimeCollector.collect_cost_time(comm, iter):
# Forward
o.target.forward(clear_no_need_grad=True)
# Equivalency with previous version
if iter % o.update_interval == 0:
o.solver.zero_grad()
# Backward
if o.comm and iter % o.update_interval == o.update_interval - 1:
params = [x.grad for x in o.parameters.values()]
o.target.backward(grad=1.0 / l_size,
clear_buffer=True, communicator_callbacks=comm.all_reduce_callback(params, 2 << 20, division=True))
else:
o.target.backward(grad=1.0 / l_size, clear_buffer=True)
# Update
if iter % o.update_interval == o.update_interval - 1:
if o.weight_decay > 0:
o.solver.weight_decay(o.weight_decay)
if o.scheduler is not None:
o.solver.set_learning_rate(
o.scheduler.get_learning_rate(iter))
o.solver.update()
# Reserve monitor loss
cost.variables = o.loss_variables
# Monitor loss at the end of epoch
if iter % config.training_config.iter_per_epoch == config.training_config.iter_per_epoch - 1 and cost.variables:
for l in cost.variables:
cost.sum_iteration += np.mean(l.variable_instance.d)
# l.variable_instance.data.zero()
_sum_cost()
cost.variables = None
cost.sum_iteration = 0.0
return cost
def _evaluate(args, config, monitoring_report, best_error, epoch, scheduler):
comm = current_communicator()
error_str = ''
valid_error = 0.0
def _sum_error(sum, error):
ret = None
if comm:
# logger.log(99, "Calc error with communicator")
var = [nn.NdArray()]
var[0].data = error
_all_reduce(comm, var, division=False, inplace=True)
ret = sum + var[0].data
else:
ret = sum + error
return ret
for name, mon in config.monitors.items():
m = mon.monitor
error_sum_monitor = 0.0
error_count = 0
data_size = max([di.size for di in mon.data_iterators])
batch_size = max([di.batch_size for di in mon.data_iterators])
for i in range(data_size // batch_size):
# Load dataset
data = OrderedDict()
for di in mon.data_iterators:
data.update(zip(di.variables, di.next()))
# Set data to variable
for v, d in m.dataset_assign.items():
# NOTICE: trivially increase cuda memory usage for loading all edge nodes
dest_context = config.global_config.default_context
let_data_to_variable(v.variable_instance, data[
d], ctx=dest_context,
data_name=d, variable_name=v.name)
# Generate data
for v, generator in m.generator_assign.items():
# NOTICE: trivially increase cuda memory usage for loading all edge nodes
dest_context = config.global_config.default_context
let_data_to_variable(v.variable_instance,
data=generator(v.variable_instance.d.shape), ctx=dest_context,
variable_name=v.name)
# Sum error before forward to prepare input data while processing
# on GPU
if error_count > 0:
error_sum = 0.0
for v in m.monitor_variables:
error_sum += np.mean(v.variable_instance.d)
# v.variable_instance.data.zero()
error_sum_monitor = _sum_error(error_sum_monitor, error_sum)
if single_or_rankzero():
progress('Evaluating "{0}"'.format(
name) + ' : error={0:0.6f}'.format(
error_sum_monitor / error_count),
di.position * 1.0 / di.size)
error_count += comm.size if comm else 1
with scheduler:
# Forward recursive
m.target.forward(clear_no_need_grad=True, clear_buffer=True)
# Sum error at the end of dataset
error_sum = 0.0
for v in m.monitor_variables:
error_sum += np.mean(v.variable_instance.d)
# v.variable_instance.data.zero()
error_sum_monitor = _sum_error(error_sum_monitor, error_sum)
if error_count == 0:
error = 0
else:
error = error_sum_monitor / error_count
if np.isnan(error) or np.isinf(error):
logger.log(99, 'Validation error is Nan')
error = 0.0
monitoring_report.append(' {}: {}\n'.format(name, error))
callback.update_status((['monitoring_report', epoch, name], error))
callback.update_status((['last', name], error)) # save last value
if error_str != '':
error_str += ', '
else:
error_str = ' {'
error_str += '{}={:.6f}'.format(name, error)
if name == 'valid_error':
valid_error = error
if error_str != '':
error_str += '}'
# Save Parameters
if single_or_rankzero():
if (not config.training_config.save_best) or \
(not best_error) or \
(best_error is not None and valid_error <= best_error):
best_error = valid_error
callback.update_status(('best.valid_error', best_error))
callback.update_status(('best.epoch', epoch))
_save_parameters(args, 'best', epoch, config, True)
return best_error, error_str
def _get_current_parameter(args, config):
def convert_to_info(config):
class Info:
pass
ret = Info()
ret.optimizers = OrderedDict()
for name, opt in config.optimizers.items():
ret.optimizers[name] = opt.optimizer
return ret
best_error, best_epoch = callback.get_best_from_status(args)
globname = os.path.join(args.outdir, 'results_current_*.nnp')
exists = glob.glob(globname)
if len(exists) > 0:
ex_list = {}
info = convert_to_info(config)
for ex in exists:
n = int(ex.rsplit('_', 1)[1].rsplit('.', 1)[0])
ex_list[n] = ex
last_epoch = sorted(ex_list.keys(), reverse=True)[0]
last_parameter = ex_list[last_epoch]
logger.log(99, "Load parameter from [{}]".format(
os.path.basename(last_parameter)))
#load.load([last_parameter], parameter_only=True)
load_train_state(last_parameter, info)
return last_epoch, best_epoch, best_error
return 0, best_epoch, best_error
def _calc_estimate_time(timeinfo, max_iter, last_iter, iter):
timeinfo.past_time = time.time() - timeinfo.start_time
timeinfo.estimate_time = timeinfo.past_time * \
(max_iter - last_iter) / (iter - last_iter)
timeinfo.remain_time = timeinfo.estimate_time - timeinfo.past_time
timeinfo.last_past_time = timeinfo.past_time
return timeinfo
def _calc_epoch_span(timeinfo):
cur_time = time.time()
span = cur_time - timeinfo.last_epoch_start_time
timeinfo.last_epoch_start_time = cur_time
return span
def _format_cgload_log(cg_load):
narr = np.array(cg_load).T
if narr.shape[0] == 4:
log_str = 'average load:{{cpu:{:.1f}%, gpu:{:.1f}%}}'.format(
np.mean(narr[1]), np.mean(narr[3]))
else:
log_str = 'average load:{{cpu:{:.1f}%}}'.format(
np.mean(narr[1]))
return log_str
def _train(args, config):
global _save_parameter_info
comm = current_communicator()
_CGLOAD_LOG_INTERVAL = 20
best_epoch = None
best_error = None
last_epoch = 0
if args.resume:
last_epoch, best_epoch, best_error = _get_current_parameter(
args, config)
if best_epoch is not None:
logger.log(
99, "Best error {} recorded at epoch {} in previous training.".format(best_error,
best_epoch))
if best_epoch > last_epoch:
                logger.log(
                    99, "Resumed epoch is {} but this training keeps this result.".format(last_epoch))
logger.log(99, "Resume from epoch {}".format(last_epoch + 1))
callback.update_status(('epoch.max', config.training_config.max_epoch))
callback.update_status(('epoch.current', last_epoch + 1
if last_epoch < config.training_config.max_epoch
else config.training_config.max_epoch))
max_iteration = config.training_config.max_epoch * \
config.training_config.iter_per_epoch
if single_or_rankzero():
logger.log(99, 'Training epoch {} of {} begin'.format(last_epoch + 1,
config.training_config.max_epoch))
class Cost:
pass
cost = Cost()
cost.sum_epoch = 0.0
cost.num_iteration = 0
cost.sum_iteration = 0.0
cost.variables = None
class TimeInfo:
pass
timeinfo = TimeInfo()
timeinfo.past_time = 0
timeinfo.estimate_time = 0
timeinfo.last_past_time = None
ctx = config.global_config.default_context
if args.enable_ooc:
logger.log(99, 'OOC enabled.')
logger.log(99, f' OOC GPU memory size {args.ooc_gpu_memory_size}.')
logger.log(99, f' OOC Window length {args.ooc_window_length}.')
scheduler = lms_scheduler(
ctx,
use_lms=args.enable_ooc,
gpu_memory_size=args.ooc_gpu_memory_size,
window_length=args.ooc_window_length)
val_scheduler = lms_scheduler(
ctx,
use_lms=args.enable_ooc,
gpu_memory_size=args.ooc_gpu_memory_size,
window_length=args.ooc_window_length)
if max_iteration > 0:
last_iteration = last_epoch * config.training_config.iter_per_epoch
if last_iteration < max_iteration:
timeinfo.start_time = time.time()
timeinfo.last_epoch_start_time = timeinfo.start_time
callback.update_status('processing', True, timeinfo.start_time)
for iteration in range(last_iteration, max_iteration):
# instant load measurement
measure_cpu_gpu_instant_load()
cost = _update(iteration, config, cost, scheduler)
if np.isnan(cost.sum_epoch) or np.isinf(cost.sum_epoch):
logger.log(99, 'Cost is Nan')
return False, False
timeinfo = _calc_estimate_time(
timeinfo, max_iteration, last_iteration, iteration + 1)
callback.update_time_train(prediction=timeinfo.estimate_time)
if 0 < config.timelimit < timeinfo.estimate_time:
logger.log(99, 'Expected training time ({:.3f}s) will exceed time limit ({}s).'.format(
timeinfo.estimate_time, config.timelimit))
return False, False
if (iteration + 1) % config.training_config.iter_per_epoch == 0:
last_past_time = -1
# End of epoch
epoch = iteration // config.training_config.iter_per_epoch + 1
cost_avg_epoch = cost.sum_epoch / cost.num_iteration if cost.num_iteration else 0
cost.sum_epoch = 0.0
cost.num_iteration = 0
monitoring_report = []
# Evaluation
error_str = ''
if epoch % config.training_config.monitor_interval == 0 or epoch <= 5:
best_error, error_str = _evaluate(
args, config, monitoring_report, best_error, epoch, val_scheduler)
# Cpu/Gpu average load
cg_load_str = ''
cgload_log = ''
cg_load = get_cpu_gpu_average_load()
if cg_load:
cg_load_str = 'epoch {} average_load_matrix: {}'.format(
epoch, cg_load)
span = _calc_epoch_span(timeinfo)
if span > _CGLOAD_LOG_INTERVAL:
cgload_log = _format_cgload_log(cg_load)
if single_or_rankzero():
# Write to monitoring_report.yml
f = open(os.path.join(
args.outdir, 'monitoring_report.yml'), 'a')
f.write('{}:\n'.format(epoch - 1))
f.write(' cost: {}\n'.format(cost_avg_epoch))
for s in monitoring_report:
f.write(s)
f.close()
callback.update_status((['monitoring_report', epoch, 'cost'],
cost_avg_epoch))
_save_parameters(args, 'current', epoch, config)
callback.update_status(('epoch.current', epoch))
callback.update_status()
logger.log(99, 'epoch {} of {} cost={:.6f} {} time=({:.1f}s /{:.1f}s) {}'.format(
epoch, config.training_config.max_epoch, cost_avg_epoch, error_str,
timeinfo.past_time, timeinfo.estimate_time, cgload_log))
if cg_load_str:
# cpu_gpu_average_load record at epoch level
callback.update_status(
(['cpu_gpu_epoch_load', epoch], cg_load))
progress(cg_load_str, 1)
if not callback.check_training_time(args, config, timeinfo, epoch, last_epoch):
_save_parameters(
args, 'current', epoch, config, True)
return False, True
if single_or_rankzero():
_save_parameters(args, 'current', epoch, config, True)
return True, False
def train_command(args):
if args.ooc_gpu_memory_size is not None:
ooc_gpu_memory_size = str_to_num(args.ooc_gpu_memory_size)
if ooc_gpu_memory_size < 0:
logger.log(99, f'Fatal error. invalid ooc_gpu_memory_size [{args.ooc_gpu_memory_size}].')
return False
args.ooc_gpu_memory_size = ooc_gpu_memory_size
if args.ooc_window_length is not None:
ooc_window_length = str_to_num(args.ooc_window_length)
if ooc_window_length < 0:
logger.log(99, f'Fatal error. invalid ooc_window_length [{args.ooc_window_length}].')
return False
args.ooc_window_length = ooc_window_length
callback.update_status(args)
if single_or_rankzero():
configure_progress(os.path.join(args.outdir, 'progress.txt'))
info = load.load([args.config], prepare_data_iterator=None,
exclude_parameter=True, context=args.context)
# Check dataset uri is empty.
dataset_error = False
for dataset in info.datasets.values():
if dataset.uri.strip() == '':
dataset_error = True
if dataset_error:
logger.log(99, 'Fatal error. Dataset URI is empty.')
return False
class TrainConfig:
pass
config = TrainConfig()
config.timelimit = -1
if args.param:
# If this parameter file contains optimizer information
# we need to info to recovery.
#load.load([args.param], parameter_only=True)
load_train_state(args.param, info)
config.timelimit = callback.get_timelimit(args)
config.global_config = info.global_config
config.training_config = info.training_config
if single_or_rankzero():
logger.log(99, 'Train with contexts {}'.format(available_contexts))
class OptConfig:
pass
config.optimizers = OrderedDict()
for name, opt in info.optimizers.items():
o = OptConfig()
o.optimizer = opt
o.data_iterators = []
config.optimizers[name] = o
class MonConfig:
pass
config.monitors = OrderedDict()
for name, mon in info.monitors.items():
m = MonConfig()
m.monitor = mon
m.data_iterators = []
config.monitors[name] = m
# Training
comm = current_communicator()
config.training_config.iter_per_epoch //= comm.size if comm else 1
max_iteration = config.training_config.max_epoch * \
config.training_config.iter_per_epoch
global _save_parameter_info
_save_parameter_info = {}
_, config_ext = os.path.splitext(args.config)
if config_ext == '.prototxt' or config_ext == '.nntxt':
_save_parameter_info['config'] = args.config
elif config_ext == '.nnp':
with zipfile.ZipFile(args.config, 'r') as nnp:
for name in nnp.namelist():
_, ext = os.path.splitext(name)
if ext == '.nntxt' or ext == '.prototxt':
nnp.extract(name, args.outdir)
_save_parameter_info['config'] = os.path.join(
args.outdir, name)
result = False
restart = False
if max_iteration > 0:
rng = np.random.RandomState(comm.rank if comm else 0)
with ExitStack() as stack:
# Create data_iterator instance only once for each dataset in optimizers
optimizer_data_iterators = {}
for name, o in config.optimizers.items():
for di in o.optimizer.data_iterators.values():
if di not in optimizer_data_iterators:
di_instance = stack.enter_context(di())
if comm and comm.size > 1:
di_instance = di_instance.slice(
rng, comm.size, comm.rank)
optimizer_data_iterators[di] = di_instance
else:
di_instance = optimizer_data_iterators[di]
o.data_iterators.append(di_instance)
# Create data_iterator instance only once for each dataset in monitors
monitor_data_iterators = {}
for name, m in config.monitors.items():
for di in m.monitor.data_iterators.values():
if di not in monitor_data_iterators:
di_instance = stack.enter_context(di())
if comm and comm.size > 1:
di_instance = di_instance.slice(
rng, comm.size, comm.rank)
monitor_data_iterators[di] = di_instance
else:
di_instance = monitor_data_iterators[di]
m.data_iterators.append(di_instance)
monitor_data_iterators.update(optimizer_data_iterators)
result, restart = _train(args, config)
else:
# save parameters without training (0 epoch learning)
logger.log(99, '0 epoch learning. (Just save parameter.)')
if single_or_rankzero():
_save_parameters(args, None, 0, config, True)
result = True
if single_or_rankzero() and not restart:
if result:
logger.log(99, 'Training Completed.')
callback.update_status('finished')
else:
logger.log(99, 'Training Incompleted.')
callback.update_status('failed')
if single_or_rankzero():
progress(None)
return True
def add_train_command(subparsers):
# Train
subparser = subparsers.add_parser('train', help='Training with NNP.')
subparser.add_argument(
'-r', '--resume', help='Resume from last saved parameter', action='store_true')
subparser.add_argument(
'-c', '--config', help='Path to nntxt', required=True)
subparser.add_argument(
'-p', '--param', help='Path to parameter file', required=False)
subparser.add_argument(
'-o', '--outdir', help='Output directory', required=True)
subparser.add_argument(
'-O', '--enable-ooc', help='Enable Out Of Core training', action='store_true')
subparser.add_argument(
'-m', '--ooc-gpu-memory-size', help='OOC gpu memory size (INTEGER or NUMeNUM or NUM[KkMmGgTtPp])', default=None)
subparser.add_argument(
'-C', '--context', help='Force exec context (cpu or cudnn[:DEVID])', default=None)
subparser.add_argument(
'-w', '--ooc-window-length', help='OOC window length (INTEGER or NUMeNUM or NUM[KkMmGgTtPp])', default=None)
callback.add_train_command_arg(subparser)
subparser.set_defaults(func=train_command)
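# ---------------------------------------------------------------------------
# Example invocation (sketch, not part of this module). Assuming this
# subcommand is registered on the standard `nnabla_cli` entry point, a typical
# training run driven by the arguments defined above would look like:
#
#   nnabla_cli train -c net.nntxt -o results/                 # fresh run
#   nnabla_cli train -c net.nntxt -o results/ -r              # resume from last save
#   nnabla_cli train -c net.nnp -o results/ -O -m 4g -w 100e3 # with OOC enabled
#
# The file names here are placeholders; only -c and -o are required.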
| apache-2.0 | 5,872,953,492,563,490,000 | 39.102667 | 141 | 0.562789 | false |
dirtycoder/pets | pets/users/tests/test_view_confirm_information.py | 1 | 1181 | from django.shortcuts import resolve_url
from django.test import TestCase
from users.models import OwnerProfile
class ConfirmInformationTest(TestCase):
def test_redirect_to_confirmation(self):
"""User not confirmed should be redirected to the edit page"""
self.create_user(False)
self.client.login(username="admin", password="test123")
resp = self.client.get(resolve_url("users:confirm_information"))
self.assertRedirects(resp, resolve_url("users:edit"))
def test_redirect_to_index(self):
"""Confirmed user should be redirected to the index page"""
self.create_user(True)
self.client.login(username="admin", password="test123")
resp = self.client.get(resolve_url("users:confirm_information"))
self.assertRedirects(resp, resolve_url("meupet:index"))
@staticmethod
def create_user(is_confirmed):
user = OwnerProfile(
first_name="Admin",
last_name="Adminson",
email="[email protected]",
username="admin",
is_information_confirmed=is_confirmed,
)
user.set_password("test123")
user.save()
| mit | 2,501,567,590,391,385,000 | 31.805556 | 72 | 0.651143 | false |
YanTangZhai/tf | tensorflow/python/training/input_test.py | 1 | 20244 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training.input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import itertools
import tensorflow.python.platform
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
class MatchFilenamesOnceTest(tf.test.TestCase):
def test(self):
temp_dir = self.get_temp_dir()
filenames = [os.path.join(temp_dir, n) for n in os.listdir(temp_dir)]
additional = [os.path.join(self.get_temp_dir(), "match_filenames.%d" % i)
for i in range(3)]
for name in additional:
open(name, "w").write("Some contents")
filenames = list(set(filenames + additional))
with self.test_session():
star = tf.train.match_filenames_once(
os.path.join(self.get_temp_dir(), "*"))
question = tf.train.match_filenames_once(
os.path.join(self.get_temp_dir(), "match_filenames.?"))
one = tf.train.match_filenames_once(additional[1])
tf.initialize_all_variables().run()
self.assertItemsEqual(map(tf.compat.as_bytes, filenames), star.eval())
self.assertItemsEqual(map(tf.compat.as_bytes, additional),
question.eval())
self.assertItemsEqual([tf.compat.as_bytes(additional[1])], one.eval())
class LimitEpochsTest(tf.test.TestCase):
def testNoLimit(self):
with self.test_session():
seven = tf.constant(7)
seven_forever = tf.train.limit_epochs(seven)
tf.initialize_all_variables().run()
for i in range(100):
self.assertEqual(7, seven_forever.eval())
def testLimit(self):
with self.test_session():
love_me = tf.constant("Love Me")
love_me_two_times = tf.train.limit_epochs(love_me, num_epochs=2)
tf.initialize_all_variables().run()
self.assertEqual(b"Love Me", love_me_two_times.eval())
self.assertEqual(b"Love Me", love_me_two_times.eval())
with self.assertRaises(tf.errors.OutOfRangeError):
love_me_two_times.eval()
class StringInputProducerTest(tf.test.TestCase):
def testNoShuffle(self):
with self.test_session():
strings = [b"to", b"be", b"or", b"not", b"to", b"be"]
num_epochs = 3
queue = tf.train.string_input_producer(
strings, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(strings) * num_epochs)
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = dequeue_many.eval()
self.assertAllEqual(strings * num_epochs, output)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session():
strings = [b"a", b"b", b"c"]
num_epochs = 600
queue = tf.train.string_input_producer(
strings, num_epochs=num_epochs, shuffle=True, seed=271828)
dequeue_many = queue.dequeue_many(len(strings))
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Validate that we only shuffle the strings within an epoch and
# count how often each possible order appears.
expected = [b"abc", b"acb", b"bac", b"bca", b"cab", b"cba"]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = dequeue_many.eval()
key = b"".join(output)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf.logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testNullStringPython(self):
# Graph-construction time check for empty string list:
with self.test_session():
with self.assertRaises(ValueError):
_ = tf.train.string_input_producer([])
def testNullString(self):
# Runtime check for empty string list. This is slightly oblique:
# The queue runner should die with an assertion error on the null
# input tensor, causing the dequeue to fail with an OutOfRangeError.
with self.test_session():
coord = tf.train.Coordinator()
queue = tf.train.string_input_producer(tf.constant([], dtype=tf.string))
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners(coord=coord)
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
coord.request_stop()
for thread in threads:
thread.join()
class RangeInputProducerTest(tf.test.TestCase):
def testNoShuffle(self):
with self.test_session():
num_epochs = 3
range_size = 5
queue = tf.train.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(range_size * num_epochs)
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# No randomness, so just see repeated copies of the input.
output = dequeue_many.eval()
self.assertAllEqual(list(xrange(range_size)) * num_epochs, output)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session():
num_epochs = 200
range_size = 2
queue = tf.train.range_input_producer(
range_size, num_epochs=num_epochs, shuffle=True, seed=314159)
dequeue_many = queue.dequeue_many(range_size)
dequeue = queue.dequeue()
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [12, 21]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = dequeue_many.eval()
key = 10 * (output[0] + 1) + (output[1] + 1)
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf.logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
dequeue.eval()
for thread in threads:
thread.join()
class SliceInputProducerTest(tf.test.TestCase):
def testNoShuffle(self):
with self.test_session() as sess:
num_epochs = 3
source_strings = [b"Alpha", b"Beta", b"Delta", b"Gamma"]
source_ints = [2, 3, 5, 7]
slices = tf.train.slice_input_producer(
[source_strings, source_ints], num_epochs=num_epochs, shuffle=False)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# No randomness, so just see repeated copies of the input.
num_items = len(source_strings) * num_epochs
output = [sess.run(slices) for _ in range(num_items)]
out_strings, out_ints = zip(*output)
self.assertAllEqual(source_strings * num_epochs, out_strings)
self.assertAllEqual(source_ints * num_epochs, out_ints)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(slices)
for thread in threads:
thread.join()
def testShuffle(self):
with self.test_session() as sess:
num_epochs = 1200
source_strings = ["A", "B", "D", "G"]
source_ints = [7, 3, 5, 2]
slices = tf.train.slice_input_producer(
[source_strings, source_ints], num_epochs=num_epochs, shuffle=True,
seed=161803)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Validate that we only shuffle the integers within an epoch and
# count how often each possible order appears.
expected = [b",".join(x) for x in
itertools.permutations([b"A7", b"B3", b"D5", b"G2"])]
frequency = {}
for e in expected:
frequency[e] = 0
for _ in range(num_epochs):
output = [sess.run(slices) for _ in range(len(source_strings))]
key = b",".join([s + tf.compat.as_bytes(str(i)) for s, i in output])
self.assertIn(key, expected)
frequency[key] += 1
# Expect an approximately even distribution over all possible orders.
expected_frequency = num_epochs / len(expected)
margin = expected_frequency * 0.4
tf.logging.info("Observed counts: %s", frequency)
for key in expected:
value = frequency[key]
self.assertGreater(value, expected_frequency - margin)
self.assertLess(value, expected_frequency + margin)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(slices)
for thread in threads:
thread.join()
class BatchTest(tf.test.TestCase):
def testOneThread(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = tf.train.batch([counter, "string"], batch_size=batch_size)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(results[0], np.arange(i * batch_size,
(i + 1) * batch_size))
self.assertAllEqual(results[1], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testOneThreadEnqueueMany(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
pre_batched = tf.train.batch([counter, "string"], batch_size=2)
batched = tf.train.batch(pre_batched, enqueue_many=True,
batch_size=batch_size)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
for i in range(num_batches):
results = sess.run(batched)
self.assertAllEqual(results[0], np.arange(i * batch_size,
(i + 1) * batch_size))
self.assertAllEqual(results[1], [b"string"] * batch_size)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreads(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = tf.train.batch([counter, "string"], batch_size=batch_size,
num_threads=4)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf.logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(results[1], [b"string"] * batch_size)
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
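# ---------------------------------------------------------------------------
# Minimal pipeline sketch (illustration only; the tests above exercise the
# pieces separately). It chains a filename producer into a reader and a
# batcher, the usual composition for these queue-based input ops. The choice
# of tf.WholeFileReader is an assumption; any reader with a read(queue) method
# from this API generation would fit the same pattern.
def _example_input_pipeline(filenames, batch_size=10):
  # Queue of filenames, refilled in the background by a QueueRunner.
  filename_queue = tf.train.string_input_producer(filenames, shuffle=True)
  reader = tf.WholeFileReader()
  key, value = reader.read(filename_queue)
  # Group single (key, value) examples into fixed-size batches.
  # As in the tests above, tf.train.start_queue_runners() must be called
  # before evaluating the returned tensors in a session.
  return tf.train.batch([key, value], batch_size=batch_size)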
class BatchJoinTest(tf.test.TestCase):
def testTwoThreads(self):
with self.test_session() as sess:
# Two threads, the first generates (0..34, "a").
num_a = 35
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, "b") 45 times and then stops.
num_b = 45
ninety_nine = tf.train.limit_epochs(
tf.constant(99, dtype=tf.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = tf.train.batch_join([[counter, "a"], [ninety_nine, "b"]],
batch_size=batch_size)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf.logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
which_a = [i for i, s in enumerate(results[1]) if s == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if len(which_a) > 0 and len(which_b) > 0: saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Verify the order of results from "a" were preserved.
self.assertAllEqual(all_a, np.arange(num_a))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
class ShuffleBatchTest(tf.test.TestCase):
def testOneThread(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = tf.train.shuffle_batch(
[counter, "string"], batch_size=batch_size, capacity=32,
min_after_dequeue=16, seed=141421)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(results[1], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [all_counts[i + 1] - all_counts[i]
for i in range(len(all_counts) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
def testManyThreads(self):
with self.test_session() as sess:
batch_size = 10
num_batches = 3
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
batched = tf.train.shuffle_batch(
[counter, "string"], batch_size=batch_size, capacity=32,
min_after_dequeue=16, seed=173205, num_threads=4)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
all_counts = []
for i in range(num_batches):
results = sess.run(batched)
tf.logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
all_counts.extend(results[0])
self.assertAllEqual(results[1], [b"string"] * batch_size)
# Results scrambled, but include all the expected numbers.
deltas = [all_counts[i + 1] - all_counts[i]
for i in range(len(all_counts) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertItemsEqual(all_counts, range(num_batches * batch_size))
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
class ShuffleBatchJoinTest(tf.test.TestCase):
def testTwoThreads(self):
with self.test_session() as sess:
# Two threads, the first generates (0..24, "a").
num_a = 25
zero64 = tf.constant(0, dtype=tf.int64)
examples = tf.Variable(zero64)
counter = examples.count_up_to(num_a)
# The second generates (99, "b") 35 times and then stops.
num_b = 35
ninety_nine = tf.train.limit_epochs(
tf.constant(99, dtype=tf.int64), num_b)
# These get joined together and grouped into batches of 5.
batch_size = 5
batched = tf.train.shuffle_batch_join(
[[counter, "a"], [ninety_nine, "b"]], batch_size=batch_size,
capacity=32, min_after_dequeue=16, seed=223607)
tf.initialize_all_variables().run()
threads = tf.train.start_queue_runners()
# Should see the "a" and "b" threads mixed together.
all_a = []
seen_b = 0
saw_both = 0
num_batches = (num_a + num_b) // batch_size
for i in range(num_batches):
results = sess.run(batched)
tf.logging.info("Batch %d: %s", i, results[0])
self.assertEqual(len(results[0]), batch_size)
self.assertEqual(len(results[1]), batch_size)
which_a = [i for i, s in enumerate(results[1]) if s == b"a"]
which_b = [i for i, s in enumerate(results[1]) if s == b"b"]
self.assertEqual(len(which_a) + len(which_b), batch_size)
if len(which_a) > 0 and len(which_b) > 0: saw_both += 1
all_a.extend([results[0][i] for i in which_a])
seen_b += len(which_b)
self.assertAllEqual([99] * len(which_b),
[results[0][i] for i in which_b])
# Some minimum level of mixing of the results of both threads.
self.assertGreater(saw_both, 1)
# Saw all the items from "a", but scrambled.
self.assertItemsEqual(all_a, range(num_a))
deltas = [all_a[i + 1] - all_a[i]
for i in range(len(all_a) - 1)]
self.assertFalse(all(d == deltas[0] for d in deltas))
self.assertEqual(seen_b, num_b)
# Reached the limit.
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batched)
for thread in threads:
thread.join()
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -8,867,017,069,121,812,000 | 36.144954 | 80 | 0.621715 | false |
thalamus/Flexget | flexget/utils/requests.py | 4 | 6668 | from __future__ import unicode_literals, division, absolute_import
import urllib2
import time
import logging
from datetime import timedelta, datetime
from urlparse import urlparse
import requests
# Allow some request objects to be imported from here instead of requests
from requests import RequestException, HTTPError
from flexget import __version__ as version
from flexget.utils.tools import parse_timedelta, TimedDict
log = logging.getLogger('requests')
# Don't emit urllib3 log messages at info level or below
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)
# same as above, but for systems where urllib3 isn't part of the requests package (e.g. Ubuntu)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# Time to wait before trying an unresponsive site again
WAIT_TIME = timedelta(seconds=60)
# Remembers sites that have timed out
unresponsive_hosts = TimedDict(WAIT_TIME)
def is_unresponsive(url):
"""
Checks if host of given url has timed out within WAIT_TIME
:param url: The url to check
:return: True if the host has timed out within WAIT_TIME
:rtype: bool
"""
host = urlparse(url).hostname
return host in unresponsive_hosts
def set_unresponsive(url):
"""
Marks the host of a given url as unresponsive
:param url: The url that timed out
"""
host = urlparse(url).hostname
if host in unresponsive_hosts:
# If somehow this is called again before previous timer clears, don't refresh
return
unresponsive_hosts[host] = True
def wait_for_domain(url, delay_dict):
for domain, domain_dict in delay_dict.iteritems():
if domain in url:
next_req = domain_dict.get('next_req')
if next_req and datetime.now() < next_req:
wait_time = next_req - datetime.now()
seconds = wait_time.seconds + (wait_time.microseconds / 1000000.0)
log.debug('Waiting %.2f seconds until next request to %s' % (seconds, domain))
# Sleep until it is time for the next request
time.sleep(seconds)
# Record the next allowable request time for this domain
domain_dict['next_req'] = datetime.now() + domain_dict['delay']
break
def _wrap_urlopen(url, timeout=None):
"""
Handles alternate schemes using urllib, wraps the response in a requests.Response
This is not installed as an adapter in requests, since urls without network locations
(e.g. file:///somewhere) will cause errors
"""
try:
raw = urllib2.urlopen(url, timeout=timeout)
except IOError as e:
msg = 'Error getting %s: %s' % (url, e)
log.error(msg)
raise RequestException(msg)
resp = requests.Response()
resp.raw = raw
# requests passes the `decode_content` kwarg to read
orig_read = raw.read
resp.raw.read = lambda size, **kwargs: orig_read(size)
resp.status_code = raw.code or 200
resp.headers = requests.structures.CaseInsensitiveDict(raw.headers)
return resp
class Session(requests.Session):
"""
Subclass of requests Session class which defines some of our own defaults, records unresponsive sites,
and raises errors by default.
"""
def __init__(self, timeout=30, max_retries=1):
"""Set some defaults for our session if not explicitly defined."""
requests.Session.__init__(self)
self.timeout = timeout
self.stream = True
self.adapters['http://'].max_retries = max_retries
# Stores min intervals between requests for certain sites
self.domain_delay = {}
self.headers.update({'User-Agent': 'FlexGet/%s (www.flexget.com)' % version})
def add_cookiejar(self, cookiejar):
"""
Merges cookies from `cookiejar` into cookiejar for this session.
:param cookiejar: CookieJar instance to add to the session.
"""
for cookie in cookiejar:
self.cookies.set_cookie(cookie)
def set_domain_delay(self, domain, delay):
"""
Registers a minimum interval between requests to `domain`
:param domain: The domain to set the interval on
:param delay: The amount of time between requests, can be a timedelta or string like '3 seconds'
"""
self.domain_delay[domain] = {'delay': parse_timedelta(delay)}
def request(self, method, url, *args, **kwargs):
"""
Does a request, but raises Timeout immediately if site is known to timeout, and records sites that timeout.
Also raises errors getting the content by default.
:param bool raise_status: If True, non-success status code responses will be raised as errors (True by default)
"""
# Raise Timeout right away if site is known to timeout
if is_unresponsive(url):
raise requests.Timeout('Requests to this site have timed out recently. Waiting before trying again.')
# Delay, if needed, before another request to this site
wait_for_domain(url, self.domain_delay)
kwargs.setdefault('timeout', self.timeout)
raise_status = kwargs.pop('raise_status', True)
# If we do not have an adapter for this url, pass it off to urllib
if not any(url.startswith(adapter) for adapter in self.adapters):
return _wrap_urlopen(url, timeout=kwargs['timeout'])
try:
result = requests.Session.request(self, method, url, *args, **kwargs)
except (requests.Timeout, requests.ConnectionError):
# Mark this site in known unresponsive list
set_unresponsive(url)
raise
if raise_status:
result.raise_for_status()
return result
# Define some module-level functions that use our Session, so this module can be used like the main requests module
def request(method, url, **kwargs):
s = kwargs.pop('session', Session())
return s.request(method=method, url=url, **kwargs)
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def post(url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
:param kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, **kwargs)
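# A minimal usage sketch of the Session defined above. The domain and URL below are
# placeholders rather than real FlexGet endpoints.
if __name__ == '__main__':
    session = Session(timeout=15)
    # Enforce a minimum gap between requests to a rate-limited site
    session.set_domain_delay('example.com', '2 seconds')
    try:
        response = session.get('http://example.com/feed')
        log.info('Fetched %s bytes (status %s)', len(response.content), response.status_code)
    except RequestException as error:
        # Covers HTTP error statuses (raise_status defaults to True), timeouts and connection errors
        log.error('Request failed: %s', error)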
| mit | 7,999,690,331,971,007,000 | 35.043243 | 119 | 0.664667 | false |
fbradyirl/home-assistant | homeassistant/components/zha/core/channels/closures.py | 1 | 2113 | """
Closures channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
import zigpy.zcl.clusters.closures as closures
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import ZigbeeChannel
from .. import registries
from ..const import REPORT_CONFIG_IMMEDIATE, SIGNAL_ATTR_UPDATED
_LOGGER = logging.getLogger(__name__)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(closures.DoorLock.cluster_id)
class DoorLockChannel(ZigbeeChannel):
"""Door lock channel."""
_value_attribute = 0
REPORT_CONFIG = ({"attr": "lock_state", "config": REPORT_CONFIG_IMMEDIATE},)
async def async_update(self):
"""Retrieve latest state."""
result = await self.get_attribute_value("lock_state", from_cache=True)
async_dispatcher_send(
self._zha_device.hass,
"{}_{}".format(self.unique_id, SIGNAL_ATTR_UPDATED),
result,
)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute update from lock cluster."""
attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
self.debug(
"Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value
)
if attrid == self._value_attribute:
async_dispatcher_send(
self._zha_device.hass,
"{}_{}".format(self.unique_id, SIGNAL_ATTR_UPDATED),
value,
)
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.get_attribute_value(self._value_attribute, from_cache=from_cache)
await super().async_initialize(from_cache)
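# A sketch of how a consumer (for example a lock entity) could subscribe to the
# signal dispatched by attribute_updated() above. `hass` stands for the running
# Home Assistant instance, `channel` for a DoorLockChannel instance and `handler`
# for any callback accepting the reported lock state; all three are supplied by
# the caller.
def _connect_lock_state_listener(hass, channel, handler):
    from homeassistant.helpers.dispatcher import async_dispatcher_connect

    async_dispatcher_connect(
        hass, "{}_{}".format(channel.unique_id, SIGNAL_ATTR_UPDATED), handler
    )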
@registries.ZIGBEE_CHANNEL_REGISTRY.register(closures.Shade.cluster_id)
class Shade(ZigbeeChannel):
"""Shade channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(closures.WindowCovering.cluster_id)
class WindowCovering(ZigbeeChannel):
"""Window channel."""
pass
| apache-2.0 | -6,521,440,783,508,737,000 | 29.623188 | 84 | 0.667298 | false |
passalis/sef | examples/supervised_reduction.py | 1 | 2019 | # License: MIT License https://github.com/passalis/sef/blob/master/LICENSE.txt
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import sklearn
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sef_dr.classification import evaluate_svm
from sef_dr.datasets import load_mnist
from sef_dr.linear import LinearSEF
def supervised_reduction(method=None):
# Load data and init seeds
train_data, train_labels, test_data, test_labels = load_mnist(dataset_path='data')
np.random.seed(1)
sklearn.utils.check_random_state(1)
n_train = 5000
n_classes = len(np.unique(train_labels))
if method == 'lda':
proj = LinearDiscriminantAnalysis(n_components=n_classes - 1)
proj.fit(train_data[:n_train, :], train_labels[:n_train])
elif method == 's-lda':
proj = LinearSEF(train_data.shape[1], output_dimensionality=(n_classes - 1))
proj.cuda()
loss = proj.fit(data=train_data[:n_train, :], target_labels=train_labels[:n_train], epochs=50,
target='supervised', batch_size=128, regularizer_weight=1, learning_rate=0.001, verbose=True)
elif method == 's-lda-2x':
# SEF output dimensions are not limited
proj = LinearSEF(train_data.shape[1], output_dimensionality=2 * (n_classes - 1))
proj.cuda()
loss = proj.fit(data=train_data[:n_train, :], target_labels=train_labels[:n_train], epochs=50,
target='supervised', batch_size=128, regularizer_weight=1, learning_rate=0.001, verbose=True)
acc = evaluate_svm(proj.transform(train_data[:n_train, :]), train_labels[:n_train],
proj.transform(test_data), test_labels)
print("Method: ", method, " Test accuracy: ", 100 * acc, " %")
if __name__ == '__main__':
print("LDA: ")
supervised_reduction('lda')
print("S-LDA: ")
supervised_reduction('s-lda')
print("S-LDA (2x): ")
supervised_reduction('s-lda-2x')
| mit | 6,662,112,117,549,750,000 | 38.588235 | 117 | 0.655275 | false |
wujuguang/sqlalchemy | test/ext/test_horizontal_shard.py | 1 | 23390 | import datetime
import os
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import event
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy.ext.horizontal_shard import ShardedSession
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import create_session
from sqlalchemy.orm import deferred
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import SingletonThreadPool
from sqlalchemy.sql import operators
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import provision
from sqlalchemy.testing.engines import testing_engine
# TODO: ShardTest can be turned into a base for further subclasses
class ShardTest(object):
__skip_if__ = (lambda: util.win32,)
__requires__ = ("sqlite",)
schema = None
def setUp(self):
global db1, db2, db3, db4, weather_locations, weather_reports
db1, db2, db3, db4 = self._init_dbs()
meta = MetaData()
ids = Table("ids", meta, Column("nextid", Integer, nullable=False))
def id_generator(ctx):
# in reality, might want to use a separate transaction for this.
c = db1.connect()
nextid = c.execute(ids.select().with_for_update()).scalar()
c.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1}))
return nextid
weather_locations = Table(
"weather_locations",
meta,
Column("id", Integer, primary_key=True, default=id_generator),
Column("continent", String(30), nullable=False),
Column("city", String(50), nullable=False),
schema=self.schema,
)
weather_reports = Table(
"weather_reports",
meta,
Column("id", Integer, primary_key=True),
Column("location_id", Integer, ForeignKey(weather_locations.c.id)),
Column("temperature", Float),
Column("report_time", DateTime, default=datetime.datetime.now),
schema=self.schema,
)
for db in (db1, db2, db3, db4):
meta.create_all(db)
db1.execute(ids.insert(), nextid=1)
self.setup_session()
self.setup_mappers()
@classmethod
def setup_session(cls):
global create_session
shard_lookup = {
"North America": "north_america",
"Asia": "asia",
"Europe": "europe",
"South America": "south_america",
}
def shard_chooser(mapper, instance, clause=None):
if isinstance(instance, WeatherLocation):
return shard_lookup[instance.continent]
else:
return shard_chooser(mapper, instance.location)
def id_chooser(query, ident):
return ["north_america", "asia", "europe", "south_america"]
def query_chooser(query):
ids = []
class FindContinent(sql.ClauseVisitor):
def visit_binary(self, binary):
if binary.left.shares_lineage(
weather_locations.c.continent
):
if binary.operator == operators.eq:
ids.append(shard_lookup[binary.right.value])
elif binary.operator == operators.in_op:
for bind in binary.right.clauses:
ids.append(shard_lookup[bind.value])
if query._criterion is not None:
FindContinent().traverse(query._criterion)
if len(ids) == 0:
return ["north_america", "asia", "europe", "south_america"]
else:
return ids
create_session = sessionmaker(
class_=ShardedSession, autoflush=True, autocommit=False
)
create_session.configure(
shards={
"north_america": db1,
"asia": db2,
"europe": db3,
"south_america": db4,
},
shard_chooser=shard_chooser,
id_chooser=id_chooser,
query_chooser=query_chooser,
)
@classmethod
def setup_mappers(cls):
global WeatherLocation, Report
class WeatherLocation(object):
def __init__(self, continent, city):
self.continent = continent
self.city = city
class Report(object):
def __init__(self, temperature, id_=None):
self.temperature = temperature
if id_:
self.id = id_
mapper(
WeatherLocation,
weather_locations,
properties={
"reports": relationship(Report, backref="location"),
"city": deferred(weather_locations.c.city),
},
)
mapper(Report, weather_reports)
def _fixture_data(self):
tokyo = WeatherLocation("Asia", "Tokyo")
newyork = WeatherLocation("North America", "New York")
toronto = WeatherLocation("North America", "Toronto")
london = WeatherLocation("Europe", "London")
dublin = WeatherLocation("Europe", "Dublin")
        brasilia = WeatherLocation("South America", "Brasilia")
quito = WeatherLocation("South America", "Quito")
tokyo.reports.append(Report(80.0, id_=1))
newyork.reports.append(Report(75, id_=1))
quito.reports.append(Report(85))
sess = create_session()
for c in [tokyo, newyork, toronto, london, dublin, brasilia, quito]:
sess.add(c)
sess.flush()
eq_(inspect(newyork).key[2], "north_america")
eq_(inspect(newyork).identity_token, "north_america")
eq_(inspect(dublin).key[2], "europe")
eq_(inspect(dublin).identity_token, "europe")
sess.commit()
sess.close()
return sess
def test_roundtrip(self):
sess = self._fixture_data()
tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").one()
tokyo.city # reload 'city' attribute on tokyo
sess.expire_all()
eq_(
db2.execute(weather_locations.select()).fetchall(),
[(1, "Asia", "Tokyo")],
)
eq_(
db1.execute(weather_locations.select()).fetchall(),
[
(2, "North America", "New York"),
(3, "North America", "Toronto"),
],
)
eq_(
sess.execute(
weather_locations.select(), shard_id="asia"
).fetchall(),
[(1, "Asia", "Tokyo")],
)
t = sess.query(WeatherLocation).get(tokyo.id)
eq_(t.city, tokyo.city)
eq_(t.reports[0].temperature, 80.0)
north_american_cities = sess.query(WeatherLocation).filter(
WeatherLocation.continent == "North America"
)
eq_(
set([c.city for c in north_american_cities]),
set(["New York", "Toronto"]),
)
asia_and_europe = sess.query(WeatherLocation).filter(
WeatherLocation.continent.in_(["Europe", "Asia"])
)
eq_(
set([c.city for c in asia_and_europe]),
set(["Tokyo", "London", "Dublin"]),
)
# inspect the shard token stored with each instance
eq_(
set(inspect(c).key[2] for c in asia_and_europe),
set(["europe", "asia"]),
)
eq_(
set(inspect(c).identity_token for c in asia_and_europe),
set(["europe", "asia"]),
)
newyork = sess.query(WeatherLocation).filter_by(city="New York").one()
newyork_report = newyork.reports[0]
tokyo_report = tokyo.reports[0]
# same primary key, two identity keys
eq_(
inspect(newyork_report).identity_key,
(Report, (1,), "north_america"),
)
eq_(inspect(tokyo_report).identity_key, (Report, (1,), "asia"))
# the token representing the originating shard is available
eq_(inspect(newyork_report).identity_token, "north_america")
eq_(inspect(tokyo_report).identity_token, "asia")
def test_get_baked_query(self):
sess = self._fixture_data()
tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").one()
tokyo.city
sess.expunge_all()
from sqlalchemy.ext.baked import BakedQuery
bakery = BakedQuery.bakery()
bq = bakery(lambda session: session.query(WeatherLocation))
t = bq(sess).get(tokyo.id)
eq_(t.city, tokyo.city)
eq_(inspect(t).key[2], "asia")
def test_get_baked_query_shard_id(self):
sess = self._fixture_data()
tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").one()
tokyo.city
sess.expunge_all()
from sqlalchemy.ext.baked import BakedQuery
bakery = BakedQuery.bakery()
bq = bakery(lambda session: session.query(WeatherLocation))
t = (
bq(sess)
.with_post_criteria(lambda q: q.set_shard("asia"))
.get(tokyo.id)
)
eq_(t.city, tokyo.city)
eq_(inspect(t).key[2], "asia")
def test_filter_baked_query_shard_id(self):
sess = self._fixture_data()
tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").one()
tokyo.city
sess.expunge_all()
from sqlalchemy.ext.baked import BakedQuery
bakery = BakedQuery.bakery()
bq = bakery(
lambda session: session.query(WeatherLocation)
).with_criteria(lambda q: q.filter_by(id=tokyo.id))
t = bq(sess).with_post_criteria(lambda q: q.set_shard("asia")).one()
eq_(t.city, tokyo.city)
def test_shard_id_event(self):
canary = []
def load(instance, ctx):
canary.append(ctx.attributes["shard_id"])
event.listen(WeatherLocation, "load", load)
sess = self._fixture_data()
tokyo = ( # noqa
sess.query(WeatherLocation)
.filter_by(city="Tokyo")
.set_shard("asia")
.one()
)
sess.query(WeatherLocation).all()
eq_(
canary,
[
"asia",
"north_america",
"north_america",
"europe",
"europe",
"south_america",
"south_america",
],
)
def test_baked_mix(self):
sess = self._fixture_data()
tokyo = sess.query(WeatherLocation).filter_by(city="Tokyo").one()
tokyo.city
sess.expunge_all()
from sqlalchemy.ext.baked import BakedQuery
bakery = BakedQuery.bakery()
def get_tokyo(sess):
bq = bakery(lambda session: session.query(WeatherLocation))
t = bq(sess).get(tokyo.id)
return t
Sess = sessionmaker(
class_=Session, bind=db2, autoflush=True, autocommit=False
)
sess2 = Sess()
t = get_tokyo(sess)
eq_(t.city, tokyo.city)
t = get_tokyo(sess2)
eq_(t.city, tokyo.city)
def test_bulk_update(self):
sess = self._fixture_data()
eq_(
set(row.temperature for row in sess.query(Report.temperature)),
{80.0, 75.0, 85.0},
)
temps = sess.query(Report).all()
eq_(set(t.temperature for t in temps), {80.0, 75.0, 85.0})
sess.query(Report).filter(Report.temperature >= 80).update(
{"temperature": Report.temperature + 6}
)
eq_(
set(row.temperature for row in sess.query(Report.temperature)),
{86.0, 75.0, 91.0},
)
# test synchronize session as well
eq_(set(t.temperature for t in temps), {86.0, 75.0, 91.0})
def test_bulk_delete(self):
sess = self._fixture_data()
temps = sess.query(Report).all()
eq_(set(t.temperature for t in temps), {80.0, 75.0, 85.0})
sess.query(Report).filter(Report.temperature >= 80).delete()
eq_(
set(row.temperature for row in sess.query(Report.temperature)),
{75.0},
)
# test synchronize session as well
for t in temps:
assert inspect(t).deleted is (t.temperature >= 80)
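# A minimal sketch, mirroring setup_session() above, of configuring a ShardedSession
# directly; the in-memory SQLite engines and the trivial chooser callables are
# illustrative placeholders only.
def _sharded_session_sketch():
    engines = {
        "shard_a": testing_engine("sqlite://"),
        "shard_b": testing_engine("sqlite://"),
    }
    maker = sessionmaker(class_=ShardedSession)
    maker.configure(
        shards=engines,
        # chooses the shard that new or flushed instances are written to
        shard_chooser=lambda mapper, instance, clause=None: "shard_a",
        # shards to search when loading by primary key
        id_chooser=lambda query, ident: list(engines),
        # shards to search for an arbitrary query
        query_chooser=lambda query: list(engines),
    )
    return maker()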
class DistinctEngineShardTest(ShardTest, fixtures.TestBase):
def _init_dbs(self):
db1 = testing_engine(
"sqlite:///shard1_%s.db" % provision.FOLLOWER_IDENT,
options=dict(poolclass=SingletonThreadPool),
)
db2 = testing_engine(
"sqlite:///shard2_%s.db" % provision.FOLLOWER_IDENT
)
db3 = testing_engine(
"sqlite:///shard3_%s.db" % provision.FOLLOWER_IDENT
)
db4 = testing_engine(
"sqlite:///shard4_%s.db" % provision.FOLLOWER_IDENT
)
self.dbs = [db1, db2, db3, db4]
return self.dbs
def teardown(self):
clear_mappers()
for db in self.dbs:
db.connect().invalidate()
for i in range(1, 5):
os.remove("shard%d_%s.db" % (i, provision.FOLLOWER_IDENT))
class AttachedFileShardTest(ShardTest, fixtures.TestBase):
"""Use modern schema conventions along with SQLite ATTACH."""
schema = "changeme"
def _init_dbs(self):
e = testing_engine("sqlite://")
with e.connect() as conn:
for i in range(1, 5):
conn.execute(
'ATTACH DATABASE "shard%s_%s.db" AS shard%s'
% (i, provision.FOLLOWER_IDENT, i)
)
db1 = e.execution_options(schema_translate_map={"changeme": "shard1"})
db2 = e.execution_options(schema_translate_map={"changeme": "shard2"})
db3 = e.execution_options(schema_translate_map={"changeme": "shard3"})
db4 = e.execution_options(schema_translate_map={"changeme": "shard4"})
self.engine = e
return db1, db2, db3, db4
def teardown(self):
clear_mappers()
self.engine.connect().invalidate()
for i in range(1, 5):
os.remove("shard%d_%s.db" % (i, provision.FOLLOWER_IDENT))
class TableNameConventionShardTest(ShardTest, fixtures.TestBase):
"""This fixture uses a single SQLite database along with a table naming
convention to achieve sharding. Event hooks are used to rewrite SQL
statements.
This used to be called "AttachedFileShardTest" but I didn't see any
ATTACH going on.
The approach taken by this test is awkward and I wouldn't recommend using
this pattern in a real situation. I'm not sure of the history of this test
but it likely predates when we knew how to use real ATTACH in SQLite.
"""
schema = "changeme"
def _init_dbs(self):
db1 = testing_engine(
"sqlite://", options={"execution_options": {"shard_id": "shard1"}}
)
db2 = db1.execution_options(shard_id="shard2")
db3 = db1.execution_options(shard_id="shard3")
db4 = db1.execution_options(shard_id="shard4")
import re
@event.listens_for(db1, "before_cursor_execute", retval=True)
def _switch_shard(conn, cursor, stmt, params, context, executemany):
shard_id = conn._execution_options["shard_id"]
# because SQLite can't just give us a "use" statement, we have
# to use the schema hack to locate table names
if shard_id:
stmt = re.sub(r"\"?changeme\"?\.", shard_id + "_", stmt)
return stmt, params
return db1, db2, db3, db4
class SelectinloadRegressionTest(fixtures.DeclarativeMappedTest):
"""test #4175
"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Book(Base):
__tablename__ = "book"
id = Column(Integer, primary_key=True)
pages = relationship("Page")
class Page(Base):
__tablename__ = "page"
id = Column(Integer, primary_key=True)
book_id = Column(ForeignKey("book.id"))
def test_selectinload_query(self):
session = ShardedSession(
shards={"test": testing.db},
shard_chooser=lambda *args: "test",
id_chooser=lambda *args: None,
query_chooser=lambda *args: ["test"],
)
Book, Page = self.classes("Book", "Page")
book = Book()
book.pages.append(Page())
session.add(book)
session.commit()
result = session.query(Book).options(selectinload("pages")).all()
eq_(result, [book])
class RefreshDeferExpireTest(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
data = Column(String(30))
deferred_data = deferred(Column(String(30)))
@classmethod
def insert_data(cls):
A = cls.classes.A
s = Session()
s.add(A(data="d1", deferred_data="d2"))
s.commit()
def _session_fixture(self, **kw):
return ShardedSession(
shards={"main": testing.db},
shard_chooser=lambda *args: "main",
id_chooser=lambda *args: ["fake", "main"],
query_chooser=lambda *args: ["fake", "main"],
**kw
)
def test_refresh(self):
A = self.classes.A
session = self._session_fixture()
a1 = session.query(A).set_shard("main").first()
session.refresh(a1)
def test_deferred(self):
A = self.classes.A
session = self._session_fixture()
a1 = session.query(A).set_shard("main").first()
eq_(a1.deferred_data, "d2")
def test_unexpire(self):
A = self.classes.A
session = self._session_fixture()
a1 = session.query(A).set_shard("main").first()
session.expire(a1)
eq_(a1.data, "d1")
def test_autocommit_session(self):
A = self.classes.A
session = self._session_fixture(autocommit=True)
a1 = session.query(A).set_shard("main").first()
eq_(a1.data, "d1")
class LazyLoadIdentityKeyTest(fixtures.DeclarativeMappedTest):
def _init_dbs(self):
self.db1 = db1 = testing_engine(
"sqlite:///shard1_%s.db" % provision.FOLLOWER_IDENT
)
self.db2 = db2 = testing_engine(
"sqlite:///shard2_%s.db" % provision.FOLLOWER_IDENT
)
for db in (db1, db2):
self.metadata.create_all(db)
self.dbs = [db1, db2]
return self.dbs
def teardown(self):
for db in self.dbs:
db.connect().invalidate()
for i in range(1, 3):
os.remove("shard%d_%s.db" % (i, provision.FOLLOWER_IDENT))
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Book(Base):
__tablename__ = "book"
id = Column(Integer, primary_key=True)
title = Column(String(50), nullable=False)
pages = relationship("Page", backref="book")
class Page(Base):
__tablename__ = "page"
id = Column(Integer, primary_key=True)
book_id = Column(ForeignKey("book.id"))
title = Column(String(50))
def _fixture(self, lazy_load_book=False, lazy_load_pages=False):
Book, Page = self.classes("Book", "Page")
def shard_for_book(book):
if book.title == "title 1":
return "test"
elif book.title == "title 2":
return "test2"
else:
assert False
def id_chooser(query, ident):
assert query.lazy_loaded_from
if isinstance(query.lazy_loaded_from.obj(), Book):
token = shard_for_book(query.lazy_loaded_from.obj())
assert query.lazy_loaded_from.identity_token == token
return [query.lazy_loaded_from.identity_token]
def no_query_chooser(query):
if query.column_descriptions[0]["type"] is Book and lazy_load_book:
assert isinstance(query.lazy_loaded_from.obj(), Page)
elif (
query.column_descriptions[0]["type"] is Page
and lazy_load_pages
):
assert isinstance(query.lazy_loaded_from.obj(), Book)
if query.lazy_loaded_from is None:
return ["test", "test2"]
else:
return [query.lazy_loaded_from.identity_token]
def shard_chooser(mapper, instance, **kw):
if isinstance(instance, Page):
return shard_for_book(instance.book)
else:
return shard_for_book(instance)
db1, db2 = self._init_dbs()
session = ShardedSession(
shards={"test": db1, "test2": db2},
shard_chooser=shard_chooser,
id_chooser=id_chooser,
query_chooser=no_query_chooser,
)
return session
def test_lazy_load_from_identity_map(self):
session = self._fixture()
Book, Page = self.classes("Book", "Page")
book = Book(title="title 1")
book.pages.append(Page())
session.add(book)
session.flush()
page = session.query(Page).first()
session.expire(page, ["book"])
def go():
eq_(page.book, book)
# doesn't emit SQL
self.assert_multiple_sql_count(self.dbs, go, [0, 0])
def test_lazy_load_from_db(self):
session = self._fixture(lazy_load_book=True)
Book, Page = self.classes("Book", "Page")
book1 = Book(title="title 1")
book1.pages.append(Page(title="book 1 page 1"))
session.add(book1)
session.flush()
book1_id = inspect(book1).identity_key
session.expunge(book1)
book1_page = session.query(Page).first()
session.expire(book1_page, ["book"])
def go():
eq_(inspect(book1_page.book).identity_key, book1_id)
# emits one query
self.assert_multiple_sql_count(self.dbs, go, [1, 0])
def test_lazy_load_no_baked_conflict(self):
session = self._fixture(lazy_load_pages=True)
Book, Page = self.classes("Book", "Page")
book1 = Book(title="title 1")
book1.pages.append(Page(title="book 1 page 1"))
book2 = Book(title="title 2")
book2.pages.append(Page(title="book 2 page 1"))
session.add(book1)
session.add(book2)
session.flush()
session.expire(book1, ["pages"])
session.expire(book2, ["pages"])
eq_(book1.pages[0].title, "book 1 page 1")
# second lazy load uses correct state
eq_(book2.pages[0].title, "book 2 page 1")
| mit | 8,308,047,299,078,057,000 | 30.103723 | 79 | 0.561479 | false |
HybridF5/tempest_debug | tempest/common/validation_resources.py | 5 | 5292 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from tempest import config
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
LOG = logging.getLogger(__name__)
def create_ssh_security_group(os, add_rule=False):
security_groups_client = os.compute_security_groups_client
security_group_rules_client = os.compute_security_group_rules_client
sg_name = data_utils.rand_name('securitygroup-')
sg_description = data_utils.rand_name('description-')
security_group = security_groups_client.create_security_group(
name=sg_name, description=sg_description)['security_group']
if add_rule:
security_group_rules_client.create_security_group_rule(
parent_group_id=security_group['id'], ip_protocol='tcp',
from_port=22, to_port=22)
security_group_rules_client.create_security_group_rule(
parent_group_id=security_group['id'], ip_protocol='icmp',
from_port=-1, to_port=-1)
LOG.debug("SSH Validation resource security group with tcp and icmp "
"rules %s created"
% sg_name)
return security_group
def create_validation_resources(os, validation_resources=None):
# Create and Return the validation resources required to validate a VM
validation_data = {}
if validation_resources:
if validation_resources['keypair']:
keypair_name = data_utils.rand_name('keypair')
validation_data.update(os.keypairs_client.create_keypair(
name=keypair_name))
LOG.debug("Validation resource key %s created" % keypair_name)
add_rule = False
if validation_resources['security_group']:
if validation_resources['security_group_rules']:
add_rule = True
validation_data['security_group'] = \
create_ssh_security_group(os, add_rule)
if validation_resources['floating_ip']:
floating_client = os.compute_floating_ips_client
validation_data.update(
floating_client.create_floating_ip(
pool=CONF.network.floating_network_name))
return validation_data
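# A sketch of the flag dictionary create_validation_resources() expects; in real
# tests these flags usually come from tempest configuration rather than being
# hard-coded as they are here.
def _example_validation_flags():
    return {
        'keypair': True,
        'security_group': True,
        'security_group_rules': True,
        'floating_ip': True,
    }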
def clear_validation_resources(os, validation_data=None):
# Cleanup the vm validation resources
has_exception = None
if validation_data:
if 'keypair' in validation_data:
keypair_client = os.keypairs_client
keypair_name = validation_data['keypair']['name']
try:
keypair_client.delete_keypair(keypair_name)
except lib_exc.NotFound:
LOG.warning("Keypair %s is not found when attempting to delete"
% keypair_name)
except Exception as exc:
LOG.exception('Exception raised while deleting key %s'
% keypair_name)
if not has_exception:
has_exception = exc
if 'security_group' in validation_data:
security_group_client = os.compute_security_groups_client
sec_id = validation_data['security_group']['id']
try:
security_group_client.delete_security_group(sec_id)
security_group_client.wait_for_resource_deletion(sec_id)
except lib_exc.NotFound:
LOG.warning("Security group %s is not found when attempting "
"to delete" % sec_id)
except lib_exc.Conflict as exc:
LOG.exception('Conflict while deleting security '
'group %s VM might not be deleted ' % sec_id)
if not has_exception:
has_exception = exc
except Exception as exc:
LOG.exception('Exception raised while deleting security '
'group %s ' % sec_id)
if not has_exception:
has_exception = exc
if 'floating_ip' in validation_data:
floating_client = os.compute_floating_ips_client
fip_id = validation_data['floating_ip']['id']
try:
floating_client.delete_floating_ip(fip_id)
except lib_exc.NotFound:
LOG.warning('Floating ip %s not found while attempting to '
'delete' % fip_id)
except Exception as exc:
LOG.exception('Exception raised while deleting ip %s '
% fip_id)
if not has_exception:
has_exception = exc
if has_exception:
raise has_exception
| apache-2.0 | -4,435,317,734,439,661,600 | 43.847458 | 79 | 0.606954 | false |
FabriceSalvaire/python-project-template | RootModule/Logging/ExceptionHook.py | 1 | 5874 | ####################################################################################################
#
# @Project@ - @ProjectDescription@.
# Copyright (C) Fabrice Salvaire 2013
#
####################################################################################################
####################################################################################################
from datetime import datetime
import StringIO
import sys
import traceback
####################################################################################################
from .Email import Email
from RootModule.Tools.Platform import Platform
from RootModule.Tools.Singleton import singleton
####################################################################################################
def format_exception(exception_type, exception_value, exception_traceback):
""" Format an exception to string. """
# traceback.format_exc()
traceback_string_io = StringIO.StringIO()
traceback.print_exception(exception_type, exception_value, exception_traceback, file=traceback_string_io)
return traceback_string_io.getvalue()
####################################################################################################
@singleton
class DispatcherExceptionHook(object):
    """ DispatcherExceptionHook installs an exception hook in the Python interpreter. This class is a
    singleton and follows the Observer Pattern. When an exception is raised, it is caught by the
    hook, which calls the method :meth:`notify` for each registered observer.
"""
##############################################
def __init__(self):
self._observers = []
sys.excepthook = self._exception_hook
##############################################
def __iter__(self):
return iter(self._observers)
##############################################
def __getitem__(self, exception_hook_class):
for observer in self:
if isinstance(observer, exception_hook_class):
return observer
else:
return None
##############################################
def register_observer(self, observer):
""" Register an observer, that must have a :meth:`notify` method. """
self._observers.append(observer)
##############################################
def _exception_hook(self, exception_type, exception_value, exception_traceback):
for observer in self:
observer.notify(exception_type, exception_value, exception_traceback)
####################################################################################################
class ExceptionHook(object):
##############################################
def __init__(self, context=''):
self.context = context
# DispatcherExceptionHook().register_observer(self)
####################################################################################################
class StderrExceptionHook(ExceptionHook):
""" Log exception on stderr. """
_line_width = 80
_line = '='*_line_width
##############################################
def __init__(self, context=''):
super(StderrExceptionHook, self).__init__(context)
##############################################
def notify(self, exception_type, exception_value, exception_traceback):
print >>sys.stderr, self._line, '\n'
print >>sys.stderr, 'StderrExceptionHook'.center(self._line_width), '\n'
# traceback.print_exc()
traceback.print_exception(exception_type, exception_value, exception_traceback)
print >>sys.stderr, '\n', self._line
####################################################################################################
class EmailExceptionHook(ExceptionHook):
""" Send per email exception. """
##############################################
def __init__(self, context='', recipients=[]):
""" The mandatory recipient is set in :attr:`Config.Email.to_address`. Additional recipients
can be added using the list *recipients*. A context string can be set using *context*.
"""
self._recipients = recipients
super(EmailExceptionHook, self).__init__(context)
##############################################
def notify(self, exception_type, exception_value, exception_traceback):
template_message = '''
Subject: An exception occurred in RootModule software on %(date)s UTC
---------------------------------------------------------------------------------
Context:
%(context)s
---------------------------------------------------------------------------------
%(platform)s
---------------------------------------------------------------------------------
%(traceback)s
---------------------------------------------------------------------------------
'''
traceback = format_exception(exception_type, exception_value, exception_traceback)
now = datetime.utcnow()
platform = Platform()
message = template_message % {'date': now.strftime('%Y-%m-%d %H:%M'),
'context': str(self.context),
'platform': str(platform),
'traceback': traceback,
}
email = Email(subject='An exception occurred in RootModule software',
recipients=Config.Email.to_address,
message=message,
)
email.add_recipients(self._recipients)
email.send()
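####################################################################################################

# A minimal wiring sketch: obtain the dispatcher singleton and register a stderr
# hook explicitly, since the ExceptionHook base class above leaves its
# register_observer() call commented out and registration is therefore up to the
# caller.
def _install_stderr_hook(context=''):
    dispatcher = DispatcherExceptionHook()
    hook = StderrExceptionHook(context)
    dispatcher.register_observer(hook)
    return hook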
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 | -750,957,015,245,716,500 | 31.815642 | 109 | 0.418965 | false |
mancoast/CPythonPyc_test | cpython/260_test_linuxaudiodev.py | 56 | 3142 | from test import test_support
test_support.requires('audio')
from test.test_support import findfile, TestSkipped, run_unittest
import errno
linuxaudiodev = test_support.import_module('linuxaudiodev', deprecated=True)
import sys
import sunaudio
import audioop
import unittest
SND_FORMAT_MULAW_8 = 1
class LinuxAudioDevTests(unittest.TestCase):
def setUp(self):
self.dev = linuxaudiodev.open('w')
def tearDown(self):
self.dev.close()
def test_methods(self):
# at least check that these methods can be invoked
self.dev.bufsize()
self.dev.obufcount()
self.dev.obuffree()
self.dev.getptr()
self.dev.fileno()
def test_play_sound_file(self):
path = findfile("audiotest.au")
fp = open(path, 'r')
size, enc, rate, nchannels, extra = sunaudio.gethdr(fp)
data = fp.read()
fp.close()
if enc != SND_FORMAT_MULAW_8:
self.fail("Expect .au file with 8-bit mu-law samples")
# convert the data to 16-bit signed
data = audioop.ulaw2lin(data, 2)
# set the data format
if sys.byteorder == 'little':
fmt = linuxaudiodev.AFMT_S16_LE
else:
fmt = linuxaudiodev.AFMT_S16_BE
# set parameters based on .au file headers
self.dev.setparameters(rate, 16, nchannels, fmt)
self.dev.write(data)
self.dev.flush()
def test_errors(self):
size = 8
fmt = linuxaudiodev.AFMT_U8
rate = 8000
nchannels = 1
try:
self.dev.setparameters(-1, size, nchannels, fmt)
except ValueError, err:
self.assertEquals(err.args[0], "expected rate >= 0, not -1")
try:
self.dev.setparameters(rate, -2, nchannels, fmt)
except ValueError, err:
self.assertEquals(err.args[0], "expected sample size >= 0, not -2")
try:
self.dev.setparameters(rate, size, 3, fmt)
except ValueError, err:
self.assertEquals(err.args[0], "nchannels must be 1 or 2, not 3")
try:
self.dev.setparameters(rate, size, nchannels, 177)
except ValueError, err:
self.assertEquals(err.args[0], "unknown audio encoding: 177")
try:
self.dev.setparameters(rate, size, nchannels, linuxaudiodev.AFMT_U16_LE)
except ValueError, err:
self.assertEquals(err.args[0], "for linear unsigned 16-bit little-endian "
"audio, expected sample size 16, not 8")
try:
self.dev.setparameters(rate, 16, nchannels, fmt)
except ValueError, err:
self.assertEquals(err.args[0], "for linear unsigned 8-bit audio, expected "
"sample size 8, not 16")
def test_main():
try:
dsp = linuxaudiodev.open('w')
except linuxaudiodev.error, msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT, errno.ENODEV, errno.EBUSY):
raise TestSkipped(msg)
raise
dsp.close()
run_unittest(LinuxAudioDevTests)
if __name__ == '__main__':
test_main()
| gpl-3.0 | 4,618,113,464,172,099,000 | 31.061224 | 87 | 0.594526 | false |