repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (15 distinct values) |
---|---|---|---|---|---|
centrofermi/e3pipe | tracking/E3FittingTool2dUnweighted.py | 1 | 2330 | #!/usr/bin/env python
# *********************************************************************
# * Copyright (C) 2015 Luca Baldini ([email protected]) *
# * *
# * For the license terms see the file LICENSE, distributed *
# * along with this software. *
# *********************************************************************
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from e3pipe.tracking.E3FittingTool2d import E3FittingTool2d
from e3pipe.tracking.E3Point import E3Point
class E3FittingTool2dUnweighted(E3FittingTool2d):
""" Simple two-dimensional track-fitting tool.
"""
def __init__(self):
""" Constructor.
"""
E3FittingTool2d.__init__(self, weighted = False)
def test():
"""
"""
fitTool = E3FittingTool2dUnweighted()
# Real event from FRAS-02-2014-10-30-00018_dst.root (11878)
# Processed with e3pipe 2.1.0 gives:
# root [4] Events.Scan("XDir:YDir:ZDir:ChiSquare", "EventNumber==11878")
# ************************************************************
# * Row * XDir * YDir * ZDir * ChiSquare *
# ************************************************************
# * 11878 * -0.050563 * 0.1976770 * 0.9789620 * 1.6044100 *
# ************************************************************
hits = [E3Point(79.229, 38.400, 0.000),
E3Point(82.742, 32.000, 40.000),
E3Point(83.922, 22.400, 80.000)
]
fitTool.run(hits)
print fitTool.track()
if __name__ == '__main__':
test()
| gpl-3.0 |
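A rough standalone illustration (not the e3pipe algorithm, just ordinary unweighted least squares with NumPy) of what an unweighted straight-line fit does with the three hits used in test() above: fit each transverse coordinate against z and normalize the resulting direction vector.

import numpy as np

# The three hits from the test above, as (x, y, z).
hits = np.array([[79.229, 38.400, 0.000],
                 [82.742, 32.000, 40.000],
                 [83.922, 22.400, 80.000]])
x, y, z = hits[:, 0], hits[:, 1], hits[:, 2]
dxdz = np.polyfit(z, x, 1)[0]  # unweighted least-squares slope of x versus z
dydz = np.polyfit(z, y, 1)[0]  # unweighted least-squares slope of y versus z
direction = np.array([dxdz, dydz, 1.0])
print(direction / np.linalg.norm(direction))  # unit direction vector of the fitted line
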
v-iam/azure-sdk-for-python | azure-mgmt-cognitiveservices/azure/mgmt/cognitiveservices/models/__init__.py | 4 | 2416 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sku import Sku
from .cognitive_services_account_create_parameters import CognitiveServicesAccountCreateParameters
from .cognitive_services_account_update_parameters import CognitiveServicesAccountUpdateParameters
from .cognitive_services_account import CognitiveServicesAccount
from .cognitive_services_account_keys import CognitiveServicesAccountKeys
from .regenerate_key_parameters import RegenerateKeyParameters
from .cognitive_services_resource_and_sku import CognitiveServicesResourceAndSku
from .cognitive_services_account_enumerate_skus_result import CognitiveServicesAccountEnumerateSkusResult
from .error_body import ErrorBody
from .error import Error, ErrorException
from .operation_display_info import OperationDisplayInfo
from .operation_entity import OperationEntity
from .check_sku_availability_parameter import CheckSkuAvailabilityParameter
from .check_sku_availability_result import CheckSkuAvailabilityResult
from .check_sku_availability_result_list import CheckSkuAvailabilityResultList
from .cognitive_services_account_paged import CognitiveServicesAccountPaged
from .operation_entity_paged import OperationEntityPaged
from .cognitive_services_management_client_enums import (
SkuName,
SkuTier,
Kind,
ProvisioningState,
KeyName,
)
__all__ = [
'Sku',
'CognitiveServicesAccountCreateParameters',
'CognitiveServicesAccountUpdateParameters',
'CognitiveServicesAccount',
'CognitiveServicesAccountKeys',
'RegenerateKeyParameters',
'CognitiveServicesResourceAndSku',
'CognitiveServicesAccountEnumerateSkusResult',
'ErrorBody',
'Error', 'ErrorException',
'OperationDisplayInfo',
'OperationEntity',
'CheckSkuAvailabilityParameter',
'CheckSkuAvailabilityResult',
'CheckSkuAvailabilityResultList',
'CognitiveServicesAccountPaged',
'OperationEntityPaged',
'SkuName',
'SkuTier',
'Kind',
'ProvisioningState',
'KeyName',
]
| mit |
shelbycruver/real-python-test | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcssm.py | 982 | 19608 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
Big5CharLenTable = (0, 1, 1, 2, 0)
Big5SMModel = {'classTable': BIG5_cls,
'classFactor': 5,
'stateTable': BIG5_st,
'charLenTable': Big5CharLenTable,
'name': 'Big5'}
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949SMModel = {'classTable': CP949_cls,
'classFactor': 10,
'stateTable': CP949_st,
'charLenTable': CP949CharLenTable,
'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages do
#contain such a byte. We need to be more error-forgiving.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff
)
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
# flake8: noqa
| gpl-2.0 |
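The model dictionaries above (Big5SMModel, CP949SMModel, and so on) are plain data: a 256-entry byte-class table, a flattened state-transition table indexed by state * classFactor + byte_class, and a per-class character-length table. A minimal sketch of how such a model is typically stepped, reusing Big5SMModel from above and assuming chardet's usual constant values eStart = 0, eError = 1, eItsMe = 2:

# Illustrative sketch only, not part of chardet; the constant values below are an assumption.
eStart, eError, eItsMe = 0, 1, 2

def feed_byte(model, state, byte_value):
    # Map the byte to its character class, then look up the next state in the
    # flattened transition table.
    byte_class = model['classTable'][byte_value]
    return model['stateTable'][state * model['classFactor'] + byte_class]

state = eStart
for b in (0xA4, 0xA4):  # the two bytes of one legal Big5 character
    state = feed_byte(Big5SMModel, state, b)
print(state == eStart)  # True: the byte pair completes a valid character
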
khalibartan/Antidote-DM | Antidotes DM/youtube_dl/extractor/ro220.py | 176 | 1451 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class Ro220IE(InfoExtractor):
IE_NAME = '220.ro'
_VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
'md5': '03af18b73a07b4088753930db7a34add',
'info_dict': {
'id': 'LYV6doKo7f',
'ext': 'mp4',
'title': 'Luati-le Banii sez 4 ep 1',
'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
url = compat_urllib_parse_unquote(self._search_regex(
r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
formats = [{
'format_id': 'sd',
'url': url,
'ext': 'mp4',
}]
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
| gpl-2.0 |
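As a quick standalone check (illustrative only), the _VALID_URL pattern above captures the category, short title and video id from the URL used in _TEST:

import re

pattern = (r'(?x)(?:https?://)?(?:www\.)?220\.ro/'
           r'(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)')
match = re.match(pattern, 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/')
print(match.group('category'), match.group('id'))  # sport LYV6doKo7f
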
olasitarska/django | django/db/models/sql/subqueries.py | 15 | 10496 | """
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import connections
from django.db.models.query_utils import Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import DateField, DateTimeField, FieldDoesNotExist
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE, NO_RESULTS, SelectInfo
from django.db.models.sql.datastructures import Date, DateTime
from django.db.models.sql.query import Query
from django.utils import six
from django.utils import timezone
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'DateTimeQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
compiler = 'SQLDeleteCompiler'
def do_query(self, table, where, using):
self.tables = [table]
self.where = where
self.get_compiler(using).execute_sql(NO_RESULTS)
def delete_batch(self, pk_list, using, field=None):
"""
Set up and execute delete queries for all the objects in pk_list.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
if not field:
field = self.get_meta().pk
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(
**{field.attname + '__in': pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]}))
self.do_query(self.get_meta().db_table, self.where, using=using)
def delete_qs(self, query, using):
"""
Delete the queryset in one SQL query (if possible). For simple queries
this is done by copying the query.query.where to self.query, for
complex queries by using subquery.
"""
innerq = query.query
# Make sure the inner query has at least one table in use.
innerq.get_initial_alias()
# The same for our new query.
self.get_initial_alias()
innerq_used_tables = [t for t in innerq.tables
if innerq.alias_refcount[t]]
if ((not innerq_used_tables or innerq_used_tables == self.tables)
and not len(innerq.having)):
# There is only the base table in use in the query, and there is
# no aggregate filtering going on.
self.where = innerq.where
else:
pk = query.model._meta.pk
if not connections[using].features.update_can_self_select:
# We can't do the delete using subquery.
values = list(query.values_list('pk', flat=True))
if not values:
return
self.delete_batch(values, using)
return
else:
innerq.clear_select_clause()
innerq.select = [
SelectInfo((self.get_initial_alias(), pk.column), None)
]
values = innerq
self.where = self.where_class()
self.add_q(Q(pk__in=values))
self.get_compiler(using).execute_sql(NO_RESULTS)
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
compiler = 'SQLUpdateCompiler'
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass,
related_updates=self.related_updates.copy(), **kwargs)
def update_batch(self, pk_list, values, using):
self.add_update_values(values)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
self.add_q(Q(pk__in=pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]))
self.get_compiler(using).execute_sql(NO_RESULTS)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in six.iteritems(values):
field, model, direct, m2m = self.get_meta().get_field_by_name(name)
if not direct or m2m:
raise FieldError(
'Cannot update model field %r (only non-relations and '
'foreign keys permitted).' % field
)
if model:
self.add_related_update(model, field, val)
continue
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Turn a sequence of (field, model, value) triples into an update query.
Used by add_update_values() as well as the "fast" update path when
saving models.
"""
self.values.extend(values_seq)
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in six.iteritems(self.related_updates):
query = UpdateQuery(model)
query.values = values
if self.related_ids is not None:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
compiler = 'SQLInsertCompiler'
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.fields = []
self.objs = []
def clone(self, klass=None, **kwargs):
extras = {
'fields': self.fields[:],
'objs': self.objs[:],
'raw': self.raw,
}
extras.update(kwargs)
return super(InsertQuery, self).clone(klass, **extras)
def insert_values(self, fields, objs, raw=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
self.fields = fields
self.objs = objs
self.raw = raw
class DateQuery(Query):
"""
A DateQuery is a normal query, except that it specifically selects a single
date field. This requires some special handling when converting the results
back to Python objects, so we put it in a separate class.
"""
compiler = 'SQLDateCompiler'
def add_select(self, field_name, lookup_type, order='ASC'):
"""
Converts the query into an extraction query.
"""
try:
field, _, _, joins, _ = self.setup_joins(
field_name.split(LOOKUP_SEP),
self.get_meta(),
self.get_initial_alias(),
)
except FieldError:
raise FieldDoesNotExist("%s has no field named '%s'" % (
self.get_meta().object_name, field_name
))
self._check_field(field) # overridden in DateTimeQuery
alias = joins[-1]
select = self._get_select((alias, field.column), lookup_type)
self.clear_select_clause()
self.select = [SelectInfo(select, None)]
self.distinct = True
self.order_by = [1] if order == 'ASC' else [-1]
if field.null:
self.add_filter(("%s__isnull" % field_name, False))
def _check_field(self, field):
assert isinstance(field, DateField), \
"%r isn't a DateField." % field.name
if settings.USE_TZ:
assert not isinstance(field, DateTimeField), \
"%r is a DateTimeField, not a DateField." % field.name
def _get_select(self, col, lookup_type):
return Date(col, lookup_type)
class DateTimeQuery(DateQuery):
"""
A DateTimeQuery is like a DateQuery but for a datetime field. If time zone
support is active, the tzinfo attribute contains the time zone to use for
converting the values before truncating them. Otherwise it's set to None.
"""
compiler = 'SQLDateTimeCompiler'
def clone(self, klass=None, memo=None, **kwargs):
if 'tzinfo' not in kwargs and hasattr(self, 'tzinfo'):
kwargs['tzinfo'] = self.tzinfo
return super(DateTimeQuery, self).clone(klass, memo, **kwargs)
def _check_field(self, field):
assert isinstance(field, DateTimeField), \
"%r isn't a DateTimeField." % field.name
def _get_select(self, col, lookup_type):
if self.tzinfo is None:
tzname = None
else:
tzname = timezone._get_timezone_name(self.tzinfo)
return DateTime(col, lookup_type, tzname)
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'
def add_subquery(self, query, using):
self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
| bsd-3-clause |
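DeleteQuery.delete_batch() and UpdateQuery.update_batch() above both slice the primary-key list into GET_ITERATOR_CHUNK_SIZE pieces so that no single DELETE or UPDATE carries an unbounded IN clause. A standalone sketch of that chunking pattern; the chunk size of 100 is an assumption mirroring django.db.models.sql.constants:

GET_ITERATOR_CHUNK_SIZE = 100  # assumed value of the constant imported above

def batched(pk_list, chunk_size=GET_ITERATOR_CHUNK_SIZE):
    # Yield successive slices of pk_list, mirroring the loop in delete_batch()/update_batch().
    for offset in range(0, len(pk_list), chunk_size):
        yield pk_list[offset:offset + chunk_size]

print([len(chunk) for chunk in batched(list(range(250)))])  # [100, 100, 50]
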
Edraak/edraak-platform | cms/djangoapps/contentstore/tests/test_course_create_rerun.py | 14 | 7173 | """
Test view handler for rerun (and eventually create)
"""
import datetime
import ddt
from django.urls import reverse
from django.test.client import RequestFactory
from mock import patch
from opaque_keys.edx.keys import CourseKey
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from student.roles import CourseInstructorRole, CourseStaffRole
from student.tests.factories import UserFactory
from util.organizations_helpers import add_organization, get_course_organizations
from xmodule.course_module import CourseFields
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class TestCourseListing(ModuleStoreTestCase):
"""
Unit tests for getting the list of courses for a logged in user
"""
def setUp(self):
"""
Add a user and a course
"""
super(TestCourseListing, self).setUp()
# create and log in a staff user.
# create and log in a non-staff user
self.user = UserFactory()
self.factory = RequestFactory()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password='test')
self.course_create_rerun_url = reverse('course_handler')
self.course_start = datetime.datetime.utcnow()
self.course_end = self.course_start + datetime.timedelta(days=30)
self.enrollment_start = self.course_start - datetime.timedelta(days=7)
self.enrollment_end = self.course_end - datetime.timedelta(days=14)
source_course = CourseFactory.create(
org='origin',
number='the_beginning',
run='first',
display_name='the one and only',
start=self.course_start,
end=self.course_end,
enrollment_start=self.enrollment_start,
enrollment_end=self.enrollment_end
)
self.source_course_key = source_course.id
for role in [CourseInstructorRole, CourseStaffRole]:
role(self.source_course_key).add_users(self.user)
def tearDown(self):
"""
Reverse the setup
"""
self.client.logout()
ModuleStoreTestCase.tearDown(self)
def test_rerun(self):
"""
Just testing the functionality the view handler adds over the tasks tested in test_clone_course
"""
response = self.client.ajax_post(self.course_create_rerun_url, {
'source_course_key': unicode(self.source_course_key),
'org': self.source_course_key.org, 'course': self.source_course_key.course, 'run': 'copy',
'display_name': 'not the same old name',
})
self.assertEqual(response.status_code, 200)
data = parse_json(response)
dest_course_key = CourseKey.from_string(data['destination_course_key'])
self.assertEqual(dest_course_key.run, 'copy')
source_course = self.store.get_course(self.source_course_key)
dest_course = self.store.get_course(dest_course_key)
self.assertEqual(dest_course.start, CourseFields.start.default)
self.assertEqual(dest_course.end, source_course.end)
self.assertEqual(dest_course.enrollment_start, None)
self.assertEqual(dest_course.enrollment_end, None)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_newly_created_course_has_web_certs_enabled(self, store):
"""
Tests newly created course has web certs enabled by default.
"""
with modulestore().default_store(store):
response = self.client.ajax_post(self.course_create_rerun_url, {
'org': 'orgX',
'number': 'CS101',
'display_name': 'Course with web certs enabled',
'run': '2015_T2'
})
self.assertEqual(response.status_code, 200)
data = parse_json(response)
new_course_key = CourseKey.from_string(data['course_key'])
course = self.store.get_course(new_course_key)
self.assertTrue(course.cert_html_view_enabled)
@patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': False})
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_creation_without_org_app_enabled(self, store):
"""
Tests course creation workflow should not create course to org
link if organizations_app is not enabled.
"""
with modulestore().default_store(store):
response = self.client.ajax_post(self.course_create_rerun_url, {
'org': 'orgX',
'number': 'CS101',
'display_name': 'Course with web certs enabled',
'run': '2015_T2'
})
self.assertEqual(response.status_code, 200)
data = parse_json(response)
new_course_key = CourseKey.from_string(data['course_key'])
course_orgs = get_course_organizations(new_course_key)
self.assertEqual(course_orgs, [])
@patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': True})
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_creation_with_org_not_in_system(self, store):
"""
Tests course creation workflow when course organization does not exist
in system.
"""
with modulestore().default_store(store):
response = self.client.ajax_post(self.course_create_rerun_url, {
'org': 'orgX',
'number': 'CS101',
'display_name': 'Course with web certs enabled',
'run': '2015_T2'
})
self.assertEqual(response.status_code, 400)
data = parse_json(response)
self.assertIn(u'Organization you selected does not exist in the system', data['error'])
@patch.dict('django.conf.settings.FEATURES', {'ORGANIZATIONS_APP': True})
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_creation_with_org_in_system(self, store):
"""
Tests course creation workflow when course organization exist in system.
"""
add_organization({
'name': 'Test Organization',
'short_name': 'orgX',
'description': 'Testing Organization Description',
})
with modulestore().default_store(store):
response = self.client.ajax_post(self.course_create_rerun_url, {
'org': 'orgX',
'number': 'CS101',
'display_name': 'Course with web certs enabled',
'run': '2015_T2'
})
self.assertEqual(response.status_code, 200)
data = parse_json(response)
new_course_key = CourseKey.from_string(data['course_key'])
course_orgs = get_course_organizations(new_course_key)
self.assertEqual(len(course_orgs), 1)
self.assertEqual(course_orgs[0]['short_name'], 'orgX')
| agpl-3.0 |
jaggu303619/asylum-v2.0 | openerp/addons/hr/__openerp__.py | 54 | 2515 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Directory',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Human Resources',
'sequence': 21,
'website': 'http://www.openerp.com',
'summary': 'Jobs, Departments, Employees Details',
'description': """
Human Resources Management
==========================
This application enables you to manage important aspects of your company's staff and other details such as their skills, contacts, working time...
You can manage:
---------------
* Employees and hierarchies: you can link each employee to a user account and display reporting hierarchies
* HR Departments
* HR Jobs
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': [
'images/hr_department.jpeg',
'images/hr_employee.jpeg',
'images/hr_job_position.jpeg',
'static/src/img/default_image.png',
],
'depends': ['base_setup','mail', 'resource', 'board'],
'data': [
'security/hr_security.xml',
'security/ir.model.access.csv',
'board_hr_view.xml',
'hr_view.xml',
'hr_department_view.xml',
'process/hr_process.xml',
'hr_installer.xml',
'hr_data.xml',
'res_config_view.xml',
],
'demo': ['hr_demo.xml'],
'test': [
'test/open2recruit2close_job.yml',
'test/hr_demo.yml',
],
'installable': True,
'application': True,
'auto_install': False,
'css': [ 'static/src/css/hr.css' ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wangyum/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/onehot_categorical_test.py | 89 | 10643 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for OneHotCategorical distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import onehot_categorical
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
def make_onehot_categorical(batch_shape, num_classes, dtype=dtypes.int32):
logits = random_ops.random_uniform(
list(batch_shape) + [num_classes], -10, 10, dtype=dtypes.float32) - 50.
return onehot_categorical.OneHotCategorical(logits, dtype=dtype)
class OneHotCategoricalTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testP(self):
p = [0.2, 0.8]
dist = onehot_categorical.OneHotCategorical(probs=p)
with self.test_session():
self.assertAllClose(p, dist.probs.eval())
self.assertAllEqual([2], dist.logits.get_shape())
def testLogits(self):
p = np.array([0.2, 0.8], dtype=np.float32)
logits = np.log(p) - 50.
dist = onehot_categorical.OneHotCategorical(logits=logits)
with self.test_session():
self.assertAllEqual([2], dist.probs.get_shape())
self.assertAllEqual([2], dist.logits.get_shape())
self.assertAllClose(dist.probs.eval(), p)
self.assertAllClose(dist.logits.eval(), logits)
def testShapes(self):
with self.test_session():
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(batch_shape, 10)
self.assertAllEqual(batch_shape, dist.batch_shape.as_list())
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape.as_list())
self.assertAllEqual([10], dist.event_shape_tensor().eval())
# event_shape is available as a constant because the shape is
# known at graph build time.
self.assertEqual(10,
tensor_util.constant_value(dist.event_shape_tensor()))
for batch_shape in ([], [1], [2, 3, 4]):
dist = make_onehot_categorical(
batch_shape, constant_op.constant(10, dtype=dtypes.int32))
self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
self.assertAllEqual([10], dist.event_shape.as_list())
self.assertEqual(10, dist.event_shape_tensor().eval())
def testDtype(self):
dist = make_onehot_categorical([], 5, dtype=dtypes.int32)
self.assertEqual(dist.dtype, dtypes.int32)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
dist = make_onehot_categorical([], 5, dtype=dtypes.int64)
self.assertEqual(dist.dtype, dtypes.int64)
self.assertEqual(dist.dtype, dist.sample(5).dtype)
self.assertEqual(dist.dtype, dist.mode().dtype)
self.assertEqual(dist.probs.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dtypes.float32)
self.assertEqual(dist.logits.dtype, dist.entropy().dtype)
self.assertEqual(dist.logits.dtype, dist.prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
self.assertEqual(dist.logits.dtype, dist.log_prob(
np.array([1]+[0]*4, dtype=np.int64)).dtype)
def testUnknownShape(self):
with self.test_session():
logits = array_ops.placeholder(dtype=dtypes.float32)
dist = onehot_categorical.OneHotCategorical(logits)
sample = dist.sample()
# Will sample class 1.
sample_value = sample.eval(feed_dict={logits: [-1000.0, 1000.0]})
self.assertAllEqual([0, 1], sample_value)
# Batch entry 0 will sample class 1, batch entry 1 will sample class 0.
sample_value_batch = sample.eval(
feed_dict={logits: [[-1000.0, 1000.0], [1000.0, -1000.0]]})
self.assertAllEqual([[0, 1], [1, 0]], sample_value_batch)
def testEntropyNoBatch(self):
logits = np.log([0.2, 0.8]) - 50.
dist = onehot_categorical.OneHotCategorical(logits)
with self.test_session():
self.assertAllClose(
dist.entropy().eval(),
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))
def testEntropyWithBatch(self):
logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
dist = onehot_categorical.OneHotCategorical(logits)
with self.test_session():
self.assertAllClose(dist.entropy().eval(), [
-(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
-(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
])
def testPmf(self):
# check that probability of samples correspond to their class probabilities
with self.test_session():
logits = self._rng.random_sample(size=(8, 2, 10))
prob = np.exp(logits)/np.sum(np.exp(logits), axis=-1, keepdims=True)
dist = onehot_categorical.OneHotCategorical(logits=logits)
np_sample = dist.sample().eval()
np_prob = dist.prob(np_sample).eval()
expected_prob = prob[np_sample.astype(np.bool)]
self.assertAllClose(expected_prob, np_prob.flatten())
def testSample(self):
with self.test_session():
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.)
n = 100
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
self.assertAllEqual([n, 1, 2, 2], sample_values.shape)
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
def testSampleWithSampleShape(self):
with self.test_session():
probs = [[[0.2, 0.8], [0.4, 0.6]]]
dist = onehot_categorical.OneHotCategorical(math_ops.log(probs) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = prob.eval()
self.assertAllClose([0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()],
atol=1e-2)
self.assertAllClose([0.4**2 + 0.6**2], [prob_val[:, :, :, 1].mean()],
atol=1e-2)
def testCategoricalCategoricalKL(self):
def np_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / exp_logits.sum(axis=-1, keepdims=True)
with self.test_session() as sess:
for categories in [2, 10]:
for batch_size in [1, 2]:
p_logits = self._rng.random_sample((batch_size, categories))
q_logits = self._rng.random_sample((batch_size, categories))
p = onehot_categorical.OneHotCategorical(logits=p_logits)
q = onehot_categorical.OneHotCategorical(logits=q_logits)
prob_p = np_softmax(p_logits)
prob_q = np_softmax(q_logits)
kl_expected = np.sum(
prob_p * (np.log(prob_p) - np.log(prob_q)), axis=-1)
kl_actual = kullback_leibler.kl_divergence(p, q)
kl_same = kullback_leibler.kl_divergence(p, p)
x = p.sample(int(2e4), seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
# Compute empirical KL(p||q).
kl_sample = math_ops.reduce_mean(p.log_prob(x) - q.log_prob(x), 0)
[kl_sample_, kl_actual_, kl_same_] = sess.run([kl_sample, kl_actual,
kl_same])
self.assertEqual(kl_actual.get_shape(), (batch_size,))
self.assertAllClose(kl_same_, np.zeros_like(kl_expected))
self.assertAllClose(kl_actual_, kl_expected, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_expected, atol=1e-2, rtol=0.)
def testSampleUnbiasedNonScalarBatch(self):
with self.test_session() as sess:
logits = self._rng.rand(4, 3, 2).astype(np.float32)
dist = onehot_categorical.OneHotCategorical(logits=logits)
n = int(3e3)
x = dist.sample(n, seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.07)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.10)
def testSampleUnbiasedScalarBatch(self):
with self.test_session() as sess:
logits = self._rng.rand(3).astype(np.float32)
dist = onehot_categorical.OneHotCategorical(logits=logits)
n = int(1e4)
x = dist.sample(n, seed=0)
x = math_ops.cast(x, dtype=dtypes.float32)
sample_mean = math_ops.reduce_mean(x, 0) # elementwise mean
x_centered = x - sample_mean
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.probs,
dist.covariance(),
])
self.assertAllEqual([3], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.1)
self.assertAllEqual([3, 3], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.1)
if __name__ == "__main__":
test.main()
| apache-2.0 |
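The expected values asserted in testEntropyNoBatch and testCategoricalCategoricalKL above come from the usual closed forms H(p) = -sum_i p_i log p_i and KL(p||q) = sum_i p_i (log p_i - log q_i). A small NumPy sketch (illustrative only) reproducing the scalar entropy case:

import numpy as np

p = np.array([0.2, 0.8])
entropy = -np.sum(p * np.log(p))
print(round(float(entropy), 4))  # ~0.5004, i.e. -(0.2*log(0.2) + 0.8*log(0.8))
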
decvalts/iris | docs/iris/src/sphinxext/generate_package_rst.py | 3 | 9320 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import os
import sys
import re
import inspect
document_dict = {
# Use autoclass for classes.
'class': '''
{object_docstring}
..
.. autoclass:: {object_name}
:members:
:undoc-members:
:inherited-members:
''',
'function': '''
.. autofunction:: {object_name}
''',
# For everything else, let automodule do some magic...
None: '''
.. autodata:: {object_name}
'''}
horizontal_sep = '''
.. raw:: html
<p class="hr_p"><a href="#">↑   top   ↑</a></p>
<!--
-----------
.. raw:: html
-->
'''
def lookup_object_type(obj):
if inspect.isclass(obj):
return 'class'
elif inspect.isfunction(obj):
return 'function'
else:
return None
def auto_doc_module(file_path, import_name, root_package,
package_toc=None, title=None):
mod = __import__(import_name)
mod = sys.modules[import_name]
elems = dir(mod)
if '__all__' in elems:
document_these = [(attr_name, getattr(mod, attr_name))
for attr_name in mod.__all__]
else:
document_these = [(attr_name, getattr(mod, attr_name))
for attr_name in elems
if (not attr_name.startswith('_') and
not inspect.ismodule(getattr(mod, attr_name)))]
def is_from_this_module(arg):
name = arg[0]
obj = arg[1]
return (hasattr(obj, '__module__') and
obj.__module__ == mod.__name__)
sort_order = {'class': 2, 'function': 1}
# Sort them according to sort_order dict.
def sort_key(arg):
name = arg[0]
obj = arg[1]
return sort_order.get(lookup_object_type(obj), 0)
document_these = filter(is_from_this_module, document_these)
document_these = sorted(document_these, key=sort_key)
lines = []
for element, obj in document_these:
object_name = import_name + '.' + element
obj_content = document_dict[lookup_object_type(obj)].format(
object_name=object_name,
object_name_header_line='+' * len(object_name),
object_docstring=inspect.getdoc(obj))
lines.append(obj_content)
lines = horizontal_sep.join(lines)
module_elements = '\n'.join(' * :py:obj:`{}`'.format(element)
for element, obj in document_these)
lines = r'''.. _{import_name}:
{title_underline}
{title}
{title_underline}
{sidebar}
.. currentmodule:: {root_package}
.. automodule:: {import_name}
In this module:
{module_elements}
''' + lines
if package_toc:
sidebar = '''
.. sidebar:: Modules in this package
{package_toc_tree}
'''.format(package_toc_tree=package_toc)
else:
sidebar = ''
return lines.format(title=title or import_name,
title_underline='=' * len(title or import_name),
import_name=import_name, root_package=root_package,
sidebar=sidebar, module_elements=module_elements)
def auto_doc_package(file_path, import_name, root_package, sub_packages):
max_depth = 1 if import_name == 'iris' else 2
package_toc = '\n '.join(sub_packages)
package_toc = '''
.. toctree::
:maxdepth: {:d}
:titlesonly:
{}
'''.format(max_depth, package_toc)
if '.' in import_name:
title = None
else:
title = import_name.capitalize() + ' reference documentation'
return auto_doc_module(file_path, import_name, root_package,
package_toc=package_toc, title=title)
def auto_package_build(app):
root_package = app.config.autopackage_name
if root_package is None:
raise ValueError('set the autopackage_name variable in the '
'conf.py file')
if not isinstance(root_package, list):
raise ValueError('autopackage was expecting a list of packages to '
'document e.g. ["itertools"]')
for package in root_package:
do_package(package)
def do_package(package_name):
out_dir = package_name + os.path.sep
# Import the root package. If this fails then an import error will be
# raised.
module = __import__(package_name)
root_package = package_name
rootdir = os.path.dirname(module.__file__)
package_folder = []
module_folders = {}
for root, subFolders, files in os.walk(rootdir):
for fname in files:
name, ext = os.path.splitext(fname)
# Skip some non-relevant files.
if (fname.startswith('.') or fname.startswith('#') or
re.search('^_[^_]', fname) or fname.find('.svn') >= 0 or
not (ext in ['.py', '.so'])):
continue
rel_path = root_package + \
os.path.join(root, fname).split(rootdir)[-1]
mod_folder = root_package + \
os.path.join(root).split(rootdir)[-1].replace('/', '.')
# Only add this package to folder list if it contains an __init__
# script.
if name == '__init__':
package_folder.append([mod_folder, rel_path])
else:
import_name = mod_folder + '.' + name
mf_list = module_folders.setdefault(mod_folder, [])
mf_list.append((import_name, rel_path))
if not os.path.exists(out_dir):
os.makedirs(out_dir)
for package, package_path in package_folder:
if '._' in package or 'test' in package:
continue
paths = []
for spackage, spackage_path in package_folder:
# Ignore this packages, packages that are not children of this
# one, test packages, private packages, and packages that are
# subpackages of subpackages (they'll be part of the subpackage).
if spackage == package:
continue
if not spackage.startswith(package):
continue
if spackage.count('.') > package.count('.') + 1:
continue
if 'test' in spackage:
continue
split_path = spackage.rsplit('.', 2)[-2:]
if any(part[0] == '_' for part in split_path):
continue
paths.append(os.path.join(*split_path) + '.rst')
paths.extend(os.path.join(os.path.basename(os.path.dirname(path)),
os.path.splitext(os.path.basename(path))[0])
for imp_name, path in module_folders.get(package, []))
paths.sort()
doc = auto_doc_package(package_path, package, root_package, paths)
package_dir = out_dir + package.replace('.', os.path.sep)
if not os.path.exists(package_dir):
os.makedirs(out_dir + package.replace('.', os.path.sep))
out_path = package_dir + '.rst'
if not os.path.exists(out_path):
print('Creating non-existent document {} ...'.format(out_path))
with open(out_path, 'w') as fh:
fh.write(doc)
else:
with open(out_path, 'r') as fh:
existing_content = ''.join(fh.readlines())
if doc != existing_content:
print('Creating out of date document {} ...'.format(
out_path))
with open(out_path, 'w') as fh:
fh.write(doc)
for import_name, module_path in module_folders.get(package, []):
doc = auto_doc_module(module_path, import_name, root_package)
out_path = out_dir + import_name.replace('.', os.path.sep) + '.rst'
if not os.path.exists(out_path):
print('Creating non-existent document {} ...'.format(
out_path))
with open(out_path, 'w') as fh:
fh.write(doc)
else:
with open(out_path, 'r') as fh:
existing_content = ''.join(fh.readlines())
if doc != existing_content:
print('Creating out of date document {} ...'.format(
out_path))
with open(out_path, 'w') as fh:
fh.write(doc)
def setup(app):
app.connect('builder-inited', auto_package_build)
app.add_config_value('autopackage_name', None, 'env')
| gpl-3.0 |
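lookup_object_type() and document_dict above implement a small dispatch: inspect each public object, pick the matching reST directive template, and format it with the object's dotted name. A condensed standalone sketch of that idea (the template text here is simplified, not the exact strings used above):

import inspect

templates = {
    'class': '.. autoclass:: {object_name}\n   :members:\n',
    'function': '.. autofunction:: {object_name}\n',
    None: '.. autodata:: {object_name}\n',
}

def render(obj, object_name):
    # Same dispatch rule as lookup_object_type(): classes first, then functions, else autodata.
    kind = 'class' if inspect.isclass(obj) else 'function' if inspect.isfunction(obj) else None
    return templates[kind].format(object_name=object_name)

print(render(inspect.isclass, 'inspect.isclass'))  # emits an autofunction directive
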
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/sphinx/util/i18n.py | 1 | 10576 | """
sphinx.util.i18n
~~~~~~~~~~~~~~~~
Builder superclass for all builders.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import gettext
import os
import re
import warnings
from collections import namedtuple
from datetime import datetime
from os import path
import babel.dates
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.errors import SphinxError
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.matching import Matcher
from sphinx.util.osutil import SEP, relpath
logger = logging.getLogger(__name__)
if False:
# For type annotation
from typing import Callable, List, Set # NOQA
from sphinx.environment import BuildEnvironment # NOQA
LocaleFileInfoBase = namedtuple('CatalogInfo', 'base_dir,domain,charset')
class CatalogInfo(LocaleFileInfoBase):
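    """A message catalog (*domain*) located under *base_dir*: knows the paths
    of its .po source and compiled .mo file and can (re)build the latter."""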
@property
def po_file(self):
# type: () -> str
return self.domain + '.po'
@property
def mo_file(self):
# type: () -> str
return self.domain + '.mo'
@property
def po_path(self):
# type: () -> str
return path.join(self.base_dir, self.po_file)
@property
def mo_path(self):
# type: () -> str
return path.join(self.base_dir, self.mo_file)
def is_outdated(self):
# type: () -> bool
return (
not path.exists(self.mo_path) or
path.getmtime(self.mo_path) < path.getmtime(self.po_path))
def write_mo(self, locale):
# type: (str) -> None
with open(self.po_path, encoding=self.charset) as file_po:
try:
po = read_po(file_po, locale)
except Exception as exc:
logger.warning(__('reading error: %s, %s'), self.po_path, exc)
return
with open(self.mo_path, 'wb') as file_mo:
try:
write_mo(file_mo, po)
except Exception as exc:
logger.warning(__('writing error: %s, %s'), self.mo_path, exc)
def find_catalog(docname, compaction):
# type: (str, bool) -> str
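    # With docname compaction every document below a top-level directory
    # shares the catalog named after that directory; otherwise each document
    # has a catalog (domain) of its own.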
if compaction:
ret = docname.split(SEP, 1)[0]
else:
ret = docname
return ret
def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
# type: (str, str, List[str], str, bool) -> List[str]
if not(lang and locale_dirs):
return []
domain = find_catalog(docname, compaction)
files = [gettext.find(domain, path.join(srcdir, dir_), [lang])
for dir_ in locale_dirs]
files = [relpath(f, srcdir) for f in files if f]
return files
def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact=None,
charset='utf-8', force_all=False,
excluded=Matcher([])):
# type: (List[str], str, List[str], bool, str, bool, Matcher) -> Set[CatalogInfo]
"""
:param list locale_dirs:
list of path as `['locale_dir1', 'locale_dir2', ...]` to find
translation catalogs. Each path contains a structure such as
`<locale>/LC_MESSAGES/domain.po`.
:param str locale: a language as `'en'`
:param list domains: list of domain names to get. If empty list or None
is specified, get all domain names. default is None.
:param boolean force_all:
Set True if you want to get all catalogs rather than updated catalogs.
default is False.
:return: [CatalogInfo(), ...]
"""
if gettext_compact is not None:
warnings.warn('gettext_compact argument for find_catalog_source_files() '
'is deprecated.', RemovedInSphinx30Warning, stacklevel=2)
catalogs = set() # type: Set[CatalogInfo]
if not locale:
return catalogs # locale is not specified
for locale_dir in locale_dirs:
if not locale_dir:
continue # skip system locale directory
base_dir = path.join(locale_dir, locale, 'LC_MESSAGES')
if not path.exists(base_dir):
continue # locale path is not found
for dirpath, dirnames, filenames in os.walk(base_dir, followlinks=True):
filenames = [f for f in filenames if f.endswith('.po')]
for filename in filenames:
if excluded(path.join(relpath(dirpath, base_dir), filename)):
continue
base = path.splitext(filename)[0]
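                # The catalog domain is the .po path relative to LC_MESSAGES,
                # normalized to '/' separators (e.g. 'subdir/docname').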
domain = relpath(path.join(dirpath, base), base_dir).replace(path.sep, SEP)
if domains and domain not in domains:
continue
cat = CatalogInfo(base_dir, domain, charset)
if force_all or cat.is_outdated():
catalogs.add(cat)
return catalogs
# date_format mappings: ustrftime() to babel.dates.format_datetime()
date_format_mappings = {
'%a': 'EEE', # Weekday as locale’s abbreviated name.
'%A': 'EEEE', # Weekday as locale’s full name.
'%b': 'MMM', # Month as locale’s abbreviated name.
'%B': 'MMMM', # Month as locale’s full name.
'%c': 'medium', # Locale’s appropriate date and time representation.
'%-d': 'd', # Day of the month as a decimal number.
'%d': 'dd', # Day of the month as a zero-padded decimal number.
'%-H': 'H', # Hour (24-hour clock) as a decimal number [0,23].
'%H': 'HH', # Hour (24-hour clock) as a zero-padded decimal number [00,23].
'%-I': 'h', # Hour (12-hour clock) as a decimal number [1,12].
'%I': 'hh', # Hour (12-hour clock) as a zero-padded decimal number [01,12].
'%-j': 'D', # Day of the year as a decimal number.
'%j': 'DDD', # Day of the year as a zero-padded decimal number.
'%-m': 'M', # Month as a decimal number.
'%m': 'MM', # Month as a zero-padded decimal number.
'%-M': 'm', # Minute as a decimal number [0,59].
'%M': 'mm', # Minute as a zero-padded decimal number [00,59].
'%p': 'a', # Locale’s equivalent of either AM or PM.
'%-S': 's', # Second as a decimal number.
'%S': 'ss', # Second as a zero-padded decimal number.
'%U': 'WW', # Week number of the year (Sunday as the first day of the week)
# as a zero padded decimal number. All days in a new year preceding
# the first Sunday are considered to be in week 0.
'%w': 'e', # Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.
'%-W': 'W', # Week number of the year (Monday as the first day of the week)
# as a decimal number. All days in a new year preceding the first
# Monday are considered to be in week 0.
'%W': 'WW', # Week number of the year (Monday as the first day of the week)
# as a zero-padded decimal number.
'%x': 'medium', # Locale’s appropriate date representation.
'%X': 'medium', # Locale’s appropriate time representation.
'%y': 'YY', # Year without century as a zero-padded decimal number.
'%Y': 'YYYY', # Year with century as a decimal number.
'%Z': 'zzzz', # Time zone name (no characters if no time zone exists).
'%%': '%',
}
date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
def babel_format_date(date, format, locale, formatter=babel.dates.format_date):
# type: (datetime, str, str, Callable) -> str
if locale is None:
locale = 'en'
# Check if we have the tzinfo attribute. If not we cannot do any time
# related formats.
if not hasattr(date, 'tzinfo'):
formatter = babel.dates.format_date
try:
return formatter(date, format, locale=locale)
except (ValueError, babel.core.UnknownLocaleError):
# fallback to English
return formatter(date, format, locale='en')
except AttributeError:
logger.warning(__('Invalid date format. Quote the string by single quote '
'if you want to output it directly: %s'), format)
return format
def format_date(format, date=None, language=None):
# type: (str, datetime, str) -> str
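    # Split the strftime-style format into tokens, translate each one to its
    # Babel equivalent via date_format_mappings, and render it localized for
    # the requested language.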
if date is None:
# If time is not specified, try to use $SOURCE_DATE_EPOCH variable
# See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
if source_date_epoch is not None:
date = datetime.utcfromtimestamp(float(source_date_epoch))
else:
date = datetime.now()
result = []
tokens = date_format_re.split(format)
for token in tokens:
if token in date_format_mappings:
babel_format = date_format_mappings.get(token, '')
            # Check if we have to use a different babel formatter than
# format_datetime, because we only want to format a date
# or a time.
if token == '%x':
function = babel.dates.format_date
elif token == '%X':
function = babel.dates.format_time
else:
function = babel.dates.format_datetime
result.append(babel_format_date(date, babel_format, locale=language,
formatter=function))
else:
result.append(token)
return "".join(result)
def get_image_filename_for_language(filename, env):
# type: (str, BuildEnvironment) -> str
if not env.config.language:
return filename
filename_format = env.config.figure_language_filename
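    # Expose root/ext/path/basename/language so figure_language_filename
    # templates (e.g. '{root}.{language}{ext}') can be expanded below.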
d = dict()
d['root'], d['ext'] = path.splitext(filename)
dirname = path.dirname(d['root'])
if dirname and not dirname.endswith(path.sep):
dirname += path.sep
d['path'] = dirname
d['basename'] = path.basename(d['root'])
d['language'] = env.config.language
try:
return filename_format.format(**d)
except KeyError as exc:
raise SphinxError('Invalid figure_language_filename: %r' % exc)
def search_image_for_language(filename, env):
# type: (str, BuildEnvironment) -> str
if not env.config.language:
return filename
translated = get_image_filename_for_language(filename, env)
dirname = path.dirname(env.docname)
if path.exists(path.join(env.srcdir, dirname, translated)):
return translated
else:
return filename
| mit |
ojengwa/sympy | sympy/logic/boolalg.py | 2 | 41626 | """
Boolean algebra module for SymPy
"""
from __future__ import print_function, division
from collections import defaultdict
from itertools import product, islice
from sympy.core.basic import Basic
from sympy.core.cache import cacheit
from sympy.core.numbers import Number
from sympy.core.decorators import deprecated
from sympy.core.operations import LatticeOp, AssocOp
from sympy.core.function import Application
from sympy.core.compatibility import ordered, xrange, with_metaclass
from sympy.core.sympify import converter, _sympify, sympify
from sympy.core.singleton import Singleton, S
from sympy.utilities.iterables import multiset
class Boolean(Basic):
"""A boolean object is an object for which logic operations make sense."""
__slots__ = []
def __and__(self, other):
"""Overloading for & operator"""
return And(self, other)
__rand__ = __and__
def __or__(self, other):
"""Overloading for |"""
return Or(self, other)
__ror__ = __or__
def __invert__(self):
"""Overloading for ~"""
return Not(self)
def __rshift__(self, other):
"""Overloading for >>"""
return Implies(self, other)
def __lshift__(self, other):
"""Overloading for <<"""
return Implies(other, self)
__rrshift__ = __lshift__
__rlshift__ = __rshift__
def __xor__(self, other):
return Xor(self, other)
__rxor__ = __xor__
# Developer note: There is liable to be some confusion as to when True should
# be used and when S.true should be used in various contexts throughout SymPy.
# An important thing to remember is that sympify(True) returns S.true. This
# means that for the most part, you can just use True and it will
# automatically be converted to S.true when necessary, similar to how you can
# generally use 1 instead of S.One.
# The rule of thumb is:
# "If the boolean in question can be replaced by an arbitrary symbolic
# Boolean, like Or(x, y) or x > 1, use S.true. Otherwise, use True"
# In other words, use S.true only on those contexts where the boolean is being
# used as a symbolic representation of truth. For example, if the object ends
# up in the .args of any expression, then it must necessarily be S.true
# instead of True, as elements of .args must be Basic. On the other hand, ==
# is not a symbolic operation in SymPy, since it always returns True or False,
# and does so in terms of structural equality rather than mathematical, so it
# should return True. The assumptions system should use True and False. Aside
# from not satisfying the above rule of thumb, the assumptions system uses a
# three-valued logic (True, False, None), whereas S.true and S.false represent
# a two-valued logic. When in doubt, use True.
# Also note that "S.true == True" is True.
# While "S.true is True" is False, "S.true == True" is True, so if there is
# any doubt over whether a function or expression will return S.true or True,
# just use "==" instead of "is" to do the comparison, and it will work in
# either case. Finally, for boolean flags, it's better to just use "if x"
# instead of "if x is True". To quote PEP 8:
# Don't compare boolean values to True or False using ==.
# Yes: if greeting:
# No: if greeting == True:
# Worse: if greeting is True:
class BooleanAtom(Boolean):
"""
Base class of BooleanTrue and BooleanFalse.
"""
class BooleanTrue(with_metaclass(Singleton, BooleanAtom)):
"""
SymPy version of True.
The instances of this class are singletonized and can be accessed via
S.true.
This is the SymPy version of True, for use in the logic module. The
primary advantage of using true instead of True is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
True they act bitwise on 1. Functions in the logic module will return this
class when they evaluate to true.
Examples
========
>>> from sympy import sympify, true, Or
>>> sympify(True)
True
>>> ~true
False
>>> ~True
-2
>>> Or(True, False)
True
See Also
========
sympy.logic.boolalg.BooleanFalse
"""
def __nonzero__(self):
return True
__bool__ = __nonzero__
def __hash__(self):
return hash(True)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import true
>>> true.as_set()
UniversalSet()
"""
return S.UniversalSet
class BooleanFalse(with_metaclass(Singleton, BooleanAtom)):
"""
SymPy version of False.
The instances of this class are singletonized and can be accessed via
S.false.
This is the SymPy version of False, for use in the logic module. The
primary advantage of using false instead of False is that shorthand boolean
operations like ~ and >> will work as expected on this class, whereas with
False they act bitwise on 0. Functions in the logic module will return this
class when they evaluate to false.
Examples
========
>>> from sympy import sympify, false, Or, true
>>> sympify(False)
False
>>> false >> false
True
>>> False >> False
0
>>> Or(True, False)
True
See Also
========
sympy.logic.boolalg.BooleanTrue
"""
def __nonzero__(self):
return False
__bool__ = __nonzero__
def __hash__(self):
return hash(False)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import false
>>> false.as_set()
EmptySet()
"""
from sympy.core.sets import EmptySet
return EmptySet()
true = BooleanTrue()
false = BooleanFalse()
# We want S.true and S.false to work, rather than S.BooleanTrue and
# S.BooleanFalse, but making the class and instance names the same causes some
# major issues (like the inability to import the class directly from this
# file).
S.true = true
S.false = false
converter[bool] = lambda x: S.true if x else S.false
class BooleanFunction(Application, Boolean):
"""Boolean function is a function that lives in a boolean space
It is used as base class for And, Or, Not, etc.
"""
is_Boolean = True
def __call__(self, *args):
return self.func(*[arg(*args) for arg in self.args])
def _eval_simplify(self, ratio, measure):
return simplify_logic(self)
class And(LatticeOp, BooleanFunction):
"""
Logical AND function.
It evaluates its arguments in order, giving False immediately
if any of them are False, and True if they are all True.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.abc import x, y
>>> from sympy.logic.boolalg import And
>>> x & y
And(x, y)
Notes
=====
The ``&`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
and. Hence, ``And(a, b)`` and ``a & b`` will return different things if
``a`` and ``b`` are integers.
>>> And(x, y).subs(x, 1)
y
"""
zero = false
identity = true
nargs = None
@classmethod
def _new_args_filter(cls, args):
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
return LatticeOp._new_args_filter(newargs, And)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import And, Symbol
>>> x = Symbol('x', real=True)
>>> And(x<2, x>-2).as_set()
(-2, 2)
"""
from sympy.core.sets import Intersection
if len(self.free_symbols) == 1:
return Intersection(*[arg.as_set() for arg in self.args])
else:
raise NotImplementedError("Sorry, And.as_set has not yet been"
" implemented for multivariate"
" expressions")
class Or(LatticeOp, BooleanFunction):
"""
Logical OR function
It evaluates its arguments in order, giving True immediately
if any of them are True, and False if they are all False.
Examples
========
>>> from sympy.core import symbols
>>> from sympy.abc import x, y
>>> from sympy.logic.boolalg import Or
>>> x | y
Or(x, y)
Notes
=====
The ``|`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
or. Hence, ``Or(a, b)`` and ``a | b`` will return different things if
``a`` and ``b`` are integers.
>>> Or(x, y).subs(x, 0)
y
"""
zero = true
identity = false
@classmethod
def _new_args_filter(cls, args):
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
return LatticeOp._new_args_filter(newargs, Or)
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import Or, Symbol
>>> x = Symbol('x', real=True)
>>> Or(x>2, x<-2).as_set()
(-oo, -2) U (2, oo)
"""
from sympy.core.sets import Union
if len(self.free_symbols) == 1:
return Union(*[arg.as_set() for arg in self.args])
else:
raise NotImplementedError("Sorry, Or.as_set has not yet been"
" implemented for multivariate"
" expressions")
class Not(BooleanFunction):
"""
Logical Not function (negation)
Returns True if the statement is False
Returns False if the statement is True
Examples
========
>>> from sympy.logic.boolalg import Not, And, Or
>>> from sympy.abc import x
>>> Not(True)
False
>>> Not(False)
True
>>> Not(And(True, False))
True
>>> Not(Or(True, False))
False
>>> Not(And(And(True, x), Or(x, False)))
Not(x)
>>> ~x
Not(x)
Notes
=====
- De Morgan rules are applied automatically.
- The ``~`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise
not. In particular, ``~a`` and ``Not(a)`` will be different if ``a`` is
an integer. Furthermore, since bools in Python subclass from ``int``,
``~True`` is the same as ``~1`` which is ``-2``, which has a boolean
value of True. To avoid this issue, use the SymPy boolean types
``true`` and ``false``.
>>> from sympy import true
>>> ~True
-2
>>> ~true
False
"""
is_Not = True
@classmethod
def eval(cls, arg):
if isinstance(arg, Number) or arg in (True, False):
return false if arg else true
# apply De Morgan Rules
if arg.func is And:
return Or(*[Not(a) for a in arg.args])
if arg.func is Or:
return And(*[Not(a) for a in arg.args])
if arg.func is Not:
return arg.args[0]
def as_set(self):
"""
Rewrite logic operators and relationals in terms of real sets.
Examples
========
>>> from sympy import Not, Symbol
>>> x = Symbol('x', real=True)
>>> Not(x>0).as_set()
(-oo, 0]
"""
if len(self.free_symbols) == 1:
return self.args[0].as_set().complement
else:
raise NotImplementedError("Sorry, Not.as_set has not yet been"
" implemented for mutivariate"
" expressions")
class Xor(BooleanFunction):
"""
Logical XOR (exclusive OR) function.
Returns True if an odd number of the arguments are True and the rest are
False.
Returns False if an even number of the arguments are True and the rest are
False.
Examples
========
>>> from sympy.logic.boolalg import Xor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Xor(True, False)
True
>>> Xor(True, True)
False
>>> Xor(True, False, True, True, False)
True
>>> Xor(True, False, True, False)
False
>>> x ^ y
Or(And(Not(x), y), And(Not(y), x))
Notes
=====
The ``^`` operator is provided as a convenience, but note that its use
here is different from its normal use in Python, which is bitwise xor. In
particular, ``a ^ b`` and ``Xor(a, b)`` will be different if ``a`` and
``b`` are integers.
>>> Xor(x, y).subs(y, 0)
x
"""
def __new__(cls, *args, **options):
args = [_sympify(arg) for arg in args]
argset = multiset(args) # dictionary
args_final=[]
        # Xor is commutative; an argument occurring an even number of times
        # cancels out (contributes false), while an odd count leaves a single
        # occurrence. Here x can be True, False or any symbol.
for x, freq in argset.items():
if freq % 2 == 0:
argset[x] = false
else:
argset[x] = x
for _, z in argset.items():
args_final.append(z)
argset = set(args_final)
truecount = 0
for x in args:
if isinstance(x, Number) or x in [True, False]: # Includes 0, 1
argset.discard(x)
if x:
truecount += 1
if len(argset) < 1:
return true if truecount % 2 != 0 else false
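        # An odd number of literal True arguments flips the parity, so the
        # result is the negation of the Xor of the remaining symbolic args.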
if truecount % 2 != 0:
return Not(Xor(*argset))
_args = frozenset(argset)
obj = super(Xor, cls).__new__(cls, *_args, **options)
if isinstance(obj, Xor):
obj._argset = _args
return obj
@property
@cacheit
def args(self):
return tuple(ordered(self._argset))
@classmethod
def eval(cls, *args):
if not args:
return false
args = list(args)
A = args.pop()
while args:
B = args.pop()
A = Or(And(A, Not(B)), And(Not(A), B))
return A
class Nand(BooleanFunction):
"""
Logical NAND function.
It evaluates its arguments in order, giving True immediately if any
of them are False, and False if they are all True.
Returns True if any of the arguments are False
Returns False if all arguments are True
Examples
========
>>> from sympy.logic.boolalg import Nand
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nand(False, True)
True
>>> Nand(True, True)
False
>>> Nand(x, y)
Or(Not(x), Not(y))
"""
@classmethod
def eval(cls, *args):
return Not(And(*args))
class Nor(BooleanFunction):
"""
Logical NOR function.
It evaluates its arguments in order, giving False immediately if any
of them are True, and True if they are all False.
Returns False if any argument is True
Returns True if all arguments are False
Examples
========
>>> from sympy.logic.boolalg import Nor
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Nor(True, False)
False
>>> Nor(True, True)
False
>>> Nor(False, True)
False
>>> Nor(False, False)
True
>>> Nor(x, y)
And(Not(x), Not(y))
"""
@classmethod
def eval(cls, *args):
return Not(Or(*args))
class Implies(BooleanFunction):
"""
Logical implication.
A implies B is equivalent to !A v B
Accepts two Boolean arguments; A and B.
Returns False if A is True and B is False
Returns True otherwise.
Examples
========
>>> from sympy.logic.boolalg import Implies
>>> from sympy import symbols
>>> x, y = symbols('x y')
>>> Implies(True, False)
False
>>> Implies(False, False)
True
>>> Implies(True, True)
True
>>> Implies(False, True)
True
>>> x >> y
Implies(x, y)
>>> y << x
Implies(x, y)
Notes
=====
The ``>>`` and ``<<`` operators are provided as a convenience, but note
that their use here is different from their normal use in Python, which is
bit shifts. Hence, ``Implies(a, b)`` and ``a >> b`` will return different
things if ``a`` and ``b`` are integers. In particular, since Python
considers ``True`` and ``False`` to be integers, ``True >> True`` will be
the same as ``1 >> 1``, i.e., 0, which has a truth value of False. To
avoid this issue, use the SymPy objects ``true`` and ``false``.
>>> from sympy import true, false
>>> True >> False
1
>>> true >> false
False
"""
@classmethod
def eval(cls, *args):
try:
newargs = []
for x in args:
if isinstance(x, Number) or x in (0, 1):
newargs.append(True if x else False)
else:
newargs.append(x)
A, B = newargs
except ValueError:
raise ValueError(
"%d operand(s) used for an Implies "
"(pairs are required): %s" % (len(args), str(args)))
if A == True or A == False or B == True or B == False:
return Or(Not(A), B)
else:
return Basic.__new__(cls, *args)
class Equivalent(BooleanFunction):
"""
Equivalence relation.
Equivalent(A, B) is True iff A and B are both True or both False
Returns True if all of the arguments are logically equivalent.
Returns False otherwise.
Examples
========
>>> from sympy.logic.boolalg import Equivalent, And
>>> from sympy.abc import x, y
>>> Equivalent(False, False, False)
True
>>> Equivalent(True, False, False)
False
>>> Equivalent(x, And(x, True))
True
"""
def __new__(cls, *args, **options):
args = [_sympify(arg) for arg in args]
argset = set(args)
for x in args:
if isinstance(x, Number) or x in [True, False]: # Includes 0, 1
argset.discard(x)
argset.add(True if x else False)
if len(argset) <= 1:
return true
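        # A literal True forces all remaining arguments to be true; a literal
        # False forces them all to be false (i.e. Nor of the rest).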
if True in argset:
argset.discard(True)
return And(*argset)
if False in argset:
argset.discard(False)
return Nor(*argset)
_args = frozenset(argset)
obj = super(Equivalent, cls).__new__(cls, _args)
obj._argset = _args
return obj
@property
@cacheit
def args(self):
return tuple(ordered(self._argset))
class ITE(BooleanFunction):
"""
If then else clause.
ITE(A, B, C) evaluates and returns the result of B if A is true
else it returns the result of C
Examples
========
>>> from sympy.logic.boolalg import ITE, And, Xor, Or
>>> from sympy.abc import x, y, z
>>> ITE(True, False, True)
False
>>> ITE(Or(True, False), And(True, True), Xor(True, True))
True
>>> ITE(x, y, z)
Or(And(Not(x), z), And(x, y))
"""
@classmethod
def eval(cls, *args):
args = list(args)
if len(args) == 3:
return Or(And(args[0], args[1]), And(Not(args[0]), args[2]))
raise ValueError("ITE expects 3 arguments, but got %d: %s" %
(len(args), str(args)))
### end class definitions. Some useful methods
def conjuncts(expr):
"""Return a list of the conjuncts in the expr s.
Examples
========
>>> from sympy.logic.boolalg import conjuncts
>>> from sympy.abc import A, B
>>> conjuncts(A & B)
frozenset([A, B])
>>> conjuncts(A | B)
frozenset([Or(A, B)])
"""
return And.make_args(expr)
def disjuncts(expr):
"""Return a list of the disjuncts in the sentence s.
Examples
========
>>> from sympy.logic.boolalg import disjuncts
>>> from sympy.abc import A, B
>>> disjuncts(A | B)
frozenset([A, B])
>>> disjuncts(A & B)
frozenset([And(A, B)])
"""
return Or.make_args(expr)
def distribute_and_over_or(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in CNF.
Examples
========
>>> from sympy.logic.boolalg import distribute_and_over_or, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_and_over_or(Or(A, And(Not(B), Not(C))))
And(Or(A, Not(B)), Or(A, Not(C)))
"""
return _distribute((expr, And, Or))
def distribute_or_over_and(expr):
"""
Given a sentence s consisting of conjunctions and disjunctions
of literals, return an equivalent sentence in DNF.
Note that the output is NOT simplified.
Examples
========
>>> from sympy.logic.boolalg import distribute_or_over_and, And, Or, Not
>>> from sympy.abc import A, B, C
>>> distribute_or_over_and(And(Or(Not(A), B), C))
Or(And(B, C), And(C, Not(A)))
"""
return _distribute((expr, Or, And))
def _distribute(info):
"""
Distributes info[1] over info[2] with respect to info[0].
"""
if info[0].func is info[2]:
for arg in info[0].args:
if arg.func is info[1]:
conj = arg
break
else:
return info[0]
rest = info[2](*[a for a in info[0].args if a is not conj])
return info[1](*list(map(_distribute,
[(info[2](c, rest), info[1], info[2]) for c in conj.args])))
elif info[0].func is info[1]:
return info[1](*list(map(_distribute,
[(x, info[1], info[2]) for x in info[0].args])))
else:
return info[0]
def to_cnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to conjunctive normal form.
That is, of the form ((A | ~B | ...) & (B | C | ...) & ...)
If simplify is True, the expr is evaluated to its simplest CNF form.
Examples
========
>>> from sympy.logic.boolalg import to_cnf
>>> from sympy.abc import A, B, D
>>> to_cnf(~(A | B) | D)
And(Or(D, Not(A)), Or(D, Not(B)))
>>> to_cnf((A | B) & (A | ~A), True)
Or(A, B)
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'cnf', True)
# Don't convert unless we have to
if is_cnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_and_over_or(expr)
def to_dnf(expr, simplify=False):
"""
Convert a propositional logical sentence s to disjunctive normal form.
That is, of the form ((A & ~B & ...) | (B & C & ...) | ...)
If simplify is True, the expr is evaluated to its simplest DNF form.
Examples
========
>>> from sympy.logic.boolalg import to_dnf
>>> from sympy.abc import A, B, C
>>> to_dnf(B & (A | C))
Or(And(A, B), And(B, C))
>>> to_dnf((A & B) | (A & ~B) | (B & C) | (~B & C), True)
Or(A, C)
"""
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
if simplify:
return simplify_logic(expr, 'dnf', True)
# Don't convert unless we have to
if is_dnf(expr):
return expr
expr = eliminate_implications(expr)
return distribute_or_over_and(expr)
def is_cnf(expr):
"""
Test whether or not an expression is in conjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_cnf
>>> from sympy.abc import A, B, C
>>> is_cnf(A | B | C)
True
>>> is_cnf(A & B & C)
True
>>> is_cnf((A & B) | C)
False
"""
return _is_form(expr, And, Or)
def is_dnf(expr):
"""
Test whether or not an expression is in disjunctive normal form.
Examples
========
>>> from sympy.logic.boolalg import is_dnf
>>> from sympy.abc import A, B, C
>>> is_dnf(A | B | C)
True
>>> is_dnf(A & B & C)
True
>>> is_dnf((A & B) | C)
True
>>> is_dnf(A & (B | C))
False
"""
return _is_form(expr, Or, And)
def _is_form(expr, function1, function2):
"""
Test whether or not an expression is of the required form.
"""
expr = sympify(expr)
# Special case of an Atom
if expr.is_Atom:
return True
# Special case of a single expression of function2
if expr.func is function2:
for lit in expr.args:
if lit.func is Not:
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
# Special case of a single negation
if expr.func is Not:
if not expr.args[0].is_Atom:
return False
if expr.func is not function1:
return False
for cls in expr.args:
if cls.is_Atom:
continue
if cls.func is Not:
if not cls.args[0].is_Atom:
return False
elif cls.func is not function2:
return False
for lit in cls.args:
if lit.func is Not:
if not lit.args[0].is_Atom:
return False
else:
if not lit.is_Atom:
return False
return True
def eliminate_implications(expr):
"""
Change >>, <<, and Equivalent into &, |, and ~. That is, return an
    expression that is equivalent to ``expr``, but has only &, |, and ~ as
    logical operators.
Examples
========
>>> from sympy.logic.boolalg import Implies, Equivalent, \
eliminate_implications
>>> from sympy.abc import A, B, C
>>> eliminate_implications(Implies(A, B))
Or(B, Not(A))
>>> eliminate_implications(Equivalent(A, B))
And(Or(A, Not(B)), Or(B, Not(A)))
>>> eliminate_implications(Equivalent(A, B, C))
And(Or(A, Not(C)), Or(B, Not(A)), Or(C, Not(B)))
"""
expr = sympify(expr)
if expr.is_Atom:
return expr # (Atoms are unchanged.)
args = list(map(eliminate_implications, expr.args))
if expr.func is Implies:
a, b = args[0], args[-1]
return (~a) | b
elif expr.func is Equivalent:
clauses = []
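        # Chain implications pairwise (a1 -> a2, a2 -> a3, ...) and close the
        # cycle with the implication from the last argument back to the first.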
for a, b in zip(islice(args, None), islice(args, 1, None)):
clauses.append(Or(Not(a), b))
a, b = args[-1], args[0]
clauses.append(Or(Not(a), b))
return And(*clauses)
else:
return expr.func(*args)
@deprecated(
useinstead="sympify", issue=6550, deprecated_since_version="0.7.3")
def compile_rule(s):
"""
Transforms a rule into a SymPy expression
A rule is a string of the form "symbol1 & symbol2 | ..."
Note: This function is deprecated. Use sympify() instead.
"""
import re
return sympify(re.sub(r'([a-zA-Z_][a-zA-Z0-9_]*)', r'Symbol("\1")', s))
def to_int_repr(clauses, symbols):
"""
Takes clauses in CNF format and puts them into an integer representation.
Examples
========
>>> from sympy.logic.boolalg import to_int_repr
>>> from sympy.abc import x, y
>>> to_int_repr([x | y, y], [x, y]) == [set([1, 2]), set([2])]
True
"""
# Convert the symbol list into a dict
symbols = dict(list(zip(symbols, list(xrange(1, len(symbols) + 1)))))
def append_symbol(arg, symbols):
if arg.func is Not:
return -symbols[arg.args[0]]
else:
return symbols[arg]
return [set(append_symbol(arg, symbols) for arg in Or.make_args(c))
for c in clauses]
def _check_pair(minterm1, minterm2):
"""
Checks if a pair of minterms differs by only one bit. If yes, returns
index, else returns -1.
"""
index = -1
for x, (i, j) in enumerate(zip(minterm1, minterm2)):
if i != j:
if index == -1:
index = x
else:
return -1
return index
def _convert_to_varsSOP(minterm, variables):
"""
    Converts a term in the expansion of a function from binary to its
variable form (for SOP).
"""
temp = []
for i, m in enumerate(minterm):
if m == 0:
temp.append(Not(variables[i]))
elif m == 1:
temp.append(variables[i])
else:
pass # ignore the 3s
return And(*temp)
def _convert_to_varsPOS(maxterm, variables):
"""
    Converts a term in the expansion of a function from binary to its
variable form (for POS).
"""
temp = []
for i, m in enumerate(maxterm):
if m == 1:
temp.append(Not(variables[i]))
elif m == 0:
temp.append(variables[i])
else:
pass # ignore the 3s
return Or(*temp)
def _simplified_pairs(terms):
"""
Reduces a set of minterms, if possible, to a simplified set of minterms
with one less variable in the terms using QM method.
"""
simplified_terms = []
todo = list(range(len(terms)))
for i, ti in enumerate(terms[:-1]):
for j_i, tj in enumerate(terms[(i + 1):]):
index = _check_pair(ti, tj)
if index != -1:
todo[i] = todo[j_i + i + 1] = None
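                # Merge the pair: copy one term and mark the differing bit as
                # a don't-care (encoded as 3).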
newterm = ti[:]
newterm[index] = 3
if newterm not in simplified_terms:
simplified_terms.append(newterm)
simplified_terms.extend(
[terms[i] for i in [_ for _ in todo if _ is not None]])
return simplified_terms
def _compare_term(minterm, term):
"""
    Return True if the given term (which may contain don't-care positions,
    encoded as 3) covers the minterm. Used for recognizing prime implicants.
"""
for i, x in enumerate(term):
if x != 3 and x != minterm[i]:
return False
return True
def _rem_redundancy(l1, terms):
"""
After the truth table has been sufficiently simplified, use the prime
implicant table method to recognize and eliminate redundant pairs,
and return the essential arguments.
"""
essential = []
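    # First pass: an implicant that is the only one covering some term is
    # essential. Second pass: cover every remaining term with the first
    # implicant that matches it.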
for x in terms:
temporary = []
for y in l1:
if _compare_term(x, y):
temporary.append(y)
if len(temporary) == 1:
if temporary[0] not in essential:
essential.append(temporary[0])
for x in terms:
for y in essential:
if _compare_term(x, y):
break
else:
for z in l1:
if _compare_term(x, z):
if z not in essential:
essential.append(z)
break
return essential
def SOPform(variables, minterms, dontcares=None):
"""
The SOPform function uses simplified_pairs and a redundant group-
eliminating algorithm to convert the list of all input combos that
generate '1' (the minterms) into the smallest Sum of Products form.
The variables must be given as the first argument.
Return a logical Or function (i.e., the "sum of products" or "SOP"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import SOPform
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1],
... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> SOPform(['w','x','y','z'], minterms, dontcares)
Or(And(Not(w), z), And(y, z))
References
==========
.. [1] en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
from sympy.core.symbol import Symbol
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
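    # Quine-McCluskey: repeatedly merge terms differing in a single bit until
    # no further reduction is possible, then keep only the prime implicants
    # needed to cover the original minterms.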
old = None
new = minterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, minterms)
return Or(*[_convert_to_varsSOP(x, variables) for x in essential])
def POSform(variables, minterms, dontcares=None):
"""
The POSform function uses simplified_pairs and a redundant-group
eliminating algorithm to convert the list of all input combinations
that generate '1' (the minterms) into the smallest Product of Sums form.
The variables must be given as the first argument.
Return a logical And function (i.e., the "product of sums" or "POS"
form) that gives the desired outcome. If there are inputs that can
be ignored, pass them as a list, too.
The result will be one of the (perhaps many) functions that satisfy
the conditions.
Examples
========
>>> from sympy.logic import POSform
>>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1],
... [1, 0, 1, 1], [1, 1, 1, 1]]
>>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]]
>>> POSform(['w','x','y','z'], minterms, dontcares)
And(Or(Not(w), y), z)
References
==========
.. [1] en.wikipedia.org/wiki/Quine-McCluskey_algorithm
"""
from sympy.core.symbol import Symbol
variables = [sympify(v) for v in variables]
if minterms == []:
return false
minterms = [list(i) for i in minterms]
dontcares = [list(i) for i in (dontcares or [])]
for d in dontcares:
if d in minterms:
raise ValueError('%s in minterms is also in dontcares' % d)
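    # POS works on the maxterms: every input combination that is neither a
    # minterm nor a don't-care.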
maxterms = []
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if (t not in minterms) and (t not in dontcares):
maxterms.append(t)
old = None
new = maxterms + dontcares
while new != old:
old = new
new = _simplified_pairs(old)
essential = _rem_redundancy(new, maxterms)
return And(*[_convert_to_varsPOS(x, variables) for x in essential])
def _find_predicates(expr):
"""Helper to find logical predicates in BooleanFunctions.
A logical predicate is defined here as anything within a BooleanFunction
that is not a BooleanFunction itself.
"""
if not isinstance(expr, BooleanFunction):
return set([expr])
return set.union(*(_find_predicates(i) for i in expr.args))
def simplify_logic(expr, form=None, deep=True):
"""
    This function reduces a boolean expression to a simplified SOP or POS
    form. The return type is an Or or And object in SymPy.
Parameters
==========
expr : string or boolean expression
form : string ('cnf' or 'dnf') or None (default).
If 'cnf' or 'dnf', the simplest expression in the corresponding
normal form is returned; if None, the answer is returned
according to the form with fewest args (in CNF by default).
deep : boolean (default True)
indicates whether to recursively simplify any
non-boolean functions contained within the input.
Examples
========
>>> from sympy.logic import simplify_logic
>>> from sympy.abc import x, y, z
>>> from sympy import S
>>> b = '(~x & ~y & ~z) | ( ~x & ~y & z)'
>>> simplify_logic(b)
And(Not(x), Not(y))
>>> S(b)
Or(And(Not(x), Not(y), Not(z)), And(Not(x), Not(y), z))
>>> simplify_logic(_)
And(Not(x), Not(y))
"""
if form == 'cnf' or form == 'dnf' or form is None:
expr = sympify(expr)
if not isinstance(expr, BooleanFunction):
return expr
variables = _find_predicates(expr)
truthtable = []
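        # Build the on-set of the expression by evaluating it for every 0/1
        # assignment of its predicates.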
for t in product([0, 1], repeat=len(variables)):
t = list(t)
if expr.xreplace(dict(zip(variables, t))) == True:
truthtable.append(t)
if deep:
from sympy.simplify.simplify import simplify
variables = [simplify(v) for v in variables]
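        # When no form is requested, return DNF if at least half of the
        # assignments are true, and CNF otherwise.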
if form == 'dnf' or \
(form is None and len(truthtable) >= (2 ** (len(variables) - 1))):
return SOPform(variables, truthtable)
elif form == 'cnf' or form is None:
return POSform(variables, truthtable)
else:
raise ValueError("form can be cnf or dnf only")
def _finger(eq):
"""
Assign a 5-item fingerprint to each symbol in the equation:
[
# of times it appeared as a Symbol,
# of times it appeared as a Not(symbol),
# of times it appeared as a Symbol in an And or Or,
# of times it appeared as a Not(Symbol) in an And or Or,
sum of the number of arguments with which it appeared,
counting Symbol as 1 and Not(Symbol) as 2
]
>>> from sympy.logic.boolalg import _finger as finger
>>> from sympy import And, Or, Not
>>> from sympy.abc import a, b, x, y
>>> eq = Or(And(Not(y), a), And(Not(y), b), And(x, y))
>>> dict(finger(eq))
{(0, 0, 1, 0, 2): [x], (0, 0, 1, 0, 3): [a, b], (0, 0, 1, 2, 8): [y]}
So y and x have unique fingerprints, but a and b do not.
"""
f = eq.free_symbols
d = dict(list(zip(f, [[0] * 5 for fi in f])))
for a in eq.args:
if a.is_Symbol:
d[a][0] += 1
elif a.is_Not:
d[a.args[0]][1] += 1
else:
o = len(a.args) + sum(ai.func is Not for ai in a.args)
for ai in a.args:
if ai.is_Symbol:
d[ai][2] += 1
d[ai][-1] += o
else:
d[ai.args[0]][3] += 1
d[ai.args[0]][-1] += o
inv = defaultdict(list)
for k, v in ordered(iter(d.items())):
inv[tuple(v)].append(k)
return inv
def bool_map(bool1, bool2):
"""
Return the simplified version of bool1, and the mapping of variables
that makes the two expressions bool1 and bool2 represent the same
logical behaviour for some correspondence between the variables
of each.
If more than one mappings of this sort exist, one of them
is returned.
For example, And(x, y) is logically equivalent to And(a, b) for
the mapping {x: a, y:b} or {x: b, y:a}.
If no such mapping exists, return False.
Examples
========
>>> from sympy import SOPform, bool_map, Or, And, Not, Xor
>>> from sympy.abc import w, x, y, z, a, b, c, d
>>> function1 = SOPform(['x','z','y'],[[1, 0, 1], [0, 0, 1]])
>>> function2 = SOPform(['a','b','c'],[[1, 0, 1], [1, 0, 0]])
>>> bool_map(function1, function2)
(And(Not(z), y), {y: a, z: b})
The results are not necessarily unique, but they are canonical. Here,
``(w, z)`` could be ``(a, d)`` or ``(d, a)``:
>>> eq = Or(And(Not(y), w), And(Not(y), z), And(x, y))
>>> eq2 = Or(And(Not(c), a), And(Not(c), d), And(b, c))
>>> bool_map(eq, eq2)
(Or(And(Not(y), w), And(Not(y), z), And(x, y)), {w: a, x: b, y: c, z: d})
>>> eq = And(Xor(a, b), c, And(c,d))
>>> bool_map(eq, eq.subs(c, x))
(And(Or(Not(a), Not(b)), Or(a, b), c, d), {a: a, b: b, c: d, d: x})
"""
def match(function1, function2):
"""Return the mapping that equates variables between two
simplified boolean expressions if possible.
By "simplified" we mean that a function has been denested
and is either an And (or an Or) whose arguments are either
symbols (x), negated symbols (Not(x)), or Or (or an And) whose
arguments are only symbols or negated symbols. For example,
And(x, Not(y), Or(w, Not(z))).
Basic.match is not robust enough (see issue 4835) so this is
a workaround that is valid for simplified boolean expressions
"""
# do some quick checks
if function1.__class__ != function2.__class__:
return None
if len(function1.args) != len(function2.args):
return None
if function1.is_Symbol:
return {function1: function2}
# get the fingerprint dictionaries
f1 = _finger(function1)
f2 = _finger(function2)
# more quick checks
if len(f1) != len(f2):
return False
# assemble the match dictionary if possible
matchdict = {}
for k in f1.keys():
if k not in f2:
return False
if len(f1[k]) != len(f2[k]):
return False
for i, x in enumerate(f1[k]):
matchdict[x] = f2[k][i]
return matchdict
a = simplify_logic(bool1)
b = simplify_logic(bool2)
m = match(a, b)
if m:
return a, m
return m is not None
@deprecated(
useinstead="bool_map", issue=7197, deprecated_since_version="0.7.4")
def bool_equal(bool1, bool2, info=False):
"""Return True if the two expressions represent the same logical
behaviour for some correspondence between the variables of each
(which may be different). For example, And(x, y) is logically
equivalent to And(a, b) for {x: a, y: b} (or vice versa). If the
mapping is desired, then set ``info`` to True and the simplified
form of the functions and mapping of variables will be returned.
"""
mapping = bool_map(bool1, bool2)
if not mapping:
return False
if info:
return mapping
return True
| bsd-3-clause |
KaranToor/MA450 | google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/gs/test_storage_uri.py | 135 | 6558 | # -*- coding: utf-8 -*-
# Copyright (c) 2013, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Integration tests for StorageUri interface."""
import binascii
import re
import StringIO
from boto import storage_uri
from boto.exception import BotoClientError
from boto.gs.acl import SupportedPermissions as perms
from tests.integration.gs.testcase import GSTestCase
class GSStorageUriTest(GSTestCase):
def testHasVersion(self):
uri = storage_uri("gs://bucket/obj")
self.assertFalse(uri.has_version())
uri.version_id = "versionid"
self.assertTrue(uri.has_version())
uri = storage_uri("gs://bucket/obj")
# Generation triggers versioning.
uri.generation = 12345
self.assertTrue(uri.has_version())
uri.generation = None
self.assertFalse(uri.has_version())
# Zero-generation counts as a version.
uri = storage_uri("gs://bucket/obj")
uri.generation = 0
self.assertTrue(uri.has_version())
def testCloneReplaceKey(self):
b = self._MakeBucket()
k = b.new_key("obj")
k.set_contents_from_string("stringdata")
orig_uri = storage_uri("gs://%s/" % b.name)
uri = orig_uri.clone_replace_key(k)
self.assertTrue(uri.has_version())
self.assertRegexpMatches(str(uri.generation), r"[0-9]+")
def testSetAclXml(self):
"""Ensures that calls to the set_xml_acl functions succeed."""
b = self._MakeBucket()
k = b.new_key("obj")
k.set_contents_from_string("stringdata")
bucket_uri = storage_uri("gs://%s/" % b.name)
# Get a valid ACL for an object.
bucket_uri.object_name = "obj"
bucket_acl = bucket_uri.get_acl()
bucket_uri.object_name = None
# Add a permission to the ACL.
all_users_read_permission = ("<Entry><Scope type='AllUsers'/>"
"<Permission>READ</Permission></Entry>")
acl_string = re.sub(r"</Entries>",
all_users_read_permission + "</Entries>",
bucket_acl.to_xml())
# Test-generated owner IDs are not currently valid for buckets
acl_no_owner_string = re.sub(r"<Owner>.*</Owner>", "", acl_string)
# Set ACL on an object.
bucket_uri.set_xml_acl(acl_string, "obj")
# Set ACL on a bucket.
bucket_uri.set_xml_acl(acl_no_owner_string)
# Set the default ACL for a bucket.
bucket_uri.set_def_xml_acl(acl_no_owner_string)
# Verify all the ACLs were successfully applied.
new_obj_acl_string = k.get_acl().to_xml()
new_bucket_acl_string = bucket_uri.get_acl().to_xml()
new_bucket_def_acl_string = bucket_uri.get_def_acl().to_xml()
self.assertRegexpMatches(new_obj_acl_string, r"AllUsers")
self.assertRegexpMatches(new_bucket_acl_string, r"AllUsers")
self.assertRegexpMatches(new_bucket_def_acl_string, r"AllUsers")
def testPropertiesUpdated(self):
b = self._MakeBucket()
bucket_uri = storage_uri("gs://%s" % b.name)
key_uri = bucket_uri.clone_replace_name("obj")
key_uri.set_contents_from_string("data1")
self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
k = b.get_key("obj")
self.assertEqual(k.generation, key_uri.generation)
self.assertEquals(k.get_contents_as_string(), "data1")
key_uri.set_contents_from_stream(StringIO.StringIO("data2"))
self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
self.assertGreater(key_uri.generation, k.generation)
k = b.get_key("obj")
self.assertEqual(k.generation, key_uri.generation)
self.assertEquals(k.get_contents_as_string(), "data2")
key_uri.set_contents_from_file(StringIO.StringIO("data3"))
self.assertRegexpMatches(str(key_uri.generation), r"[0-9]+")
self.assertGreater(key_uri.generation, k.generation)
k = b.get_key("obj")
self.assertEqual(k.generation, key_uri.generation)
self.assertEquals(k.get_contents_as_string(), "data3")
def testCompose(self):
data1 = 'hello '
data2 = 'world!'
expected_crc = 1238062967
b = self._MakeBucket()
bucket_uri = storage_uri("gs://%s" % b.name)
key_uri1 = bucket_uri.clone_replace_name("component1")
key_uri1.set_contents_from_string(data1)
key_uri2 = bucket_uri.clone_replace_name("component2")
key_uri2.set_contents_from_string(data2)
# Simple compose.
key_uri_composite = bucket_uri.clone_replace_name("composite")
components = [key_uri1, key_uri2]
key_uri_composite.compose(components, content_type='text/plain')
self.assertEquals(key_uri_composite.get_contents_as_string(),
data1 + data2)
composite_key = key_uri_composite.get_key()
cloud_crc32c = binascii.hexlify(
composite_key.cloud_hashes['crc32c'])
self.assertEquals(cloud_crc32c, hex(expected_crc)[2:])
self.assertEquals(composite_key.content_type, 'text/plain')
# Compose disallowed between buckets.
key_uri1.bucket_name += '2'
try:
key_uri_composite.compose(components)
self.fail('Composing between buckets didn\'t fail as expected.')
except BotoClientError as err:
self.assertEquals(
err.reason, 'GCS does not support inter-bucket composing')
| apache-2.0 |
dvhbru/dvhb-hybrid | dvhb_hybrid/mailer/django.py | 1 | 2561 | import base64
from concurrent.futures import ThreadPoolExecutor
from django.core import mail
from . import base
class DjangoConnection(base.BaseConnection):
def __init__(self, loop, conf, **kwargs):
super().__init__(**kwargs)
self.loop = loop
self.executor = ThreadPoolExecutor(max_workers=1)
self._conn = None
self.conf = conf
async def send_message(self, message):
if not self._conn:
raise ConnectionError()
kwargs = dict(
subject=message.subject,
body=message.body,
from_email=self.conf['from_email'],
to=message.mail_to,
connection=self._conn,
)
if message.html:
msg = mail.EmailMultiAlternatives(**kwargs)
await self.loop.run_in_executor(
self.executor, msg.attach_alternative,
message.html, "text/html")
else:
msg = mail.EmailMessage(**kwargs)
def attach_files(message, attachments):
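            # Attachments are given either as a filesystem path or as
            # base64-encoded inline content with a filename.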
if attachments:
for attachment in attachments:
path = attachment.get('path')
filename = attachment.get('filename')
mimetype = attachment.get('mimetype')
if path:
message.attach_file(path, mimetype=mimetype)
elif filename:
content = attachment.get('content')
if content:
message.attach(filename,
base64.decodebytes(
content.encode()),
mimetype)
await self.loop.run_in_executor(self.executor, attach_files,
msg, message.attachments)
return await self.loop.run_in_executor(self.executor, msg.send)
async def close(self):
if self._conn:
await self.loop.run_in_executor(
self.executor, self._conn.close)
async def open(self):
await self.close()
if not self._conn:
params = {
'backend': self.conf.get('django_email_backend'),
**self.conf.get('django_email_backend_params', {}),
}
self._conn = mail.get_connection(**params)
await self.loop.run_in_executor(self.executor, self._conn.open)
return self
class Mailer(base.BaseMailer):
connection_class = DjangoConnection
| mit |
felipenaselva/felipe.repository | script.module.resolveurl/lib/resolveurl/plugins/grifthost.py | 3 | 1209 | """
grifthost resolveurl plugin
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from lib import helpers
from resolveurl.resolver import ResolveUrl, ResolverError
class GrifthostResolver(ResolveUrl):
name = "grifthost"
domains = ["grifthost.com"]
pattern = '(?://|\.)(grifthost\.com)/(?:embed-)?([0-9a-zA-Z/]+)'
def get_media_url(self, host, media_id):
return helpers.get_media_url(self.get_url(host, media_id), patterns=['''file:\s*['"](?P<url>[^'"]+)''']).replace(' ', '%20')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
| gpl-2.0 |
aidan-/ansible-modules-extras | cloud/vmware/vmware_dvs_portgroup.py | 31 | 6867 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_dvs_portgroup
short_description: Create or remove a Distributed vSwitch portgroup
description:
- Create or remove a Distributed vSwitch portgroup
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
portgroup_name:
description:
- The name of the portgroup that is to be created or deleted
required: True
switch_name:
description:
- The name of the distributed vSwitch the port group should be created on.
required: True
vlan_id:
description:
- The VLAN ID that should be configured with the portgroup
required: True
num_ports:
description:
- The number of ports the portgroup should contain
required: True
portgroup_type:
description:
- See VMware KB 1022312 regarding portgroup types
required: True
choices:
- 'earlyBinding'
- 'lateBinding'
- 'ephemeral'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Create Management portgroup
local_action:
module: vmware_dvs_portgroup
hostname: vcenter_ip_or_hostname
username: vcenter_username
password: vcenter_password
portgroup_name: Management
switch_name: dvSwitch
vlan_id: 123
num_ports: 120
portgroup_type: earlyBinding
state: present
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
class VMwareDvsPortgroup(object):
def __init__(self, module):
self.module = module
self.dvs_portgroup = None
self.switch_name = self.module.params['switch_name']
self.portgroup_name = self.module.params['portgroup_name']
self.vlan_id = self.module.params['vlan_id']
self.num_ports = self.module.params['num_ports']
self.portgroup_type = self.module.params['portgroup_type']
self.dv_switch = None
self.state = self.module.params['state']
self.content = connect_to_api(module)
def process_state(self):
try:
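            # Dispatch table: desired state -> current portgroup state -> handler.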
dvspg_states = {
'absent': {
'present': self.state_destroy_dvspg,
'absent': self.state_exit_unchanged,
},
'present': {
'update': self.state_update_dvspg,
'present': self.state_exit_unchanged,
'absent': self.state_create_dvspg,
}
}
dvspg_states[self.state][self.check_dvspg_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=method_fault.msg)
except Exception as e:
self.module.fail_json(msg=str(e))
def create_port_group(self):
config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
config.name = self.portgroup_name
config.numPorts = self.num_ports
# vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation
# but this is the correct managed object type.
config.defaultPortConfig = vim.VMwareDVSPortSetting()
# vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the
# pyvmomi documentation but this is the correct managed object type
config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec()
config.defaultPortConfig.vlan.inherited = False
config.defaultPortConfig.vlan.vlanId = self.vlan_id
config.type = self.portgroup_type
spec = [config]
task = self.dv_switch.AddDVPortgroup_Task(spec)
changed, result = wait_for_task(task)
return changed, result
def state_destroy_dvspg(self):
changed = True
result = None
if not self.module.check_mode:
task = self.dvs_portgroup.Destroy_Task()
changed, result = wait_for_task(task)
self.module.exit_json(changed=changed, result=str(result))
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_update_dvspg(self):
self.module.exit_json(changed=False, msg="Currently not implemented.")
def state_create_dvspg(self):
changed = True
result = None
if not self.module.check_mode:
changed, result = self.create_port_group()
self.module.exit_json(changed=changed, result=str(result))
def check_dvspg_state(self):
self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
if self.dv_switch is None:
raise Exception("A distributed virtual switch with name %s does not exist" % self.switch_name)
self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name)
if self.dvs_portgroup is None:
return 'absent'
else:
return 'present'
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
switch_name=dict(required=True, type='str'),
vlan_id=dict(required=True, type='int'),
num_ports=dict(required=True, type='int'),
portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),
state=dict(default='present', choices=['present', 'absent'], type='str')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
vmware_dvs_portgroup = VMwareDvsPortgroup(module)
vmware_dvs_portgroup.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
crossbario/autobahn-python | examples/twisted/wamp/pubsub/basic/backend.py | 3 | 2100 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that publishes an event every second.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
counter = 0
while True:
print('backend publishing com.myapp.topic1', counter)
self.publish('com.myapp.topic1', counter)
counter += 1
yield sleep(1)
if __name__ == '__main__':
url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
realm = "crossbardemo"
runner = ApplicationRunner(url, realm)
runner.run(Component)
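# Hedged note (added comment): a matching subscriber session would register a
# handler from its own onJoin(), e.g.
#   yield self.subscribe(on_event, 'com.myapp.topic1')
# as in the frontend.py example that accompanies this file; the handler name
# 'on_event' is made up here.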
| mit |
jayceyxc/hue | desktop/core/ext-py/Django-1.6.10/django/db/models/aggregates.py | 114 | 2601 | """
Classes to represent the definitions of aggregate functions.
"""
from django.db.models.constants import LOOKUP_SEP
def refs_aggregate(lookup_parts, aggregates):
"""
A little helper method to check if the lookup_parts contains references
to the given aggregates set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
    for a match.
"""
for i in range(len(lookup_parts) + 1):
if LOOKUP_SEP.join(lookup_parts[0:i]) in aggregates:
return True
return False
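# Hedged example (added comment, not in the original source): with the default
# annotation alias 'author__count' in the aggregates set,
#   refs_aggregate(['author', 'count', 'gt'], {'author__count'})  # -> True
#   refs_aggregate(['title', 'exact'], {'author__count'})         # -> False
# since 'author__count' is hit on the two-part prefix; the names are made up.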
class Aggregate(object):
"""
Default Aggregate definition.
"""
def __init__(self, lookup, **extra):
"""Instantiate a new aggregate.
* lookup is the field on which the aggregate operates.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* name, the identifier for this aggregate function.
"""
self.lookup = lookup
self.extra = extra
def _default_alias(self):
return '%s__%s' % (self.lookup, self.name.lower())
default_alias = property(_default_alias)
def add_to_query(self, query, alias, col, source, is_summary):
"""Add the aggregate to the nominated query.
This method is used to convert the generic Aggregate definition into a
backend-specific definition.
* query is the backend-specific query instance to which the aggregate
is to be added.
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* is_summary is a boolean that is set True if the aggregate is a
summary value rather than an annotation.
"""
klass = getattr(query.aggregates_module, self.name)
aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
query.aggregates[alias] = aggregate
class Avg(Aggregate):
name = 'Avg'
class Count(Aggregate):
name = 'Count'
class Max(Aggregate):
name = 'Max'
class Min(Aggregate):
name = 'Min'
class StdDev(Aggregate):
name = 'StdDev'
class Sum(Aggregate):
name = 'Sum'
class Variance(Aggregate):
name = 'Variance'
| apache-2.0 |
CDrummond/cantata | icons/yaru/render-bitmaps.py | 2 | 6913 | #!/usr/bin/python3
#
# This file has been taken from Suru, and modified to just generate cantata icons
#
# ------------8<----------
# Legal Stuff:
#
# This file is part of the Suru Icon Theme and is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; version 3.
#
# This file is part of the Suru Icon Theme and is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <https://www.gnu.org/licenses/lgpl-3.0.txt>
#
#
# Thanks to the GNOME icon developers for the original version of this script
# ------------8<----------
import os
import sys
import xml.sax
import subprocess
import argparse
INKSCAPE = '/usr/bin/inkscape'
OPTIPNG = '/usr/bin/optipng'
# DPI multipliers to render at
DPIS = [1, 2]
inkscape_process = None
def main(SRC):
def optimize_png(png_file):
if os.path.exists(OPTIPNG):
process = subprocess.Popen([OPTIPNG, '-quiet', '-o7', png_file])
process.wait()
def wait_for_prompt(process, command=None):
if command is not None:
process.stdin.write((command+'\n').encode('utf-8'))
# This is kinda ugly ...
        # Wait for just a '>', or '\n>' if some other char appeared first
output = process.stdout.read(1)
if output == b'>':
return
output += process.stdout.read(1)
while output != b'\n>':
output += process.stdout.read(1)
output = output[1:]
def start_inkscape():
process = subprocess.Popen([INKSCAPE, '--shell'], bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
wait_for_prompt(process)
return process
def inkscape_render_rect(icon_file, rect, dpi, output_file):
global inkscape_process
if inkscape_process is None:
inkscape_process = start_inkscape()
cmd = [icon_file,
'--export-dpi', str(dpi),
'-i', rect,
'-e', output_file]
wait_for_prompt(inkscape_process, ' '.join(cmd))
optimize_png(output_file)
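        # Hedged illustration (added comment): for a hypothetical 22x22 rect with
        # id 'rect123' rendered at dpi_factor 2, the command passed to the
        # inkscape shell is roughly
        #   <icon.svg> --export-dpi 192 -i rect123 -e <icon-name>-22x22@2x.png
        # the file name, rect id and sizes above are made up for illustration.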
class ContentHandler(xml.sax.ContentHandler):
ROOT = 0
SVG = 1
LAYER = 2
OTHER = 3
TEXT = 4
def __init__(self, path, force=False, filter=None):
self.stack = [self.ROOT]
self.inside = [self.ROOT]
self.path = path
self.rects = []
self.state = self.ROOT
self.chars = ""
self.force = force
self.filter = filter
def endDocument(self):
pass
def startElement(self, name, attrs):
if self.inside[-1] == self.ROOT:
if name == "svg":
self.stack.append(self.SVG)
self.inside.append(self.SVG)
return
elif self.inside[-1] == self.SVG:
if (name == "g" and ('inkscape:groupmode' in attrs) and ('inkscape:label' in attrs)
and attrs['inkscape:groupmode'] == 'layer' and attrs['inkscape:label'].startswith('Baseplate')):
self.stack.append(self.LAYER)
self.inside.append(self.LAYER)
self.context = None
self.icon_name = None
self.rects = []
return
elif self.inside[-1] == self.LAYER:
if name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'context':
self.stack.append(self.TEXT)
self.inside.append(self.TEXT)
self.text='context'
self.chars = ""
return
elif name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'icon-name':
self.stack.append(self.TEXT)
self.inside.append(self.TEXT)
self.text='icon-name'
self.chars = ""
return
elif name == "rect":
self.rects.append(attrs)
self.stack.append(self.OTHER)
def endElement(self, name):
stacked = self.stack.pop()
if self.inside[-1] == stacked:
self.inside.pop()
if stacked == self.TEXT and self.text is not None:
assert self.text in ['context', 'icon-name']
if self.text == 'context':
self.context = self.chars
elif self.text == 'icon-name':
self.icon_name = self.chars
self.text = None
elif stacked == self.LAYER:
assert self.icon_name
assert self.context
if self.filter is not None and not self.icon_name in self.filter:
return
print (self.context, self.icon_name)
for rect in self.rects:
for dpi_factor in DPIS:
width = rect['width']
height = rect['height']
id = rect['id']
dpi = 96 * dpi_factor
size_str = "%sx%s" % (width, height)
if dpi_factor != 1:
size_str += "@%sx" % dpi_factor
outfile = self.icon_name+'-'+size_str+'.png'
# Do a time based check!
if self.force or not os.path.exists(outfile):
inkscape_render_rect(self.path, id, dpi, outfile)
sys.stdout.write('.')
else:
stat_in = os.stat(self.path)
stat_out = os.stat(outfile)
if stat_in.st_mtime > stat_out.st_mtime:
inkscape_render_rect(self.path, id, dpi, outfile)
sys.stdout.write('.')
else:
sys.stdout.write('-')
sys.stdout.flush()
sys.stdout.write('\n')
sys.stdout.flush()
def characters(self, chars):
self.chars += chars.strip()
print ('')
print ('Rendering from SVGs in', SRC)
print ('')
for file in os.listdir(SRC):
if file[-4:] == '.svg':
file = os.path.join(SRC, file)
handler = ContentHandler(file)
xml.sax.parse(open(file), handler)
print ('')
main('.')
| gpl-3.0 |
klmitch/nova | nova/policies/extensions.py | 3 | 1318 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:extensions'
extensions_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ANY,
description="List available extensions and show information "
"for an extension by alias",
operations=[
{
'method': 'GET',
'path': '/extensions'
},
{
'method': 'GET',
'path': '/extensions/{alias}'
}
],
scope_types=['system', 'project']),
]
def list_rules():
return extensions_policies
| apache-2.0 |
andrejb/cloudant_bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Options/__init__.py | 61 | 2667 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/__init__.py 5134 2010/08/16 23:02:40 bdeegan"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
from BoolOption import BoolOption # okay
from EnumOption import EnumOption # okay
from ListOption import ListOption # so-so
from PackageOption import PackageOption # so-so
from PathOption import PathOption # okay
warned = False
class Options(SCons.Variables.Variables):
def __init__(self, *args, **kw):
global warned
if not warned:
msg = "The Options class is deprecated; use the Variables class instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
SCons.Variables.Variables.__init__(self, *args, **kw)
def AddOptions(self, *args, **kw):
return SCons.Variables.Variables.AddVariables(self, *args, **kw)
def UnknownOptions(self, *args, **kw):
return SCons.Variables.Variables.UnknownVariables(self, *args, **kw)
def FormatOptionHelpText(self, *args, **kw):
return SCons.Variables.Variables.FormatVariableHelpText(self, *args,
**kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
rtruxal/metagoofil | pdfminer/pdffont.py | 32 | 26471 | #!/usr/bin/env python2
import sys
import struct
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from cmapdb import CMapDB, CMapParser, FileUnicodeMap, CMap
from encodingdb import EncodingDB, name2unicode
from psparser import PSStackParser
from psparser import PSSyntaxError, PSEOF
from psparser import LIT, KWD, STRICT
from psparser import PSLiteral, literal_name
from pdftypes import PDFException, resolve1
from pdftypes import int_value, float_value, num_value
from pdftypes import str_value, list_value, dict_value, stream_value
from fontmetrics import FONT_METRICS
from utils import apply_matrix_norm, nunpack, choplist
def get_widths(seq):
widths = {}
r = []
for v in seq:
if isinstance(v, list):
if r:
char1 = r[-1]
for (i,w) in enumerate(v):
widths[char1+i] = w
r = []
elif isinstance(v, int):
r.append(v)
if len(r) == 3:
(char1,char2,w) = r
for i in xrange(char1, char2+1):
widths[i] = w
r = []
return widths
#assert get_widths([1]) == {}
#assert get_widths([1,2,3]) == {1:3, 2:3}
#assert get_widths([1,[2,3],6,[7,8]]) == {1:2,2:3, 6:7,7:8}
def get_widths2(seq):
widths = {}
r = []
for v in seq:
if isinstance(v, list):
if r:
char1 = r[-1]
for (i,(w,vx,vy)) in enumerate(choplist(3,v)):
widths[char1+i] = (w,(vx,vy))
r = []
elif isinstance(v, int):
r.append(v)
if len(r) == 5:
(char1,char2,w,vx,vy) = r
for i in xrange(char1, char2+1):
widths[i] = (w,(vx,vy))
r = []
return widths
#assert get_widths2([1]) == {}
#assert get_widths2([1,2,3,4,5]) == {1:(3,(4,5)), 2:(3,(4,5))}
#assert get_widths2([1,[2,3,4,5],6,[7,8,9]]) == {1:(2,(3,4)), 6:(7,(8,9))}
## FontMetricsDB
##
class FontMetricsDB(object):
@classmethod
def get_metrics(klass, fontname):
return FONT_METRICS[fontname]
## Type1FontHeaderParser
##
class Type1FontHeaderParser(PSStackParser):
KEYWORD_BEGIN = KWD('begin')
KEYWORD_END = KWD('end')
KEYWORD_DEF = KWD('def')
KEYWORD_PUT = KWD('put')
KEYWORD_DICT = KWD('dict')
KEYWORD_ARRAY = KWD('array')
KEYWORD_READONLY = KWD('readonly')
    KEYWORD_FOR = KWD('for')
def __init__(self, data):
PSStackParser.__init__(self, data)
self._cid2unicode = {}
return
def get_encoding(self):
while 1:
try:
(cid,name) = self.nextobject()
except PSEOF:
break
try:
self._cid2unicode[cid] = name2unicode(name)
except KeyError:
pass
return self._cid2unicode
def do_keyword(self, pos, token):
if token is self.KEYWORD_PUT:
((_,key),(_,value)) = self.pop(2)
if (isinstance(key, int) and
isinstance(value, PSLiteral)):
self.add_results((key, literal_name(value)))
return
## CFFFont
## (Format specified in Adobe Technical Note: #5176
## "The Compact Font Format Specification")
##
NIBBLES = ('0','1','2','3','4','5','6','7','8','9','.','e','e-',None,'-')
def getdict(data):
d = {}
fp = StringIO(data)
stack = []
while 1:
c = fp.read(1)
if not c: break
b0 = ord(c)
if b0 <= 21:
d[b0] = stack
stack = []
continue
if b0 == 30:
s = ''
loop = True
while loop:
b = ord(fp.read(1))
for n in (b >> 4, b & 15):
if n == 15:
loop = False
else:
s += NIBBLES[n]
value = float(s)
elif 32 <= b0 and b0 <= 246:
value = b0-139
else:
b1 = ord(fp.read(1))
if 247 <= b0 and b0 <= 250:
value = ((b0-247)<<8)+b1+108
elif 251 <= b0 and b0 <= 254:
value = -((b0-251)<<8)-b1-108
else:
b2 = ord(fp.read(1))
if 128 <= b1: b1 -= 256
if b0 == 28:
value = b1<<8 | b2
else:
value = b1<<24 | b2<<16 | struct.unpack('>H', fp.read(2))[0]
stack.append(value)
return d
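# Hedged illustration (added comment, not in the original): getdict() follows the
# CFF DICT operand encoding of Adobe TN #5176: a single byte b0 with
# 32 <= b0 <= 246 encodes the integer b0-139 (so byte 139 decodes to 0), byte 30
# starts a nibble-encoded real number, and bytes <= 21 are operators that close
# the current entry under key b0. The byte values here are illustrative only.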
class CFFFont(object):
STANDARD_STRINGS = (
'.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft',
'parenright', 'asterisk', 'plus', 'comma', 'hyphen', 'period',
'slash', 'zero', 'one', 'two', 'three', 'four', 'five', 'six',
'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a',
'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'braceleft', 'bar', 'braceright', 'asciitilde', 'exclamdown',
'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash',
'dagger', 'daggerdbl', 'periodcentered', 'paragraph', 'bullet',
'quotesinglbase', 'quotedblbase', 'quotedblright',
'guillemotright', 'ellipsis', 'perthousand', 'questiondown',
'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut',
'ogonek', 'caron', 'emdash', 'AE', 'ordfeminine', 'Lslash',
'Oslash', 'OE', 'ordmasculine', 'ae', 'dotlessi', 'lslash',
'oslash', 'oe', 'germandbls', 'onesuperior', 'logicalnot', 'mu',
'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn',
'threequarters', 'twosuperior', 'registered', 'minus', 'eth',
'multiply', 'threesuperior', 'copyright', 'Aacute',
'Acircumflex', 'Adieresis', 'Agrave', 'Aring', 'Atilde',
'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde',
'Oacute', 'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde',
'Scaron', 'Uacute', 'Ucircumflex', 'Udieresis', 'Ugrave',
'Yacute', 'Ydieresis', 'Zcaron', 'aacute', 'acircumflex',
'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', 'eacute',
'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex',
'odieresis', 'ograve', 'otilde', 'scaron', 'uacute',
'ucircumflex', 'udieresis', 'ugrave', 'yacute', 'ydieresis',
'zcaron', 'exclamsmall', 'Hungarumlautsmall', 'dollaroldstyle',
'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader',
'onedotenleader', 'zerooldstyle', 'oneoldstyle', 'twooldstyle',
'threeoldstyle', 'fouroldstyle', 'fiveoldstyle', 'sixoldstyle',
'sevenoldstyle', 'eightoldstyle', 'nineoldstyle',
'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior',
'dsuperior', 'esuperior', 'isuperior', 'lsuperior', 'msuperior',
'nsuperior', 'osuperior', 'rsuperior', 'ssuperior', 'tsuperior',
'ff', 'ffi', 'ffl', 'parenleftinferior', 'parenrightinferior',
'Circumflexsmall', 'hyphensuperior', 'Gravesmall', 'Asmall',
'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall',
'Hsmall', 'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall',
'Nsmall', 'Osmall', 'Psmall', 'Qsmall', 'Rsmall', 'Ssmall',
'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 'Ysmall',
'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall',
'Zcaronsmall', 'Dieresissmall', 'Brevesmall', 'Caronsmall',
'Dotaccentsmall', 'Macronsmall', 'figuredash', 'hypheninferior',
'Ogoneksmall', 'Ringsmall', 'Cedillasmall', 'questiondownsmall',
'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
'onethird', 'twothirds', 'zerosuperior', 'foursuperior',
'fivesuperior', 'sixsuperior', 'sevensuperior', 'eightsuperior',
'ninesuperior', 'zeroinferior', 'oneinferior', 'twoinferior',
'threeinferior', 'fourinferior', 'fiveinferior', 'sixinferior',
'seveninferior', 'eightinferior', 'nineinferior',
'centinferior', 'dollarinferior', 'periodinferior',
'commainferior', 'Agravesmall', 'Aacutesmall',
'Acircumflexsmall', 'Atildesmall', 'Adieresissmall',
'Aringsmall', 'AEsmall', 'Ccedillasmall', 'Egravesmall',
'Eacutesmall', 'Ecircumflexsmall', 'Edieresissmall',
'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall',
'Oacutesmall', 'Ocircumflexsmall', 'Otildesmall',
'Odieresissmall', 'OEsmall', 'Oslashsmall', 'Ugravesmall',
'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000',
'001.001', '001.002', '001.003', 'Black', 'Bold', 'Book',
'Light', 'Medium', 'Regular', 'Roman', 'Semibold',
)
class INDEX(object):
def __init__(self, fp):
self.fp = fp
self.offsets = []
(count, offsize) = struct.unpack('>HB', self.fp.read(3))
for i in xrange(count+1):
self.offsets.append(nunpack(self.fp.read(offsize)))
self.base = self.fp.tell()-1
self.fp.seek(self.base+self.offsets[-1])
return
def __repr__(self):
return '<INDEX: size=%d>' % len(self)
def __len__(self):
return len(self.offsets)-1
def __getitem__(self, i):
self.fp.seek(self.base+self.offsets[i])
return self.fp.read(self.offsets[i+1]-self.offsets[i])
def __iter__(self):
return iter( self[i] for i in xrange(len(self)) )
def __init__(self, name, fp):
self.name = name
self.fp = fp
# Header
(_major,_minor,hdrsize,offsize) = struct.unpack('BBBB', self.fp.read(4))
self.fp.read(hdrsize-4)
# Name INDEX
self.name_index = self.INDEX(self.fp)
# Top DICT INDEX
self.dict_index = self.INDEX(self.fp)
# String INDEX
self.string_index = self.INDEX(self.fp)
# Global Subr INDEX
self.subr_index = self.INDEX(self.fp)
# Top DICT DATA
self.top_dict = getdict(self.dict_index[0])
(charset_pos,) = self.top_dict.get(15, [0])
(encoding_pos,) = self.top_dict.get(16, [0])
(charstring_pos,) = self.top_dict.get(17, [0])
# CharStrings
self.fp.seek(charstring_pos)
self.charstring = self.INDEX(self.fp)
self.nglyphs = len(self.charstring)
# Encodings
self.code2gid = {}
self.gid2code = {}
self.fp.seek(encoding_pos)
format = self.fp.read(1)
if format == '\x00':
# Format 0
(n,) = struct.unpack('B', self.fp.read(1))
for (code,gid) in enumerate(struct.unpack('B'*n, self.fp.read(n))):
self.code2gid[code] = gid
self.gid2code[gid] = code
elif format == '\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
code = 0
for i in xrange(n):
(first,nleft) = struct.unpack('BB', self.fp.read(2))
for gid in xrange(first,first+nleft+1):
self.code2gid[code] = gid
self.gid2code[gid] = code
code += 1
else:
raise ValueError('unsupported encoding format: %r' % format)
# Charsets
self.name2gid = {}
self.gid2name = {}
self.fp.seek(charset_pos)
format = self.fp.read(1)
if format == '\x00':
# Format 0
n = self.nglyphs-1
for (gid,sid) in enumerate(struct.unpack('>'+'H'*n, self.fp.read(2*n))):
gid += 1
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
elif format == '\x01':
# Format 1
(n,) = struct.unpack('B', self.fp.read(1))
sid = 0
for i in xrange(n):
(first,nleft) = struct.unpack('BB', self.fp.read(2))
for gid in xrange(first,first+nleft+1):
name = self.getstr(sid)
self.name2gid[name] = gid
self.gid2name[gid] = name
sid += 1
elif format == '\x02':
# Format 2
assert 0
else:
raise ValueError('unsupported charset format: %r' % format)
#print self.code2gid
#print self.name2gid
#assert 0
return
def getstr(self, sid):
if sid < len(self.STANDARD_STRINGS):
return self.STANDARD_STRINGS[sid]
return self.string_index[sid-len(self.STANDARD_STRINGS)]
## TrueTypeFont
##
class TrueTypeFont(object):
class CMapNotFound(Exception): pass
def __init__(self, name, fp):
self.name = name
self.fp = fp
self.tables = {}
self.fonttype = fp.read(4)
(ntables, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8))
for _ in xrange(ntables):
(name, tsum, offset, length) = struct.unpack('>4sLLL', fp.read(16))
self.tables[name] = (offset, length)
return
def create_unicode_map(self):
if 'cmap' not in self.tables:
raise TrueTypeFont.CMapNotFound
(base_offset, length) = self.tables['cmap']
fp = self.fp
fp.seek(base_offset)
(version, nsubtables) = struct.unpack('>HH', fp.read(4))
subtables = []
for i in xrange(nsubtables):
subtables.append(struct.unpack('>HHL', fp.read(8)))
char2gid = {}
# Only supports subtable type 0, 2 and 4.
for (_1, _2, st_offset) in subtables:
fp.seek(base_offset+st_offset)
(fmttype, fmtlen, fmtlang) = struct.unpack('>HHH', fp.read(6))
if fmttype == 0:
char2gid.update(enumerate(struct.unpack('>256B', fp.read(256))))
elif fmttype == 2:
subheaderkeys = struct.unpack('>256H', fp.read(512))
firstbytes = [0]*8192
for (i,k) in enumerate(subheaderkeys):
firstbytes[k/8] = i
nhdrs = max(subheaderkeys)/8 + 1
hdrs = []
for i in xrange(nhdrs):
(firstcode,entcount,delta,offset) = struct.unpack('>HHhH', fp.read(8))
hdrs.append((i,firstcode,entcount,delta,fp.tell()-2+offset))
for (i,firstcode,entcount,delta,pos) in hdrs:
if not entcount: continue
first = firstcode + (firstbytes[i] << 8)
fp.seek(pos)
for c in xrange(entcount):
                        (gid,) = struct.unpack('>H', fp.read(2))
if gid:
gid += delta
char2gid[first+c] = gid
elif fmttype == 4:
(segcount, _1, _2, _3) = struct.unpack('>HHHH', fp.read(8))
segcount /= 2
ecs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
fp.read(2)
scs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
idds = struct.unpack('>%dh' % segcount, fp.read(2*segcount))
pos = fp.tell()
idrs = struct.unpack('>%dH' % segcount, fp.read(2*segcount))
for (ec,sc,idd,idr) in zip(ecs, scs, idds, idrs):
if idr:
fp.seek(pos+idr)
for c in xrange(sc, ec+1):
char2gid[c] = (struct.unpack('>H', fp.read(2))[0] + idd) & 0xffff
else:
for c in xrange(sc, ec+1):
char2gid[c] = (c + idd) & 0xffff
else:
assert 0
# create unicode map
unicode_map = FileUnicodeMap()
for (char,gid) in char2gid.iteritems():
unicode_map.add_cid2unichr(gid, char)
return unicode_map
## Fonts
##
class PDFFontError(PDFException): pass
class PDFUnicodeNotDefined(PDFFontError): pass
LITERAL_STANDARD_ENCODING = LIT('StandardEncoding')
LITERAL_TYPE1C = LIT('Type1C')
# PDFFont
class PDFFont(object):
def __init__(self, descriptor, widths, default_width=None):
self.descriptor = descriptor
self.widths = widths
self.fontname = resolve1(descriptor.get('FontName', 'unknown'))
if isinstance(self.fontname, PSLiteral):
self.fontname = literal_name(self.fontname)
self.flags = int_value(descriptor.get('Flags', 0))
self.ascent = num_value(descriptor.get('Ascent', 0))
self.descent = num_value(descriptor.get('Descent', 0))
self.italic_angle = num_value(descriptor.get('ItalicAngle', 0))
self.default_width = default_width or num_value(descriptor.get('MissingWidth', 0))
self.leading = num_value(descriptor.get('Leading', 0))
self.bbox = list_value(descriptor.get('FontBBox', (0,0,0,0)))
self.hscale = self.vscale = .001
return
def __repr__(self):
return '<PDFFont>'
def is_vertical(self):
return False
def is_multibyte(self):
return False
def decode(self, bytes):
return map(ord, bytes)
def get_ascent(self):
return self.ascent * self.vscale
def get_descent(self):
return self.descent * self.vscale
def get_width(self):
w = self.bbox[2]-self.bbox[0]
if w == 0:
w = -self.default_width
return w * self.hscale
def get_height(self):
h = self.bbox[3]-self.bbox[1]
if h == 0:
h = self.ascent - self.descent
return h * self.vscale
def char_width(self, cid):
return self.widths.get(cid, self.default_width) * self.hscale
def char_disp(self, cid):
return 0
def string_width(self, s):
return sum( self.char_width(cid) for cid in self.decode(s) )
# PDFSimpleFont
class PDFSimpleFont(PDFFont):
def __init__(self, descriptor, widths, spec):
# Font encoding is specified either by a name of
# built-in encoding or a dictionary that describes
# the differences.
if 'Encoding' in spec:
encoding = resolve1(spec['Encoding'])
else:
encoding = LITERAL_STANDARD_ENCODING
if isinstance(encoding, dict):
name = literal_name(encoding.get('BaseEncoding', LITERAL_STANDARD_ENCODING))
diff = list_value(encoding.get('Differences', None))
self.cid2unicode = EncodingDB.get_encoding(name, diff)
else:
self.cid2unicode = EncodingDB.get_encoding(literal_name(encoding))
self.unicode_map = None
if 'ToUnicode' in spec:
strm = stream_value(spec['ToUnicode'])
self.unicode_map = FileUnicodeMap()
CMapParser(self.unicode_map, StringIO(strm.get_data())).run()
PDFFont.__init__(self, descriptor, widths)
return
def to_unichr(self, cid):
if self.unicode_map:
try:
return self.unicode_map.get_unichr(cid)
except KeyError:
pass
try:
return self.cid2unicode[cid]
except KeyError:
raise PDFUnicodeNotDefined(None, cid)
# PDFType1Font
class PDFType1Font(PDFSimpleFont):
def __init__(self, rsrcmgr, spec):
try:
self.basefont = literal_name(spec['BaseFont'])
except KeyError:
if STRICT:
raise PDFFontError('BaseFont is missing')
self.basefont = 'unknown'
try:
(descriptor, widths) = FontMetricsDB.get_metrics(self.basefont)
except KeyError:
descriptor = dict_value(spec.get('FontDescriptor', {}))
firstchar = int_value(spec.get('FirstChar', 0))
lastchar = int_value(spec.get('LastChar', 255))
widths = list_value(spec.get('Widths', [0]*256))
widths = dict( (i+firstchar,w) for (i,w) in enumerate(widths) )
PDFSimpleFont.__init__(self, descriptor, widths, spec)
if 'Encoding' not in spec and 'FontFile' in descriptor:
# try to recover the missing encoding info from the font file.
self.fontfile = stream_value(descriptor.get('FontFile'))
length1 = int_value(self.fontfile['Length1'])
data = self.fontfile.get_data()[:length1]
parser = Type1FontHeaderParser(StringIO(data))
self.cid2unicode = parser.get_encoding()
return
def __repr__(self):
return '<PDFType1Font: basefont=%r>' % self.basefont
# PDFTrueTypeFont
class PDFTrueTypeFont(PDFType1Font):
def __repr__(self):
return '<PDFTrueTypeFont: basefont=%r>' % self.basefont
# PDFType3Font
class PDFType3Font(PDFSimpleFont):
def __init__(self, rsrcmgr, spec):
firstchar = int_value(spec.get('FirstChar', 0))
lastchar = int_value(spec.get('LastChar', 0))
widths = list_value(spec.get('Widths', [0]*256))
widths = dict( (i+firstchar,w) for (i,w) in enumerate(widths))
if 'FontDescriptor' in spec:
descriptor = dict_value(spec['FontDescriptor'])
else:
descriptor = {'Ascent':0, 'Descent':0,
'FontBBox':spec['FontBBox']}
PDFSimpleFont.__init__(self, descriptor, widths, spec)
self.matrix = tuple(list_value(spec.get('FontMatrix')))
(_,self.descent,_,self.ascent) = self.bbox
(self.hscale,self.vscale) = apply_matrix_norm(self.matrix, (1,1))
return
def __repr__(self):
return '<PDFType3Font>'
# PDFCIDFont
class PDFCIDFont(PDFFont):
def __init__(self, rsrcmgr, spec):
try:
self.basefont = literal_name(spec['BaseFont'])
except KeyError:
if STRICT:
raise PDFFontError('BaseFont is missing')
self.basefont = 'unknown'
self.cidsysteminfo = dict_value(spec.get('CIDSystemInfo', {}))
self.cidcoding = '%s-%s' % (self.cidsysteminfo.get('Registry', 'unknown'),
self.cidsysteminfo.get('Ordering', 'unknown'))
try:
name = literal_name(spec['Encoding'])
except KeyError:
if STRICT:
raise PDFFontError('Encoding is unspecified')
name = 'unknown'
try:
self.cmap = CMapDB.get_cmap(name)
except CMapDB.CMapNotFound, e:
if STRICT:
raise PDFFontError(e)
self.cmap = CMap()
try:
descriptor = dict_value(spec['FontDescriptor'])
except KeyError:
if STRICT:
raise PDFFontError('FontDescriptor is missing')
descriptor = {}
ttf = None
if 'FontFile2' in descriptor:
self.fontfile = stream_value(descriptor.get('FontFile2'))
ttf = TrueTypeFont(self.basefont,
StringIO(self.fontfile.get_data()))
self.unicode_map = None
if 'ToUnicode' in spec:
strm = stream_value(spec['ToUnicode'])
self.unicode_map = FileUnicodeMap()
CMapParser(self.unicode_map, StringIO(strm.get_data())).run()
elif self.cidcoding == 'Adobe-Identity':
if ttf:
try:
self.unicode_map = ttf.create_unicode_map()
except TrueTypeFont.CMapNotFound:
pass
else:
try:
self.unicode_map = CMapDB.get_unicode_map(self.cidcoding, self.cmap.is_vertical())
except CMapDB.CMapNotFound, e:
pass
self.vertical = self.cmap.is_vertical()
if self.vertical:
# writing mode: vertical
widths = get_widths2(list_value(spec.get('W2', [])))
self.disps = dict( (cid,(vx,vy)) for (cid,(_,(vx,vy))) in widths.iteritems() )
(vy,w) = spec.get('DW2', [880, -1000])
self.default_disp = (None,vy)
widths = dict( (cid,w) for (cid,(w,_)) in widths.iteritems() )
default_width = w
else:
# writing mode: horizontal
self.disps = {}
self.default_disp = 0
widths = get_widths(list_value(spec.get('W', [])))
default_width = spec.get('DW', 1000)
PDFFont.__init__(self, descriptor, widths, default_width=default_width)
return
def __repr__(self):
return '<PDFCIDFont: basefont=%r, cidcoding=%r>' % (self.basefont, self.cidcoding)
def is_vertical(self):
return self.vertical
def is_multibyte(self):
return True
def decode(self, bytes):
return self.cmap.decode(bytes)
def char_disp(self, cid):
"Returns an integer for horizontal fonts, a tuple for vertical fonts."
return self.disps.get(cid, self.default_disp)
def to_unichr(self, cid):
try:
if not self.unicode_map: raise KeyError(cid)
return self.unicode_map.get_unichr(cid)
except KeyError:
raise PDFUnicodeNotDefined(self.cidcoding, cid)
# main
def main(argv):
for fname in argv[1:]:
fp = file(fname, 'rb')
#font = TrueTypeFont(fname, fp)
font = CFFFont(fname, fp)
print font
fp.close()
return
if __name__ == '__main__': sys.exit(main(sys.argv))
| gpl-2.0 |
xkfz007/binutils-gdb | gdb/copyright.py | 19 | 11540 | #! /usr/bin/env python
# Copyright (C) 2011-2015 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""copyright.py
This script updates the list of years in the copyright notices in
most files maintained by the GDB project.
Usage: cd src/gdb && python copyright.py
Always review the output of this script before committing it!
A useful command to review the output is:
% filterdiff -x \*.c -x \*.cc -x \*.h -x \*.exp updates.diff
This removes the bulk of the changes which are most likely to be correct.
"""
import datetime
import os
import os.path
import subprocess
def get_update_list():
"""Return the list of files to update.
Assumes that the current working directory when called is the root
of the GDB source tree (NOT the gdb/ subdirectory!). The names of
the files are relative to that root directory.
"""
result = []
for gdb_dir in ('gdb', 'sim', 'include/gdb'):
for root, dirs, files in os.walk(gdb_dir, topdown=True):
for dirname in dirs:
reldirname = "%s/%s" % (root, dirname)
if (dirname in EXCLUDE_ALL_LIST
or reldirname in EXCLUDE_LIST
or reldirname in NOT_FSF_LIST
or reldirname in BY_HAND):
# Prune this directory from our search list.
dirs.remove(dirname)
for filename in files:
relpath = "%s/%s" % (root, filename)
if (filename in EXCLUDE_ALL_LIST
or relpath in EXCLUDE_LIST
or relpath in NOT_FSF_LIST
or relpath in BY_HAND):
# Ignore this file.
pass
else:
result.append(relpath)
return result
def update_files(update_list):
"""Update the copyright header of the files in the given list.
We use gnulib's update-copyright script for that.
"""
# We want to use year intervals in the copyright notices, and
# all years should be collapsed to one single year interval,
# even if there are "holes" in the list of years found in the
# original copyright notice (OK'ed by the FSF, case [gnu.org #719834]).
os.environ['UPDATE_COPYRIGHT_USE_INTERVALS'] = '2'
# Perform the update, and save the output in a string.
update_cmd = ['bash', 'gdb/gnulib/import/extra/update-copyright']
update_cmd += update_list
p = subprocess.Popen(update_cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
update_out = p.communicate()[0]
# Process the output. Typically, a lot of files do not have
# a copyright notice :-(. The update-copyright script prints
# a well defined warning when it did not find the copyright notice.
# For each of those, do a sanity check and see if they may in fact
# have one. For the files that are found not to have one, we filter
# the line out from the output, since there is nothing more to do,
# short of looking at each file and seeing which notice is appropriate.
# Too much work! (~4,000 files listed as of 2012-01-03).
update_out = update_out.splitlines()
warning_string = ': warning: copyright statement not found'
warning_len = len(warning_string)
for line in update_out:
if line.endswith('\n'):
line = line[:-1]
if line.endswith(warning_string):
filename = line[:-warning_len]
if may_have_copyright_notice(filename):
print line
else:
# Unrecognized file format. !?!
print "*** " + line
def may_have_copyright_notice(filename):
"""Check that the given file does not seem to have a copyright notice.
The filename is relative to the root directory.
This function assumes that the current working directory is that root
directory.
    The algorithm is fairly crude, meaning that it might return
some false positives. I do not think it will return any false
negatives... We might improve this function to handle more
complex cases later...
"""
# For now, it may have a copyright notice if we find the word
# "Copyright" at the (reasonable) start of the given file, say
# 50 lines...
MAX_LINES = 50
fd = open(filename)
lineno = 1
for line in fd:
if 'Copyright' in line:
return True
lineno += 1
        if lineno > MAX_LINES:
return False
return False
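# Hedged example (added comment): a file whose first lines contain
# "Copyright (C) ... Free Software Foundation, Inc." is reported as possibly
# having a notice; a notice placed after the first 50 lines would be missed,
# but notices normally sit at the top of a file, which is why the docstring
# above does not expect false negatives in practice.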
def main ():
"""The main subprogram."""
if not os.path.isfile("gnulib/import/extra/update-copyright"):
print "Error: This script must be called from the gdb directory."
root_dir = os.path.dirname(os.getcwd())
os.chdir(root_dir)
update_list = get_update_list()
update_files (update_list)
# Remind the user that some files need to be updated by HAND...
if BY_HAND:
print
print "\033[31mREMINDER: The following files must be updated by hand." \
"\033[0m"
for filename in BY_HAND + MULTIPLE_COPYRIGHT_HEADERS:
print " ", filename
############################################################################
#
# Some constants, placed at the end because they take up a lot of room.
# The actual value of these constants is not significant to the understanding
# of the script.
#
############################################################################
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Filenames are relative to the root directory.
EXCLUDE_LIST = (
'gdb/nat/glibc_thread_db.h',
'gdb/CONTRIBUTE',
'gdb/gnulib/import'
)
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Matches any file or directory name anywhere. Use with caution.
# This is mostly for files that can be found in multiple directories.
# Eg: We want all files named COPYING to be left untouched.
EXCLUDE_ALL_LIST = (
"COPYING", "COPYING.LIB", "CVS", "configure", "copying.c",
"fdl.texi", "gpl.texi", "aclocal.m4",
)
# The list of files to update by hand.
BY_HAND = (
# These files are sensitive to line numbering.
"gdb/testsuite/gdb.base/step-line.inp",
"gdb/testsuite/gdb.base/step-line.c",
)
# Files containing multiple copyright headers. This script is only
# fixing the first one it finds, so we need to finish the update
# by hand.
MULTIPLE_COPYRIGHT_HEADERS = (
"gdb/doc/gdb.texinfo",
"gdb/doc/refcard.tex",
"gdb/gdbarch.sh",
)
# The list of file which have a copyright, but not head by the FSF.
# Filenames are relative to the root directory.
NOT_FSF_LIST = (
"gdb/exc_request.defs",
"gdb/gdbtk",
"gdb/testsuite/gdb.gdbtk/",
"sim/arm/armemu.h", "sim/arm/armos.c", "sim/arm/gdbhost.c",
"sim/arm/dbg_hif.h", "sim/arm/dbg_conf.h", "sim/arm/communicate.h",
"sim/arm/armos.h", "sim/arm/armcopro.c", "sim/arm/armemu.c",
"sim/arm/kid.c", "sim/arm/thumbemu.c", "sim/arm/armdefs.h",
"sim/arm/armopts.h", "sim/arm/dbg_cp.h", "sim/arm/dbg_rdi.h",
"sim/arm/parent.c", "sim/arm/armsupp.c", "sim/arm/armrdi.c",
"sim/arm/bag.c", "sim/arm/armvirt.c", "sim/arm/main.c", "sim/arm/bag.h",
"sim/arm/communicate.c", "sim/arm/gdbhost.h", "sim/arm/armfpe.h",
"sim/arm/arminit.c",
"sim/common/cgen-fpu.c", "sim/common/cgen-fpu.h",
"sim/common/cgen-accfp.c",
"sim/erc32/sis.h", "sim/erc32/erc32.c", "sim/erc32/func.c",
"sim/erc32/float.c", "sim/erc32/interf.c", "sim/erc32/sis.c",
"sim/erc32/exec.c",
"sim/mips/m16run.c", "sim/mips/sim-main.c",
"sim/moxie/moxie-gdb.dts",
# Not a single file in sim/ppc/ appears to be copyright FSF :-(.
"sim/ppc/filter.h", "sim/ppc/gen-support.h", "sim/ppc/ld-insn.h",
"sim/ppc/hw_sem.c", "sim/ppc/hw_disk.c", "sim/ppc/idecode_branch.h",
"sim/ppc/sim-endian.h", "sim/ppc/table.c", "sim/ppc/hw_core.c",
"sim/ppc/gen-support.c", "sim/ppc/gen-semantics.h", "sim/ppc/cpu.h",
"sim/ppc/sim_callbacks.h", "sim/ppc/RUN", "sim/ppc/Makefile.in",
"sim/ppc/emul_chirp.c", "sim/ppc/hw_nvram.c", "sim/ppc/dc-test.01",
"sim/ppc/hw_phb.c", "sim/ppc/hw_eeprom.c", "sim/ppc/bits.h",
"sim/ppc/hw_vm.c", "sim/ppc/cap.h", "sim/ppc/os_emul.h",
"sim/ppc/options.h", "sim/ppc/gen-idecode.c", "sim/ppc/filter.c",
"sim/ppc/corefile-n.h", "sim/ppc/std-config.h", "sim/ppc/ld-decode.h",
"sim/ppc/filter_filename.h", "sim/ppc/hw_shm.c",
"sim/ppc/pk_disklabel.c", "sim/ppc/dc-simple", "sim/ppc/misc.h",
"sim/ppc/device_table.h", "sim/ppc/ld-insn.c", "sim/ppc/inline.c",
"sim/ppc/emul_bugapi.h", "sim/ppc/hw_cpu.h", "sim/ppc/debug.h",
"sim/ppc/hw_ide.c", "sim/ppc/debug.c", "sim/ppc/gen-itable.h",
"sim/ppc/interrupts.c", "sim/ppc/hw_glue.c", "sim/ppc/emul_unix.c",
"sim/ppc/sim_calls.c", "sim/ppc/dc-complex", "sim/ppc/ld-cache.c",
"sim/ppc/registers.h", "sim/ppc/dc-test.02", "sim/ppc/options.c",
"sim/ppc/igen.h", "sim/ppc/registers.c", "sim/ppc/device.h",
"sim/ppc/emul_chirp.h", "sim/ppc/hw_register.c", "sim/ppc/hw_init.c",
"sim/ppc/sim-endian-n.h", "sim/ppc/filter_filename.c",
"sim/ppc/bits.c", "sim/ppc/idecode_fields.h", "sim/ppc/hw_memory.c",
"sim/ppc/misc.c", "sim/ppc/double.c", "sim/ppc/psim.h",
"sim/ppc/hw_trace.c", "sim/ppc/emul_netbsd.h", "sim/ppc/psim.c",
"sim/ppc/ppc-instructions", "sim/ppc/tree.h", "sim/ppc/README",
"sim/ppc/gen-icache.h", "sim/ppc/gen-model.h", "sim/ppc/ld-cache.h",
"sim/ppc/mon.c", "sim/ppc/corefile.h", "sim/ppc/vm.c",
"sim/ppc/INSTALL", "sim/ppc/gen-model.c", "sim/ppc/hw_cpu.c",
"sim/ppc/corefile.c", "sim/ppc/hw_opic.c", "sim/ppc/gen-icache.c",
"sim/ppc/events.h", "sim/ppc/os_emul.c", "sim/ppc/emul_generic.c",
"sim/ppc/main.c", "sim/ppc/hw_com.c", "sim/ppc/gen-semantics.c",
"sim/ppc/emul_bugapi.c", "sim/ppc/device.c", "sim/ppc/emul_generic.h",
"sim/ppc/tree.c", "sim/ppc/mon.h", "sim/ppc/interrupts.h",
"sim/ppc/cap.c", "sim/ppc/cpu.c", "sim/ppc/hw_phb.h",
"sim/ppc/device_table.c", "sim/ppc/lf.c", "sim/ppc/lf.c",
"sim/ppc/dc-stupid", "sim/ppc/hw_pal.c", "sim/ppc/ppc-spr-table",
"sim/ppc/emul_unix.h", "sim/ppc/words.h", "sim/ppc/basics.h",
"sim/ppc/hw_htab.c", "sim/ppc/lf.h", "sim/ppc/ld-decode.c",
"sim/ppc/sim-endian.c", "sim/ppc/gen-itable.c",
"sim/ppc/idecode_expression.h", "sim/ppc/table.h", "sim/ppc/dgen.c",
"sim/ppc/events.c", "sim/ppc/gen-idecode.h", "sim/ppc/emul_netbsd.c",
"sim/ppc/igen.c", "sim/ppc/vm_n.h", "sim/ppc/vm.h",
"sim/ppc/hw_iobus.c", "sim/ppc/inline.h",
"sim/testsuite/sim/bfin/s21.s", "sim/testsuite/sim/mips/mips32-dsp2.s",
)
if __name__ == "__main__":
main()
| gpl-2.0 |
lmiccini/sos | sos/plugins/lvm2.py | 5 | 3024 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Lvm2(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""LVM2 volume manager
"""
plugin_name = 'lvm2'
profiles = ('storage',)
option_list = [("lvmdump", 'collect an lvmdump tarball', 'fast', False),
("lvmdump-am", 'attempt to collect an lvmdump with '
'advanced options and raw metadata collection', 'slow',
False)]
def do_lvmdump(self, metadata=False):
"""Collects an lvmdump in standard format with optional metadata
archives for each physical volume present.
"""
lvmdump_cmd = "lvmdump %s -d '%s'"
lvmdump_opts = ""
if metadata:
lvmdump_opts = "-a -m"
cmd = lvmdump_cmd % (lvmdump_opts,
self.get_cmd_output_path(name="lvmdump"))
self.add_cmd_output(cmd)
def setup(self):
# use locking_type 0 (no locks) when running LVM2 commands,
# from lvm.conf:
# Turn locking off by setting to 0 (dangerous: risks metadata
# corruption if LVM2 commands get run concurrently).
# None of the commands issued by sos ever modify metadata and this
# avoids the possibility of hanging lvm commands when another process
# or node holds a conflicting lock.
lvm_opts = '--config="global{locking_type=0}"'
self.add_cmd_output(
"vgdisplay -vv %s" % lvm_opts,
root_symlink="vgdisplay"
)
pvs_cols = 'pv_mda_free,pv_mda_size,pv_mda_count,pv_mda_used_count'
pvs_cols = pvs_cols + ',' + 'pe_start'
vgs_cols = 'vg_mda_count,vg_mda_free,vg_mda_size,vg_mda_used_count'
vgs_cols = vgs_cols + ',' + 'vg_tags'
lvs_cols = 'lv_tags,devices'
self.add_cmd_output([
"vgscan -vvv %s" % lvm_opts,
"pvscan -v %s" % lvm_opts,
"pvs -a -v -o +%s %s" % (pvs_cols, lvm_opts),
"vgs -v -o +%s %s" % (vgs_cols, lvm_opts),
"lvs -a -o +%s %s" % (lvs_cols, lvm_opts)
])
self.add_copy_spec("/etc/lvm")
if self.get_option('lvmdump'):
self.do_lvmdump()
elif self.get_option('lvmdump-am'):
self.do_lvmdump(metadata=True)
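    # Hedged usage note (added comment): the plugin options declared above are
    # normally toggled from the sos command line, for example
    #   sosreport -o lvm2 -k lvm2.lvmdump
    # to collect the standard lvmdump tarball; exact flags may differ between
    # sos versions.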
# vim: et ts=4 sw=4
| gpl-2.0 |
vthorsteinsson/tensor2tensor | tensor2tensor/models/research/transformer_aux.py | 1 | 5657 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer with auxiliary losses from https://arxiv.org/abs/1803.00144."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_layers
from tensor2tensor.models import transformer
from tensor2tensor.utils import registry
import tensorflow as tf
def shift_and_pad(tensor, shift, axis=0):
"""Shifts and pads with zero along an axis.
Example:
shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2]
shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0]
Args:
tensor: Tensor; to be shifted and padded.
shift: int; number of positions to shift by.
axis: int; along which axis to shift and pad.
Returns:
A Tensor with the same shape as the input tensor.
"""
shape = tensor.shape
rank = len(shape)
assert 0 <= abs(axis) < rank
length = int(shape[axis])
assert 0 <= abs(shift) < length
paddings = [(0, 0)] * rank
begin = [0] * rank
size = [-1] * rank
if shift > 0:
paddings[axis] = (shift, 0)
size[axis] = length - shift
elif shift < 0:
paddings[axis] = (0, -shift)
begin[axis] = -shift
ret = tf.pad(tf.slice(tensor, begin, size), paddings)
return ret
@registry.register_model
class TransformerAux(transformer.Transformer):
"""Attention net. See file docstring."""
def _extract_shift_values(self):
"""Parses the shift string.
The hparams should contain the key shift_values, which maps to a
comma-separated string of integers. These integers specify the number of
timesteps to predict/reconstruct to compute auxiliary losses.
For instance, "-4,2,6" means to reconstruct the target 4 steps before and
predict the targets 2 steps and 6 steps ahead.
Returns:
List of int != 0 shift values to compute the auxiliary losses.
"""
shift_values_str = self._hparams.get("shift_values", "")
shift_values = [int(x) for x in shift_values_str.split(",")]
tf.logging.info(
"Computing auxiliary losses for the following shifts: %s",
shift_values)
return shift_values
def auxiliary_loss(self, body_output, features, shift):
"""Auxiliary predict loss.
Args:
body_output: Tensor with shape [batch_size, decoder_length, hidden_dim].
features: Map of features to the model. Must contain the following:
"targets": Target decoder outputs.
[batch_size, decoder_length, 1, hidden_dim]
shift: int != 0, amount to shift/pad the target sequence.
If shift > 0, it represents the number of previous timesteps to
reconstruct; if shift < 0, it represents the number of future timesteps
to predict.
Returns:
A 2-tuple of the numerator and denominator of the cross-entropy loss.
Raises:
ValueError: if features does not contain a targets_raw tensor.
"""
assert isinstance(shift, int) and shift != 0
name = "reconst_%d" % shift if shift > 0 else "predict_%d" % abs(shift)
if features and "targets_raw" in features:
targets = features["targets_raw"]
targets = common_layers.flatten4d3d(targets)
else:
raise ValueError(
"Feature map must contain a targets_raw tensor.")
with tf.variable_scope(name):
logits = self.top(body_output, features)
labels = shift_and_pad(targets, shift, axis=1)
return common_layers.padded_cross_entropy(
logits,
labels,
self._hparams.label_smoothing)
def body(self, features):
"""Transformer main model_fn.
Args:
features: Map of features to the model. Should contain the following:
"inputs": Transformer inputs.
[batch_size, input_length, 1, hidden_dim].
"targets": Target decoder outputs.
[batch_size, target_length, 1, hidden_dim]
"target_space_id": A scalar int from data_generators.problem.SpaceID.
Returns:
A 2-tuple containing:
Logit tensor. [batch_size, decoder_length, vocab_size]
Map of keys to loss tensors. Should contain the following:
"training": Training loss (shift == 0).
"auxiliary": Auxiliary loss (shift != 0).
"""
output = super(TransformerAux, self).body(features)
output, losses = self._normalize_body_output(output)
aux = 0.0
for shift in self._extract_shift_values():
loss_num, loss_den = self.auxiliary_loss(output, features, shift)
aux += loss_num / loss_den
losses["auxiliary"] = aux
return output, losses
@registry.register_hparams
def transformer_aux_base():
"""Set of hyperparameters."""
hparams = transformer.transformer_base()
hparams.shared_embedding_and_softmax_weights = False
hparams.add_hparam("shift_values", "1,2,3,4")
return hparams
@registry.register_hparams
def transformer_aux_tiny():
"""Set of hyperparameters."""
hparams = transformer.transformer_tiny()
hparams.shared_embedding_and_softmax_weights = False
hparams.add_hparam("shift_values", "1,2")
return hparams
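# Hedged usage sketch (added comment, not part of the original file): since the
# model and hparams sets are registered above, a training run could select them
# roughly like
#   t2t-trainer --model=transformer_aux --hparams_set=transformer_aux_base \
#       --hparams='shift_values=-4,2,6' --problem=translate_ende_wmt32k
# where the problem name and shift values are only examples.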
| apache-2.0 |
mitchelljkotler/django | django/contrib/staticfiles/views.py | 581 | 1329 | """
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import os
import posixpath
from django.conf import settings
from django.contrib.staticfiles import finders
from django.http import Http404
from django.utils.six.moves.urllib.parse import unquote
from django.views import static
def serve(request, path, insecure=False, **kwargs):
"""
Serve static files below a given point in the directory structure or
from locations inferred from the staticfiles finders.
To use, put a URL pattern such as::
from django.contrib.staticfiles import views
url(r'^(?P<path>.*)$', views.serve)
in your URLconf.
It uses the django.views.static.serve() view to serve the found files.
"""
if not settings.DEBUG and not insecure:
raise Http404
normalized_path = posixpath.normpath(unquote(path)).lstrip('/')
absolute_path = finders.find(normalized_path)
if not absolute_path:
if path.endswith('/') or path == '':
raise Http404("Directory indexes are not allowed here.")
raise Http404("'%s' could not be found" % path)
document_root, path = os.path.split(absolute_path)
return static.serve(request, path, document_root=document_root, **kwargs)
| bsd-3-clause |
bluemini/kuma | vendor/packages/pygments/lexers/__init__.py | 73 | 8735 | # -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, itervalues, guess_decode
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + list(LEXERS)
_lexer_cache = {}
_pattern_cache = {}
def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn)
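# Hedged example (added comment): fnmatch.translate() turns the shell-style glob
# into an anchored regular expression, so for instance
#   _fn_matches('setup.py', '*.py')   # -> a match object (truthy)
#   _fn_matches('setup.pyc', '*.py')  # -> None
# and the compiled pattern is cached per glob in _pattern_cache.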
def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
"""Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.
"""
for item in itervalues(LEXERS):
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls
def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in itervalues(LEXERS):
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias)
def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus
return cls.priority + bonus
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0]
def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options)
def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
for lexer in find_plugin_lexers():
yield lexer
def guess_lexer_for_filename(_fn, _text, **options):
"""
Lookup all lexers that handle those filenames primary (``filenames``)
or secondary (``alias_filenames``). Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options)
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options)
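def _example_guess_lexer(text='#!/usr/bin/env python\nprint(1)\n'):
    """Illustrative sketch (not part of the original module): the expected call
    shape of guess_lexer() on a shebanged script. The sample text is an
    assumption for illustration only."""
    lexer = guess_lexer(text)
    return lexer.name  # typically 'Python' when the shebang is recognised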
class _automodule(types.ModuleType):
"""Automatically import lexers."""
def __getattr__(self, name):
info = LEXERS.get(name)
if info:
_load_lexers(info[0])
cls = _lexer_cache[info[1]]
setattr(self, name, cls)
return cls
raise AttributeError(name)
oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
| mpl-2.0 |
hrjn/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 33 | 20167 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    # Test that an error is raised when memory is neither
    # a str nor a joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that a ValueError is raised when the connectivity matrix has the wrong shape
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
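def _example_agglomerative_usage():
    # Illustrative sketch (not part of the test suite): minimal use of the
    # estimator exercised above. The toy data and n_clusters are assumptions
    # for illustration only.
    rng = np.random.RandomState(0)
    X = rng.randn(30, 2)
    labels = AgglomerativeClustering(n_clusters=3, linkage='ward').fit_predict(X)
    return labels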
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
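def _example_ward_tree_usage():
    # Illustrative sketch (not part of the test suite): calling ward_tree
    # directly on toy data and unpacking its return values, as the tests above
    # do. The data shape is an assumption for illustration only.
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4)
    children, n_components, n_leaves, parents = ward_tree(X)
    return children.shape  # (n_samples - 1, 2): one row per merge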
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Check non regression of a bug if a non item assignable connectivity is
# provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
jarn0ld/gnuradio | gr-vocoder/python/vocoder/qa_g721_vocoder.py | 57 | 1573 | #!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, vocoder, blocks
class test_g721_vocoder (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test001_module_load (self):
data = (8,24,36,52,56,64,76,88,104,124,132,148,172,
196,220,244,280,320,372,416,468,524,580,648)
src = blocks.vector_source_s(data)
enc = vocoder.g721_encode_sb()
dec = vocoder.g721_decode_bs()
snk = blocks.vector_sink_s()
self.tb.connect(src, enc, dec, snk)
self.tb.run()
actual_result = snk.data()
self.assertEqual(data, actual_result)
if __name__ == '__main__':
gr_unittest.run(test_g721_vocoder, "test_g721_vocoder.xml")
| gpl-3.0 |
GGFHF/NGScloud | Package/gdialogs.py | 1 | 44030 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
'''
This software has been developed by:
GI Genética, Fisiología e Historia Forestal
Dpto. Sistemas y Recursos Naturales
ETSI Montes, Forestal y del Medio Natural
Universidad Politécnica de Madrid
http://gfhforestal.com/
https://github.com/ggfhf/
Licence: GNU General Public Licence Version 3.
'''
#-------------------------------------------------------------------------------
'''
This source contains the dialog classes corresponding to the graphical user interface of
the NGScloud software package.
'''
#-------------------------------------------------------------------------------
import os
import PIL.Image
import PIL.ImageTk
import tkinter
import tkinter.font
import tkinter.ttk
import sys
import datetime
import os
import xlib
import xssh
#-------------------------------------------------------------------------------
class DialogTable(tkinter.Toplevel):
#---------------
def __init__(self, parent, title_text, window_height, window_width, data_list, data_dict, item_dict, action=None, params=[]):
'''
        Execute actions corresponding to the creation of a "DialogTable" instance.
'''
# save initial parameters in instance variables
self.parent = parent
self.title_text = title_text
self.window_height = window_height
self.window_width = window_width
self.data_list = data_list
self.data_dict = data_dict
self.item_dict = item_dict
self.action = action
self.params = params
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog Table.
self.create_window()
# build the graphical user interface
self.build_gui()
# populate the table with data
self.populate_table()
#---------------
def create_window(self):
'''
Create the window of "DialogTable".
'''
# define the dimensions
self.minsize(height=self.window_height, width=self.window_width)
self.maxsize(height=self.window_height, width=self.window_width)
x = round((self.winfo_screenwidth() - self.window_width) / 2)
y = round((self.winfo_screenheight() - self.window_height) / 2)
self.geometry('{}x{}+{}+{}'.format(self.window_width, self.window_height, x, y))
# set the title
self.title('{0} - {1} - Table'.format(xlib.get_project_name(), self.title_text))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
        Build the graphical user interface of "DialogTable".
'''
# create "imagetk_close"
image_close = PIL.Image.open('./image_close.png')
imagetk_close = PIL.ImageTk.PhotoImage(image_close)
# create "frame_toolbar" and register it with the pack geometry manager
self.frame_toolbar = tkinter.Frame(self, borderwidth=1, relief='raised')
self.frame_toolbar.pack(side='top', fill='x')
# create "button_close" and register it with the pack geometry manager
self.button_close = tkinter.Button(self.frame_toolbar, command=self.close, relief='flat', image=imagetk_close)
self.button_close.image = imagetk_close
self.button_close.pack(side='left', padx=2, pady=5)
# create "treeview" and register it with the pack geometry manager
self.treeview = tkinter.ttk.Treeview(self)
self.treeview.pack(side='left', fill='both', expand=True)
# set columns in Treeview widget
self.treeview['columns'] = self.data_list
self.treeview['show'] = 'headings'
for datum in self.data_list:
# -- self.treeview.column(datum, width=self.data_dict[datum]['width'])
if self.data_dict[datum]['aligment'] == 'left':
aligment = tkinter.W
elif self.data_dict[datum]['aligment'] == 'centre':
aligment = tkinter.W + tkinter.E
elif self.data_dict[datum]['aligment'] == 'right':
aligment = tkinter.E
self.treeview.column(datum, minwidth=self.data_dict[datum]['width'], width=self.data_dict[datum]['width'], anchor=aligment, stretch=False)
self.treeview.heading(datum, text=self.data_dict[datum]['text'])
# create "scrollbar_x" and register it with the pack geometry manager
self.scrollbar_x = tkinter.Scrollbar(self.treeview, orient='horizontal', command=self.treeview.xview)
self.scrollbar_x.pack(side='bottom', fill='x')
self.treeview.configure(xscrollcommand=self.scrollbar_x.set)
# create "scrollbar_y" and register it with the pack geometry manager
self.scrollbar_y = tkinter.Scrollbar(self.treeview, orient='vertical', command=self.treeview.yview)
self.scrollbar_y.pack(side='right', fill='y')
self.treeview.configure(yscrollcommand=self.scrollbar_y.set)
# link a handler to events
self.treeview.bind("<Double-1>", self.double_click)
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
#---------------
def populate_table(self):
'''
Populate the Treeview widget with the data of "DialogTable".
'''
# insert the items in Treeview widget
for item_key in sorted(self.item_dict.keys()):
row_values_list = []
for datum in self.data_list:
row_values_list.append(self.item_dict[item_key][datum])
self.treeview.insert('', 'end', values=row_values_list)
#---------------
def double_click(self, event):
'''
        Manage the action of a double click on a table item.
        '''
        # manage the action
try:
# get the table item selected
item = self.treeview.selection()[0]
except:
            message = 'There is no action associated with this table item.'
OK = tkinter.messagebox.showwarning(self.title(), message)
else:
if self.action == 'view_submission_logs':
run_id = self.treeview.item(item)['values'][0]
self.view_local_process_log(run_id)
elif self.action == 'view_result_logs':
experiment_id = self.treeview.item(item)['values'][0]
run_id = self.treeview.item(item)['values'][1]
self.view_log(experiment_id, run_id)
elif self.action == 'list_directory':
file_type = self.treeview.item(item)['values'][0]
file_name = self.treeview.item(item)['values'][1]
if file_type == 'directory':
self.list_directory(file_name)
else:
self.show_file_details(file_name)
else:
                message = 'There is no action associated with this table item.'
OK = tkinter.messagebox.showwarning(self.title(), message)
#---------------
def close(self, event=None):
'''
Close the "DialogTable".
'''
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
def view_local_process_log(self, run_id):
'''
View the log of a local process.
'''
        # get the local log file path
log_file = '{0}/{1}'.format(xlib.get_log_dir(), run_id)
# create and show a instance "DialogViewer" to view the log file
dialog_viewer = DialogViewer(self, log_file, None)
self.wait_window(dialog_viewer)
#---------------
def view_log(self, experiment_id, run_id):
'''
View the log of the run identification.
'''
# get the cluster name
cluster_name = self.params[0]
# get the log file name and build cluster path
log_file = xlib.get_cluster_log_file()
cluster_file_path = '{0}/{1}/{2}'.format(xlib.get_cluster_experiment_result_dir(experiment_id), run_id, log_file)
# create and show a instance "DialogViewer" to view the log file
dialog_viewer = DialogViewer(self, cluster_file_path, cluster_name)
self.wait_window(dialog_viewer)
#---------------
def list_directory(self, directory_name):
'''
View the directory of a dataset.
'''
        # get the parent directory
parent_directory = self.params[0]
# get the SSH client
ssh_client = self.params[1]
# get the directory dictionary of directories in the volume
command = 'ls -la {0}/{1}'.format(parent_directory, directory_name)
(OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
if OK:
directory_dict = {}
for line in stdout:
line = line.rstrip('\n')
if line.startswith('d') or line.startswith('-'):
file_data_list = line.split()
file_type = 'directory' if file_data_list[0][0] == 'd' else 'file'
permissions = file_data_list[0][1:]
links_number = file_data_list[1]
owner_name = file_data_list[2]
owner_group = file_data_list[3]
file_size = file_data_list[4]
modification_month = file_data_list[5]
modification_day = file_data_list[6]
modification_time = file_data_list[7]
file_name = file_data_list[8]
if file_name not in ['.', '..', 'lost+found']:
key = '{0}-{1}'.format(file_type, file_name)
directory_dict[key] = {'file_type': file_type, 'permissions': permissions, 'links_number': links_number, 'owner_name': owner_name, 'owner_group': owner_group, 'file_size': file_size, 'modification_month': modification_month, 'modification_day': modification_day, 'modification_time': modification_time, 'file_name': file_name}
        # check whether any file was found
if OK:
if directory_dict == {}:
                message = 'There are no files.'
                tkinter.messagebox.showwarning('{0} - {1}'.format(xlib.get_project_name(), self.title_text), message)
# build the data list
if OK:
data_list = ['file_type', 'file_name']
# build the data dictionary
if OK:
data_dict = {}
data_dict['file_type']= {'text': 'Type', 'width': 120, 'aligment': 'left'}
data_dict['file_name'] = {'text': 'Name', 'width': 400, 'aligment': 'left'}
        # create the dialog Table to show the directory content
if OK:
dialog_table = DialogTable(self, 'Directory {0}/{1}'.format(parent_directory, directory_name), 400, 600, data_list, data_dict, directory_dict, 'list_directory', ['{0}/{1}'.format(parent_directory, directory_name), ssh_client])
self.wait_window(dialog_table)
#---------------
def show_file_details(self, file_name):
'''
        Show the details of a file of a dataset.
'''
        # get the parent directory
parent_directory = self.params[0]
# get the SSH client
ssh_client = self.params[1]
# get the directory dictionary of directories in the volume
command = 'ls -la {0}/{1}'.format(parent_directory, file_name)
(OK, stdout, stderr) = xssh.execute_cluster_command(ssh_client, command)
if OK:
file_detail_dict = {}
for line in stdout:
line = line.rstrip('\n')
file_data_list = line.split()
permissions = file_data_list[0][1:]
links_number = file_data_list[1]
owner_name = file_data_list[2]
owner_group = file_data_list[3]
day = int(file_data_list[6])
try:
                    month = 1 + ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'].index(file_data_list[5])
except:
month = 0
if file_data_list[7].find(':') > -1:
year = datetime.datetime.now().year
modification_date = '{0:4}-{1:02d}-{2:02d}'.format(year, month, day)
modification_time = file_data_list[7]
else:
year = int(file_data_list[7])
modification_date = '{0:4}-{1:02}-{2:02}'.format(year, month, day)
modification_time = ' '
file_name = file_data_list[8]
file_detail_dict[0] = {'data': 'directory', 'value': os.path.dirname(file_name)}
file_detail_dict[1] = {'data': 'name', 'value': os.path.basename(file_name)}
file_detail_dict[2] = {'data': 'size', 'value': file_data_list[4]}
file_detail_dict[3] = {'data': 'permissions', 'value': file_data_list[0][1:]}
file_detail_dict[4] = {'data': 'modification date', 'value': modification_date}
file_detail_dict[5] = {'data': 'modification time', 'value': modification_time}
file_detail_dict[6] = {'data': 'owner group', 'value': file_data_list[3]}
file_detail_dict[7] = {'data': 'owner name', 'value': file_data_list[2]}
        # check whether any detail was found
if OK:
if file_detail_dict == {}:
                message = 'There are no details.'
                tkinter.messagebox.showwarning('{0} - {1}'.format(xlib.get_project_name(), self.title_text), message)
# build the data list
if OK:
data_list = ['data', 'value']
# build the data dictionary
if OK:
data_dict = {}
data_dict['data']= {'text': 'Data', 'width': 120, 'aligment': 'left'}
data_dict['value'] = {'text': 'Value', 'width': 400, 'aligment': 'left'}
        # create the dialog Table to show the file details
if OK:
dialog_table = DialogTable(self, 'File {0}/{1}'.format(parent_directory, file_name), 400, 600, data_list, data_dict, file_detail_dict)
self.wait_window(dialog_table)
#---------------
#-------------------------------------------------------------------------------
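def _example_dialog_table(parent):
    '''
    Illustrative sketch (not part of the original module): build the minimal
    inputs that "DialogTable" expects. The column names and values below are
    assumptions for illustration; the constructor signature comes from the
    class above (note the 'aligment' key spelling that the class actually reads).
    '''
    data_list = ['experiment_id', 'run_id']
    data_dict = {}
    data_dict['experiment_id'] = {'text': 'Experiment', 'width': 200, 'aligment': 'left'}
    data_dict['run_id'] = {'text': 'Run', 'width': 200, 'aligment': 'left'}
    item_dict = {'run-001': {'experiment_id': 'exp-01', 'run_id': 'run-001'}}
    dialog_table = DialogTable(parent, 'Example runs', 400, 600, data_list, data_dict, item_dict)
    parent.wait_window(dialog_table)
#---------------
#-------------------------------------------------------------------------------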
class DialogLog(tkinter.Toplevel):
#---------------
WINDOW_MIN_HEIGHT = 680
WINDOW_MIN_WIDTH = 680
#---------------
def __init__(self, parent, head='', calling_function=None):
'''
        Execute actions corresponding to the creation of a "DialogLog" instance.
'''
# save initial parameters in instance variables
self.parent = parent
self.head = head
self.calling_function = calling_function
self.is_enabled_button_close = False
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog Log.
self.create_window()
# build the graphical user interface
self.build_gui()
# set cursor to show busy status
self.config(cursor='watch')
self.update()
self.text.config(cursor='watch')
self.text.update()
# get the local log file
self.log_file = xlib.get_log_file(self.calling_function)
# open the local log file
try:
if not os.path.exists(os.path.dirname(self.log_file)):
os.makedirs(os.path.dirname(self.log_file))
self.log_file_id = open(self.log_file, mode='w', encoding='iso-8859-1')
except:
message = '*** ERROR: The file {0} can not be created'.format(self.log_file)
tkinter.messagebox.showerror('{0} - {1}'.format(xlib.get_project_name(), self.head), message)
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
def create_window(self):
'''
Create the window of "DialogLog".
'''
# define the dimensions
self.minsize(height=self.WINDOW_MIN_HEIGHT, width=self.WINDOW_MIN_WIDTH)
self.maxsize(height=self.winfo_screenheight(), width=self.winfo_screenwidth())
x = round((self.winfo_screenwidth() - self.WINDOW_MIN_WIDTH) / 2)
y = round((self.winfo_screenheight() - self.WINDOW_MIN_HEIGHT) / 2)
self.geometry('{}x{}+{}+{}'.format(self.WINDOW_MIN_WIDTH, self.WINDOW_MIN_HEIGHT, x, y))
# set the title
self.title('{0} - {1} - Log'.format(xlib.get_project_name(), self.head))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
        Build the graphical user interface of "DialogLog".
'''
# create "imagetk_close"
image_close = PIL.Image.open('./image_close.png')
imagetk_close = PIL.ImageTk.PhotoImage(image_close)
# create "frame_toolbar" and register it with the grid geometry manager
self.frame_toolbar = tkinter.Frame(self, borderwidth=1, relief='raised')
self.frame_toolbar.pack(side='top', fill='x')
# create "button_close" and register it with the pack geometry manager
self.button_close = tkinter.Button(self.frame_toolbar, command=self.close, relief='flat', image=imagetk_close, state='disabled')
self.button_close.image = imagetk_close
self.button_close.pack(side='left', padx=2, pady=5)
# create "text" and register it with the grid geometry manager
self.text = tkinter.Text(self, wrap='none', state='disabled')
self.text.pack(expand=True, fill='both')
# create "scrollbar_x" and register it with the pack geometry manager
self.scrollbar_x = tkinter.Scrollbar(self.text, orient='horizontal', command=self.text.xview)
self.scrollbar_x.pack(side='bottom', fill='x')
self.text.configure(xscrollcommand=self.scrollbar_x.set)
# create "scrollbar_y" and register it with the pack geometry manager
self.scrollbar_y = tkinter.Scrollbar(self.text, orient='vertical', command=self.text.yview)
self.scrollbar_y.pack(side='right', fill='y')
self.text.configure(yscrollcommand=self.scrollbar_y.set)
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
#---------------
def close(self, event=None):
'''
Close "DialogLog".
'''
# close the local log file
self.log_file_id.close()
# delete all widgets and terminate the mainloop
if self.is_enabled_button_close:
self.destroy()
#---------------
def enable_button_close(self):
'''
Enable "button_close".
'''
# set cursor to show normal status
self.config(cursor='')
self.update()
self.text.config(cursor='')
self.text.update()
# set state "normal" to "button_close"
self.button_close['state'] = 'normal'
self.is_enabled_button_close = True
#---------------
def write(self, message=''):
'''
Add a message in the widget "text" and in the log file.
'''
# write the message in widget "text"
self.text.configure(state='normal')
self.text.insert('end', message)
self.text.see('end')
self.text.update()
self.text.configure(state='disabled')
# write in the log file
self.log_file_id.write(message)
self.log_file_id.flush()
os.fsync(self.log_file_id.fileno())
#---------------
def get_log_file(self):
'''
Get the current log file name
'''
return self.log_file
#---------------
#-------------------------------------------------------------------------------
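def _example_dialog_log(parent):
    '''
    Illustrative sketch (not part of the original module): the typical life
    cycle of a "DialogLog" instance from a calling window. The head text and
    messages are assumptions for illustration only.
    '''
    dialog_log = DialogLog(parent, head='Example process', calling_function='example')
    dialog_log.write('Process started...\n')
    dialog_log.write('Process finished OK.\n')
    dialog_log.enable_button_close()
    parent.wait_window(dialog_log)
#---------------
#-------------------------------------------------------------------------------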
class DialogViewer(tkinter.Toplevel):
#---------------
WINDOW_MIN_HEIGHT = 650
WINDOW_MIN_WIDTH = 800
#---------------
def __init__(self, parent, file_path, cluster_name=None):
'''
        Execute actions corresponding to the creation of a "DialogViewer" instance.
'''
# save initial parameters in instance variables
self.parent = parent
self.file_path = file_path
self.cluster_name = cluster_name
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog Viewer.
self.create_window()
# build the graphical user interface
self.build_gui()
self.open_file()
#---------------
def create_window(self):
'''
Create the window of "DialogViewer".
'''
# define the dimensions
self.minsize(height=self.WINDOW_MIN_HEIGHT, width=self.WINDOW_MIN_WIDTH)
self.maxsize(height=self.winfo_screenheight(), width=self.winfo_screenwidth())
x = round((self.winfo_screenwidth() - self.WINDOW_MIN_WIDTH) / 2)
y = round((self.winfo_screenheight() - self.WINDOW_MIN_HEIGHT) / 2)
self.geometry('{}x{}+{}+{}'.format(self.WINDOW_MIN_WIDTH, self.WINDOW_MIN_HEIGHT, x, y))
# set the title
self.title('{0} - View - {1}'.format(xlib.get_project_name(), self.file_path))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
        Build the graphical user interface of "DialogViewer".
'''
# create "imagetk_close"
image_close = PIL.Image.open('./image_close.png')
imagetk_close = PIL.ImageTk.PhotoImage(image_close)
# create "imagetk_refresh"
image_refresh = PIL.Image.open('./image_refresh.png')
imagetk_refresh = PIL.ImageTk.PhotoImage(image_refresh)
# create "frame_toolbar" and register it with the grid geometry manager
self.frame_toolbar = tkinter.Frame(self, borderwidth=1, relief='raised')
self.frame_toolbar.pack(side='top', fill='x')
# create "button_close" and register it with the pack geometry manager
self.button_close = tkinter.Button(self.frame_toolbar, command=self.close, relief='flat', image=imagetk_close)
self.button_close.image = imagetk_close
self.button_close.pack(side='left', padx=2, pady=5)
# create "separator" and register it with the pack geometry manager
self.separator = tkinter.ttk.Separator(self.frame_toolbar, orient='vertical')
self.separator.pack(side='left', fill='y', padx=2, pady=2)
# create "button_refresh" and register it with the pack geometry manager
self.button_refresh = tkinter.Button(self.frame_toolbar, command=self.open_file, relief='flat', image=imagetk_refresh)
self.button_refresh.image = imagetk_refresh
self.button_refresh.pack(side='left', padx=2, pady=5)
# create "text" and register it with the grid geometry manager
self.text = tkinter.Text(self, wrap='none', state='disabled')
self.text.pack(expand='yes', fill='both')
# create "scrollbar_x" and register it with the pack geometry manager
self.scrollbar_x = tkinter.Scrollbar(self.text, orient='horizontal', command=self.text.xview)
self.scrollbar_x.pack(side='bottom', fill='x')
self.text.configure(xscrollcommand=self.scrollbar_x.set)
# create "scrollbar_y" and register it with the pack geometry manager
self.scrollbar_y = tkinter.Scrollbar(self.text, orient='vertical', command=self.text.yview)
self.scrollbar_y.pack(side='right', fill='y')
self.text.configure(yscrollcommand=self.scrollbar_y.set)
# link a handler to events
self.bind('<Alt-F4>', self.close)
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
#---------------
def open_file(self):
'''
        Open a file in "DialogViewer".
'''
# set cursor to show busy status
self.config(cursor='watch')
self.update()
self.text.config(cursor='watch')
self.text.update()
# initialize the control variable
OK = True
# when the file is in the local computer
if self.cluster_name == None:
local_file_path = self.file_path
# when the file is in a cluster
else:
# create the SSH transport connection
if OK:
(OK, error_list, ssh_transport) = xssh.create_ssh_transport_connection(self.cluster_name, 'master')
if not OK:
message = ''
for error in error_list:
message = '{0}{1}\n'.format(message, error)
tkinter.messagebox.showerror(self.title(), message)
# create the SFTP client
if OK:
sftp_client = xssh.create_sftp_client(ssh_transport)
# create the local path
if not os.path.exists(xlib.get_temp_dir()):
os.makedirs(xlib.get_temp_dir())
# get the log file name and build local and cluster paths
if OK:
local_file_path = '{0}/{1}'.format(xlib.get_temp_dir(), os.path.basename(self.file_path))
# download the log file from the cluster
if OK:
OK = xssh.get_file(sftp_client, self.file_path, local_file_path)
if not OK:
message = 'The log file {0} could not be downloaded.'.format(self.file_path)
tkinter.messagebox.showerror(self.title(), message)
# close the SSH transport connection
xssh.close_ssh_transport_connection(ssh_transport)
# load the file content in "text"
if OK:
self.text.configure(state='normal')
self.text.delete('1.0', 'end')
try:
with open(local_file_path) as local_file_id:
self.text.insert('1.0', local_file_id.read())
except:
tkinter.messagebox.showerror('{0} - Open'.format(xlib.get_project_name()), 'The file {0} can not be opened.'.format(local_file_path))
else:
self.text.configure(state='disable')
# set cursor to show normal status
self.config(cursor='')
self.update()
self.text.config(cursor='')
self.text.update()
#---------------
def close(self, event=None):
'''
Close "DialogViewer".
'''
        # delete all widgets and terminate the mainloop
self.destroy()
#---------------
#-------------------------------------------------------------------------------
class DialogEditor(tkinter.Toplevel):
#---------------
WINDOW_MIN_HEIGHT = 650
WINDOW_MIN_WIDTH = 800
#---------------
def __init__(self, parent, file_path):
'''
        Execute actions corresponding to the creation of a "DialogEditor" instance.
'''
# save initial parameters in instance variables
self.parent = parent
self.file_path = file_path
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog Editor.
self.create_window()
# build the graphical user interface
self.build_gui()
self.open_file()
#---------------
def create_window(self):
'''
Create the window of "DialogEditor".
'''
# define the dimensions
self.minsize(height=self.WINDOW_MIN_HEIGHT, width=self.WINDOW_MIN_WIDTH)
self.maxsize(height=self.winfo_screenheight(), width=self.winfo_screenwidth())
x = round((self.winfo_screenwidth() - self.WINDOW_MIN_WIDTH) / 2)
y = round((self.winfo_screenheight() - self.WINDOW_MIN_HEIGHT) / 2)
self.geometry('{}x{}+{}+{}'.format(self.WINDOW_MIN_WIDTH, self.WINDOW_MIN_HEIGHT, x, y))
# set the title
self.title('{0} - Edit - {1}'.format(xlib.get_project_name(), self.file_path))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
        Build the graphical user interface of "DialogEditor".
'''
# create "imagetk_close"
image_close = PIL.Image.open('./image_close.png')
imagetk_close = PIL.ImageTk.PhotoImage(image_close)
# create "imagetk_save"
image_save = PIL.Image.open('./image_save.png')
imagetk_save = PIL.ImageTk.PhotoImage(image_save)
# create "imagetk_undo"
image_undo = PIL.Image.open('./image_undo.gif')
imagetk_undo = PIL.ImageTk.PhotoImage(image_undo)
# create "imagetk_redo"
image_redo = PIL.Image.open('./image_redo.gif')
imagetk_redo = PIL.ImageTk.PhotoImage(image_redo)
# create "imagetk_cut"
image_cut = PIL.Image.open('./image_cut.gif')
imagetk_cut = PIL.ImageTk.PhotoImage(image_cut)
# create "imagetk_copy"
image_copy = PIL.Image.open('./image_copy.gif')
imagetk_copy = PIL.ImageTk.PhotoImage(image_copy)
# create "imagetk_paste"
image_paste = PIL.Image.open('./image_paste.gif')
imagetk_paste = PIL.ImageTk.PhotoImage(image_paste)
# create "frame_toolbar" and register it with the grid geometry manager
self.frame_toolbar = tkinter.Frame(self, borderwidth=1, relief='raised')
self.frame_toolbar.pack(side='top', fill='x')
# create "button_close" and register it with the pack geometry manager
self.button_close = tkinter.Button(self.frame_toolbar, command=self.close, relief='flat', image=imagetk_close)
self.button_close.image = imagetk_close
self.button_close.pack(side='left', padx=2, pady=5)
# create "separator_1" and register it with the pack geometry manager
self.separator_1 = tkinter.ttk.Separator(self.frame_toolbar, orient='vertical')
self.separator_1.pack(side='left', fill='y', padx=2, pady=2)
# create "button_save" and register it with the pack geometry manager
self.button_save = tkinter.Button(self.frame_toolbar, command=self.save, relief='flat', image=imagetk_save)
self.button_save.image = imagetk_save
self.button_save.pack(side='left', padx=2, pady=5)
# create "separator_2" and register it with the pack geometry manager
self.separator_2 = tkinter.ttk.Separator(self.frame_toolbar, orient='vertical')
self.separator_2.pack(side='left', fill='y', padx=2, pady=2)
# create "button_undo" and register it with the pack geometry manager
self.button_undo = tkinter.Button(self.frame_toolbar, command=self.undo, relief='flat', image=imagetk_undo)
self.button_undo.image = imagetk_undo
self.button_undo.pack(side='left', padx=2, pady=5)
# create "button_redo" and register it with the pack geometry manager
self.button_redo = tkinter.Button(self.frame_toolbar, command=self.redo, relief='flat', image=imagetk_redo)
self.button_redo.image = imagetk_redo
self.button_redo.pack(side='left', padx=2, pady=5)
# create "separator_3" and register it with the pack geometry manager
self.separator_3 = tkinter.ttk.Separator(self.frame_toolbar, orient='vertical')
self.separator_3.pack(side='left', fill='y', padx=2, pady=2)
# create "button_cut" and register it with the pack geometry manager
self.button_cut = tkinter.Button(self.frame_toolbar, command=self.cut, relief='flat', image=imagetk_cut)
self.button_cut.image = imagetk_cut
self.button_cut.pack(side='left', padx=2, pady=5)
# create "button_copy" and register it with the pack geometry manager
self.button_copy = tkinter.Button(self.frame_toolbar, command=self.copy, relief='flat', image=imagetk_copy)
self.button_copy.image = imagetk_copy
self.button_copy.pack(side='left', padx=2, pady=5)
# create "button_paste" and register it with the pack geometry manager
self.button_paste = tkinter.Button(self.frame_toolbar, command=self.paste, relief='flat', image=imagetk_paste)
self.button_paste.image = imagetk_paste
self.button_paste.pack(side='left', padx=2, pady=5)
# create "text" and register it with the grid geometry manager
self.text = tkinter.Text(self, wrap='none', undo=True)
self.text.pack(expand='yes', fill='both')
# create "scrollbar_x" and register it with the pack geometry manager
self.scrollbar_x = tkinter.Scrollbar(self.text, orient='horizontal', command=self.text.xview)
self.scrollbar_x.pack(side='bottom', fill='x')
self.text.configure(xscrollcommand=self.scrollbar_x.set)
# create "scrollbar_y" and register it with the pack geometry manager
self.scrollbar_y = tkinter.Scrollbar(self.text, orient='vertical', command=self.text.yview)
self.scrollbar_y.pack(side='right', fill='y')
self.text.configure(yscrollcommand=self.scrollbar_y.set)
# create "menu_popup" add add its menu items
self.menu_popup = tkinter.Menu(self.text)
self.menu_popup.add_command(label='Undo', command=self.undo, underline=0)
self.menu_popup.add_command(label='Redo', command=self.redo, underline=0)
self.menu_popup.add_separator()
self.menu_popup.add_command(label='Cut', command=self.cut, underline=0)
self.menu_popup.add_command(label='Copy', command=self.copy, underline=1)
self.menu_popup.add_command(label='Paste', command=self.paste, underline=0)
# link a handler to events
self.bind('<Alt-F4>', self.close)
# -- self.bind('<Control-c>', self.copy)
# -- self.bind('<Control-C>', self.copy)
self.bind('<Control-s>', self.save)
self.bind('<Control-S>', self.save)
# -- self.bind('<Control-v>', self.paste)
# -- self.bind('<Control-V>', self.paste)
# -- self.bind('<Control-x>', self.copy)
# -- self.bind('<Control-X>', self.copy)
self.bind('<Control-y>', self.redo)
self.bind('<Control-Y>', self.redo)
self.bind('<Control-z>', self.undo)
self.bind('<Control-Z>', self.undo)
self.text.bind('<Button-3>', self.show_menu_popup)
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
#---------------
def open_file(self):
'''
Open a config file in "DialogEditor".
'''
self.text.delete('1.0', 'end')
try:
with open(self.file_path) as id_config_file:
self.text.insert('1.0', id_config_file.read())
except:
tkinter.messagebox.showerror('{0} - Open'.format(xlib.get_project_name()), 'The file {0} can not be opened.'.format(self.file_path))
else:
self.text.edit_modified(False)
#---------------
def close(self, event=None):
'''
Close "DialogEditor".
'''
if self.text.edit_modified():
            if tkinter.messagebox.askyesno('{0} - Close'.format(xlib.get_project_name()), 'The file {0} has been modified. Do you want to save it?'.format(self.file_path)):
self.save()
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
def save(self, event=None):
'''
Save the file opened in "DialogEditor".
'''
try:
document = self.text.get('1.0', 'end')
with open(self.file_path, 'w') as id_config_file:
id_config_file.write(document)
except IOError:
            tkinter.messagebox.showwarning('{0} - Save'.format(xlib.get_project_name()), 'The file {0} can not be saved.'.format(self.file_path))
else:
self.text.edit_modified(False)
#---------------
def undo(self, event=None):
'''
Undo the last change.
'''
self.text.event_generate('<<Undo>>')
return 'break'
#---------------
def redo(self, event=None):
'''
Redo the last change.
'''
self.text.event_generate("<<Redo>>")
return 'break'
#---------------
def cut(self, event=None):
'''
        Cut the selected text and put it in the clipboard.
'''
self.text.event_generate('<<Cut>>')
return 'break'
#---------------
def copy(self, event=None):
'''
Copy the selected text in the clipboard.
'''
self.text.event_generate('<<Copy>>')
return 'break'
#---------------
def paste(self, event=None):
'''
Paste the text from the clipboard.
'''
self.text.event_generate('<<Paste>>')
return 'break'
#---------------
def show_menu_popup(self, event=None):
'''
Show the popup menu.
'''
self.menu_popup.tk_popup(event.x_root, event.y_root)
#---------------
#-------------------------------------------------------------------------------
class DialogAbout(tkinter.Toplevel):
#---------------
WINDOW_HEIGHT = 300
WINDOW_WIDTH = 525
#---------------
def __init__(self, parent):
'''
        Execute actions corresponding to the creation of a "DialogAbout" instance.
'''
# save initial parameters in instance variables
self.parent = parent
# call the parent init method
tkinter.Toplevel.__init__(self)
# create the window of the Dialog About.
self.create_window()
# build the graphical user interface
self.build_gui()
#---------------
def create_window(self):
'''
Create the window of "DialogAbout".
'''
# define the dimensions
self.minsize(height=self.WINDOW_HEIGHT, width=self.WINDOW_WIDTH)
self.maxsize(height=self.WINDOW_HEIGHT, width=self.WINDOW_WIDTH)
x = round((self.winfo_screenwidth() - self.WINDOW_WIDTH) / 2)
y = round((self.winfo_screenheight() - self.WINDOW_HEIGHT) / 2)
self.geometry('{}x{}+{}+{}'.format(self.WINDOW_WIDTH, self.WINDOW_HEIGHT, x, y))
# set the title
self.title('{0} - About'.format(xlib.get_project_name()))
# set the icon
image_app = PIL.Image.open(xlib.get_project_image_file())
self.photoimage_app = PIL.ImageTk.PhotoImage(image_app)
self.tk.call('wm', 'iconphoto', self._w, self.photoimage_app)
# associate this window with the parent window
self.transient(self.parent)
#---------------
def build_gui(self):
'''
        Build the graphical user interface of "DialogAbout".
'''
# create "label_proyect" and register it with the grid geometry manager
self.label_proyect = tkinter.Label(self, text='{0} v{1}'.format(xlib.get_project_name(), xlib.get_project_version()), font=tkinter.font.Font(size=10, weight='bold'))
self.label_proyect.grid(row=0, column=1, padx=(5,5), pady=(20,5), sticky='w')
# create "canvas_photoimage_app" and register it with the grid geometry manager
self.canvas_photoimage_app = tkinter.Canvas(self)
self.canvas_photoimage_app.create_image(128/2, 128/2, image=self.parent.photoimage_app)
self.canvas_photoimage_app.config(width=128, height=128)
self.canvas_photoimage_app.grid(row=1, column=0, rowspan=6, padx=(5,5), pady=(40,5), sticky='nsew')
# create "label_group" and register it with the grid geometry manager
self.label_group = tkinter.Label(self, text='GI Genética, Fisiología e Historia Forestal')
self.label_group.grid(row=1, column=1, padx=(5,5), pady=(20,5), sticky='w')
# create "label_department" and register it with the grid geometry manager
self.label_department = tkinter.Label(self, text='Dpto. Sistemas y Recursos Naturales')
self.label_department.grid(row=2, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_school" and register it with the grid geometry manager
self.label_school = tkinter.Label(self, text='ETSI Montes, Forestal y del Medio Natural')
self.label_school.grid(row=3, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_university" and register it with the grid geometry manager
self.label_university = tkinter.Label(self, text='Universidad Politécnica de Madrid')
self.label_university.grid(row=4, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_www1" and register it with the grid geometry manager
self.label_www1 = tkinter.Label(self, text='http://gfhforestal.com/')
self.label_www1.grid(row=5, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_www2" and register it with the grid geometry manager
self.label_www2 = tkinter.Label(self, text='https://github.com/ggfhf/')
self.label_www2.grid(row=6, column=1, padx=(5,5), pady=(5,5), sticky='w')
# create "label_fit" and register it with the grid geometry manager
self.label_fit = tkinter.Label(self, text=' '*5)
self.label_fit.grid(row=7, column=2, padx=(0,0), pady=(20,5), sticky='e')
# create "label_separator" and register it with the grid geometry manager
self.button_close = tkinter.ttk.Button(self, text='Close', underline=0, command=self.close)
self.button_close.grid(row=7, column=3, padx=(5,5), pady=(20,5), sticky='e')
# link a handler to events
self.bind('<Alt-c>', (lambda evento: self.button_close.invoke()))
self.bind('<Alt-C>', (lambda evento: self.button_close.invoke()))
self.bind('<KP_Enter>', (lambda evento: self.button_close.invoke()))
self.bind('<Return>', (lambda evento: self.button_close.invoke()))
# link a handler to interactions between the application and the window manager
self.protocol('WM_DELETE_WINDOW', self.close)
# set the focus in "button_close"
self.button_close.focus_set()
#---------------
def close(self):
'''
Close "DialogAbout".
'''
# delete all widgets and terminate the mainloop
self.destroy()
#---------------
#-------------------------------------------------------------------------------
if __name__ == '__main__':
print('This file contains the dialog classes corresponding to the graphical user interface of the NGScloud software package.')
sys.exit(0)
#-------------------------------------------------------------------------------
| gpl-3.0 |
xen0l/ansible | lib/ansible/modules/remote_management/ucs/ucs_ntp_server.py | 11 | 4688 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_ntp_server
short_description: Configures NTP server on Cisco UCS Manager
extends_documentation_fragment:
- ucs
description:
- Configures NTP server on Cisco UCS Manager.
- Examples can be used with the L(UCS Platform Emulator,https://communities.cisco.com/ucspe).
options:
state:
description:
- If C(absent), will remove an NTP server.
- If C(present), will add or update an NTP server.
choices: [absent, present]
default: present
ntp_server:
description:
- NTP server IP address or hostname.
- Enter up to 63 characters that form a valid hostname.
- Enter a valid IPV4 Address.
aliases: [ name ]
default: ""
description:
description:
- A user-defined description of the NTP server.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
default: ""
requirements:
- ucsmsdk
author:
- John McDonough (@movinalot)
- CiscoUcs (@CiscoUcs)
version_added: "2.7"
'''
EXAMPLES = r'''
- name: Configure NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: 10.10.10.10
description: Internal NTP Server by IP address
state: present
- name: Configure NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: pool.ntp.org
description: External NTP Server by hostname
state: present
- name: Remove NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: 10.10.10.10
state: absent
- name: Remove NTP server
ucs_ntp_server:
hostname: 172.16.143.150
username: admin
password: password
ntp_server: pool.ntp.org
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def run_module():
argument_spec = ucs_argument_spec
argument_spec.update(
ntp_server=dict(type='str', aliases=['name']),
description=dict(type='str', aliases=['descr'], default=''),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_if=[
['state', 'present', ['ntp_server']],
],
)
# UCSModule verifies ucsmsdk is present and exits on failure. Imports are below ucs object creation.
ucs = UCSModule(module)
err = False
from ucsmsdk.mometa.comm.CommNtpProvider import CommNtpProvider
changed = False
try:
mo_exists = False
props_match = False
dn = 'sys/svc-ext/datetime-svc/ntp-' + module.params['ntp_server']
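        # look up any existing NTP provider managed object at this distinguished name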
mo = ucs.login_handle.query_dn(dn)
if mo:
mo_exists = True
if module.params['state'] == 'absent':
if mo_exists:
if not module.check_mode:
ucs.login_handle.remove_mo(mo)
ucs.login_handle.commit()
changed = True
else:
if mo_exists:
# check top-level mo props
kwargs = dict(descr=module.params['description'])
if mo.check_prop_match(**kwargs):
props_match = True
if not props_match:
if not module.check_mode:
# update/add mo
mo = CommNtpProvider(parent_mo_or_dn='sys/svc-ext/datetime-svc',
name=module.params['ntp_server'],
descr=module.params['description'])
ucs.login_handle.add_mo(mo, modify_present=True)
ucs.login_handle.commit()
changed = True
except Exception as e:
err = True
ucs.result['msg'] = "setup error: %s " % str(e)
ucs.result['changed'] = changed
if err:
module.fail_json(**ucs.result)
module.exit_json(**ucs.result)
def main():
run_module()
if __name__ == '__main__':
main()
| gpl-3.0 |
basepi/hubble | hubblestack/files/hubblestack_nova/misc.py | 2 | 45373 | # -*- encoding: utf-8 -*-
'''
Hubble Nova plugin for running miscellaneous one-off python functions to
run more complex nova audits without allowing arbitrary command execution
from within the yaml profiles.
Sample YAML data, with inline comments:
# Top level key lets the module know it should look at this data
misc:
# Unique ID for this set of audits
nodev:
data:
# 'osfinger' grain, for multiplatform support
'Red Hat Enterprise Linux Server-6':
# tag is required
tag: CIS-1.1.10
function: misc_function_name
args: # optional
- first_arg
- second_arg
kwargs: # optional
first_kwarg: value
second_kwarg: value
labels:
- critical
- raiseticket
# Catch-all, if no other osfinger match was found
'*':
tag: generic_tag
function: misc_function_name
args: # optional
- first_arg
- second_arg
kwargs: # optional
first_kwarg: value
second_kwarg: value
# Description will be output with the results
description: '/home should be nodev'
'''
from __future__ import absolute_import
import logging
import fnmatch
import os
import re
import salt.utils
from salt.ext import six
from salt.exceptions import CommandExecutionError
from collections import Counter
log = logging.getLogger(__name__)
def __virtual__():
return True
def apply_labels(__data__, labels):
'''
    Filter out tests whose labels do not match the labels passed to the audit run, and return a new data structure containing only the labelled tests.
'''
    ret = {}
    if labels:
        labelled_test_cases = []
        for test_case in __data__.get('misc', []):
            # each test case is a dictionary with just one key-val pair. key=test name, val=test data, description etc
            if isinstance(test_case, dict) and test_case:
                test_case_body = test_case.get(next(iter(test_case)))
                if test_case_body.get('labels') and set(labels).issubset(set(test_case_body.get('labels', []))):
                    labelled_test_cases.append(test_case)
        ret['misc'] = labelled_test_cases
    else:
        ret = __data__
    return ret
def audit(data_list, tags, labels, debug=False, **kwargs):
'''
Run the misc audits contained in the data_list
'''
__data__ = {}
for profile, data in data_list:
_merge_yaml(__data__, data, profile)
__data__ = apply_labels(__data__, labels)
__tags__ = _get_tags(__data__)
if debug:
log.debug('misc audit __data__:')
log.debug(__data__)
log.debug('misc audit __tags__:')
log.debug(__tags__)
ret = {'Success': [], 'Failure': [], 'Controlled': []}
for tag in __tags__:
if fnmatch.fnmatch(tag, tags):
for tag_data in __tags__[tag]:
if 'control' in tag_data:
ret['Controlled'].append(tag_data)
continue
if 'function' not in tag_data:
continue
function = FUNCTION_MAP.get(tag_data['function'])
if not function:
if 'Errors' not in ret:
ret['Errors'] = []
ret['Errors'].append({tag: 'No function {0} found'
.format(tag_data['function'])})
continue
args = tag_data.get('args', [])
kwargs = tag_data.get('kwargs', {})
# Call the function
try:
result = function(*args, **kwargs)
except Exception as exc:
if 'Errors' not in ret:
ret['Errors'] = []
                    ret['Errors'].append({tag: 'An error occurred executing function {0}: {1}'
.format(tag_data['function'], str(exc))})
continue
if result is True:
ret['Success'].append(tag_data)
elif isinstance(result, six.string_types):
tag_data['failure_reason'] = result
ret['Failure'].append(tag_data)
else:
ret['Failure'].append(tag_data)
return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the misc level
'''
if 'misc' not in ret:
ret['misc'] = []
if 'misc' in data:
for key, val in data['misc'].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['misc'].append({key: val})
return ret
def _get_tags(data):
'''
Retrieve all the tags for this distro from the yaml
'''
ret = {}
distro = __grains__.get('osfinger')
for audit_dict in data.get('misc', []):
# misc:0
for audit_id, audit_data in audit_dict.iteritems():
# misc:0:nodev
tags_dict = audit_data.get('data', {})
# misc:0:nodev:data
tags = None
for osfinger in tags_dict:
if osfinger == '*':
continue
osfinger_list = [finger.strip() for finger in osfinger.split(',')]
for osfinger_glob in osfinger_list:
if fnmatch.fnmatch(distro, osfinger_glob):
tags = tags_dict.get(osfinger)
break
if tags is not None:
break
# If we didn't find a match, check for a '*'
if tags is None:
tags = tags_dict.get('*', {})
# misc:0:nodev:data:Debian-8
if 'tag' not in tags:
tags['tag'] = ''
tag = tags['tag']
if tag not in ret:
ret[tag] = []
formatted_data = {'tag': tag,
'module': 'misc'}
formatted_data.update(audit_data)
formatted_data.update(tags)
formatted_data.pop('data')
ret[tag].append(formatted_data)
return ret
############################
# Begin function definitions
############################
def _execute_shell_command(cmd, python_shell=False):
'''
This function will execute passed command in /bin/shell
'''
return __salt__['cmd.run'](cmd, python_shell=python_shell, shell='/bin/bash', ignore_retcode=True)
def _is_valid_home_directory(directory_path, check_slash_home=False):
directory_path = None if directory_path is None else directory_path.strip()
if directory_path is not None and directory_path != "" and os.path.isdir(directory_path):
if check_slash_home and directory_path == "/":
return False
else:
return True
return False
def _is_permission_in_limit(max_permission, given_permission):
'''
    Return True only if given_permission is not more lenient than max_permission. In other words, if
r or w or x is present in given_permission but absent in max_permission, it should return False
Takes input two integer values from 0 to 7.
'''
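    # e.g. _is_permission_in_limit(6, 4) is True (read is allowed by rw),
    # while _is_permission_in_limit(6, 1) is False (execute is not allowed by rw)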
max_permission = int(max_permission)
given_permission = int(given_permission)
allowed_r = False
allowed_w = False
allowed_x = False
given_r = False
given_w = False
given_x = False
if max_permission >= 4:
allowed_r = True
max_permission = max_permission - 4
if max_permission >= 2:
allowed_w = True
max_permission = max_permission - 2
if max_permission >= 1:
allowed_x = True
if given_permission >= 4:
given_r = True
given_permission = given_permission - 4
if given_permission >= 2:
given_w = True
given_permission = given_permission - 2
if given_permission >= 1:
given_x = True
if given_r and (not allowed_r):
return False
if given_w and (not allowed_w):
return False
if given_x and (not allowed_x):
return False
return True
def check_all_ports_firewall_rules(reason=''):
'''
Ensure firewall rule for all open ports
'''
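    # netstat output has two sections; find the line numbers of the two section headers,
    # collect the listening ports between them (excluding loopback), then report any port
    # that has no matching dpt: rule in the iptables INPUT chain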
start_open_ports = (_execute_shell_command('netstat -ln | grep "Active Internet connections (only servers)" -n | cut -d ":" -f1', python_shell=True)).strip()
end_open_ports = (_execute_shell_command('netstat -ln | grep "Active UNIX domain sockets (only servers)" -n | cut -d ":" -f1', python_shell=True)).strip()
open_ports = (_execute_shell_command('netstat -ln | awk \'FNR > ' + start_open_ports + ' && FNR < ' + end_open_ports + ' && $6 == "LISTEN" && $4 !~ /127.0.0.1/ {print $4}\' | sed -e "s/.*://"', python_shell=True)).strip()
open_ports = open_ports.split('\n') if open_ports != "" else []
firewall_ports = (_execute_shell_command('iptables -L INPUT -v -n | awk \'FNR > 2 && $11 != "" && $11 ~ /^dpt:/ {print $11}\' | sed -e "s/.*://"', python_shell=True)).strip()
firewall_ports = firewall_ports.split('\n') if firewall_ports != "" else []
no_firewall_ports = []
for open_port in open_ports:
if open_port not in firewall_ports:
no_firewall_ports.append(open_port)
return True if len(no_firewall_ports) == 0 else str(no_firewall_ports)
def check_password_fields_not_empty(reason=''):
'''
Ensure password fields are not empty
'''
result = _execute_shell_command('cat /etc/shadow | awk -F: \'($2 == "" ) { print $1 " does not have a password "}\'', python_shell=True)
return True if result == '' else result
def ungrouped_files_or_dir(reason=''):
'''
Ensure no ungrouped files or directories exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
result = _execute_shell_command('df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -nogroup', python_shell=True)
return True if result == '' else result
def unowned_files_or_dir(reason=''):
'''
Ensure no unowned files or directories exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
result = _execute_shell_command('df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -nouser', python_shell=True)
return True if result == '' else result
def world_writable_file(reason=''):
'''
Ensure no world writable files exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
result = _execute_shell_command('df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -type f -perm -0002', python_shell=True)
return True if result == '' else result
def system_account_non_login(non_login_shell='/sbin/nologin', max_system_uid='500', except_for_users=''):
'''
Ensure system accounts are non-login
'''
users_list = ['root','halt','sync','shutdown']
for user in except_for_users.split(","):
if user.strip() != "":
users_list.append(user.strip())
result = []
cmd = __salt__["cmd.run_all"]('egrep -v "^\+" /etc/passwd ')
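    # each /etc/passwd line has fields name:passwd:uid:gid:gecos:home:shell;
    # tokens[2] is the UID and tokens[6] the login shell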
for line in cmd['stdout'].split('\n'):
tokens = line.split(':')
if tokens[0] not in users_list and int(tokens[2]) < int(max_system_uid) and tokens[6] not in ( non_login_shell , "/bin/false" ):
result.append(line)
return True if result == [] else str(result)
def sticky_bit_on_world_writable_dirs(reason=''):
'''
Ensure sticky bit is set on all world-writable directories
'''
raise CommandExecutionError('Module disabled due to performance concerns')
result = _execute_shell_command('df --local -P | awk {\'if (NR!=1) print $6\'} | xargs -I \'{}\' find \'{}\' -xdev -type d \( -perm -0002 -a ! -perm -1000 \) 2>/dev/null', python_shell=True)
return True if result == '' else "There are failures"
def default_group_for_root(reason=''):
'''
Ensure default group for the root account is GID 0
'''
result = _execute_shell_command('grep "^root:" /etc/passwd | cut -f4 -d:', python_shell=True)
result = result.strip()
return True if result == '0' else False
def root_is_only_uid_0_account(reason=''):
'''
Ensure root is the only UID 0 account
'''
result = _execute_shell_command('cat /etc/passwd | awk -F: \'($3 == 0) { print $1 }\'', python_shell=True)
return True if result.strip() == 'root' else result
def test_mount_attrs(mount_name, attribute, check_type='hard'):
'''
Ensure that a given directory is mounted with appropriate attributes
If check_type is soft, then in absence of volume, True will be returned
If check_type is hard, then in absence of volume, False will be returned
'''
# check that the path exists on system
command = 'test -e ' + mount_name
results = __salt__['cmd.run_all'](command, ignore_retcode=True)
retcode = results['retcode']
if str(retcode) == '1':
return True if check_type == "soft" else (mount_name + " folder does not exist")
# if the path exits, proceed with following code
output = __salt__['cmd.run']('cat /proc/mounts')
if not re.search(mount_name, output, re.M):
return True if check_type == "soft" else (mount_name + " is not mounted")
else:
for line in output.splitlines():
if mount_name in line and attribute not in line:
return str(line)
return True
def check_time_synchronization(reason=''):
'''
Ensure that some service is running to synchronize the system clock
'''
command = 'systemctl status systemd-timesyncd ntpd | grep "Active: active (running)"'
output = _execute_shell_command(command, python_shell=True)
if output.strip() == '':
return "neither ntpd nor timesyncd is running"
else:
return True
def restrict_permissions(path, permission):
'''
    Ensure that the file permissions on path are equal to or stricter than the permissions given in the argument
'''
path_details = __salt__['file.stats'](path)
given_permission = path_details.get('mode')
given_permission = given_permission[-3:]
max_permission = str(permission)
if (_is_permission_in_limit(max_permission[0], given_permission[0]) and _is_permission_in_limit(max_permission[1], given_permission[1]) and _is_permission_in_limit(max_permission[2], given_permission[2])):
return True
return given_permission
def check_path_integrity(reason=''):
'''
Ensure that system PATH variable is not malformed.
'''
script = """
if [ "`echo $PATH | grep ::`" != "" ]; then
echo "Empty Directory in PATH (::)"
fi
if [ "`echo $PATH | grep :$`" != "" ]; then
echo "Trailing : in PATH"
fi
p=`echo $PATH | sed -e 's/::/:/' -e 's/:$//' -e 's/:/ /g'`
set -- $p
while [ "$1" != "" ]; do
if [ "$1" = "." ]; then
echo "PATH contains ."
shift
continue
fi
if [ -d $1 ]; then
dirperm=`ls -ldH $1 | cut -f1 -d" "`
if [ `echo $dirperm | cut -c6` != "-" ]; then
echo "Group Write permission set on directory $1"
fi
if [ `echo $dirperm | cut -c9` != "-" ]; then
echo "Other Write permission set on directory $1"
fi
dirown=`ls -ldH $1 | awk '{print $3}'`
if [ "$dirown" != "root" ] ; then
echo $1 is not owned by root
fi
else
echo $1 is not a directory
fi
shift
done
"""
output = _execute_shell_command(script, python_shell=True)
return True if output.strip() == '' else output
def check_duplicate_uids(reason=''):
'''
    Return True if no duplicate user ids exist in /etc/passwd, else return the duplicate ids
'''
uids = _execute_shell_command("cat /etc/passwd | cut -f3 -d\":\"", python_shell=True).strip()
uids = uids.split('\n') if uids != "" else []
duplicate_uids = [k for k, v in Counter(uids).items() if v > 1]
if duplicate_uids is None or duplicate_uids == []:
return True
return str(duplicate_uids)
def check_duplicate_gids(reason=''):
'''
    Return True if no duplicate group ids exist in /etc/group, else return the duplicate ids
'''
gids = _execute_shell_command("cat /etc/group | cut -f3 -d\":\"", python_shell=True).strip()
gids = gids.split('\n') if gids != "" else []
duplicate_gids = [k for k, v in Counter(gids).items() if v > 1]
if duplicate_gids is None or duplicate_gids == []:
return True
return str(duplicate_gids)
def check_duplicate_unames(reason=''):
'''
    Return True if no duplicate user names exist in /etc/passwd, else return the duplicate names
'''
unames = _execute_shell_command("cat /etc/passwd | cut -f1 -d\":\"", python_shell=True).strip()
unames = unames.split('\n') if unames != "" else []
duplicate_unames = [k for k, v in Counter(unames).items() if v > 1]
if duplicate_unames is None or duplicate_unames == []:
return True
return str(duplicate_unames)
def check_duplicate_gnames(reason=''):
'''
    Return True if no duplicate group names exist in /etc/group, else return the duplicate names
'''
gnames = _execute_shell_command("cat /etc/group | cut -f1 -d\":\"", python_shell=True).strip()
gnames = gnames.split('\n') if gnames != "" else []
duplicate_gnames = [k for k, v in Counter(gnames).items() if v > 1]
if duplicate_gnames is None or duplicate_gnames == []:
return True
return str(duplicate_gnames)
def check_directory_files_permission(path, permission):
'''
Check all files permission inside a directory
'''
blacklisted_characters = '[^a-zA-Z0-9-_/]'
if "-exec" in path or re.findall(blacklisted_characters, path):
raise CommandExecutionError("Profile parameter '{0}' not a safe pattern".format(path))
files_list = _execute_shell_command("find {0} -type f".format(path)).strip()
files_list = files_list.split('\n') if files_list != "" else []
bad_permission_files = []
for file_in_directory in files_list:
per = restrict_permissions(file_in_directory, permission)
if per is not True:
bad_permission_files += [file_in_directory + ": Bad Permission - " + per + ":"]
return True if bad_permission_files == [] else str(bad_permission_files)
def check_core_dumps(reason=''):
'''
Ensure core dumps are restricted
'''
hard_core_dump_value = _execute_shell_command("grep -R -E \"hard +core\" /etc/security/limits.conf /etc/security/limits.d/ | awk '{print $4}'", python_shell=True).strip()
hard_core_dump_value = hard_core_dump_value.split('\n') if hard_core_dump_value != "" else []
if '0' in hard_core_dump_value:
return True
if hard_core_dump_value is None or hard_core_dump_value == [] or hard_core_dump_value == "":
return "'hard core' not found in any file"
return str(hard_core_dump_value)
def check_service_status(service_name, state):
'''
    Ensure that the given service is in the required state ('enabled' or 'disabled').
    Return True if it is; otherwise return the enablement status reported by systemctl,
    or an explanatory message if the service does not exist.
'''
all_services = __salt__['cmd.run']('systemctl list-unit-files')
if re.search(service_name, all_services, re.M):
output = __salt__['cmd.retcode']('systemctl is-enabled ' + service_name, ignore_retcode=True)
if (state == "disabled" and str(output) == "1") or (state == "enabled" and str(output) == "0"):
return True
else:
return __salt__['cmd.run_stdout']('systemctl is-enabled ' + service_name, ignore_retcode=True)
else:
if state == "disabled":
return True
else:
            return 'Looks like ' + service_name + ' does not exist. Please check.'
def check_ssh_timeout_config(reason=''):
'''
Ensure SSH Idle Timeout Interval is configured
'''
client_alive_interval = _execute_shell_command("grep \"^ClientAliveInterval\" /etc/ssh/sshd_config | awk '{print $NF}'", python_shell=True).strip()
if client_alive_interval != '' and int(client_alive_interval) <= 300:
client_alive_count_max = _execute_shell_command("grep \"^ClientAliveCountMax\" /etc/ssh/sshd_config | awk '{print $NF}'", python_shell=True).strip()
if client_alive_count_max != '' and int(client_alive_count_max) <= 3:
return True
else:
return "ClientAliveCountMax value should be less than equal to 3"
else:
return "ClientAliveInterval value should be less than equal to 300"
def check_unowned_files(reason=''):
'''
Ensure no unowned files or directories exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
unowned_files = _execute_shell_command("df --local -P | awk 'NR!=1 {print $6}' | xargs -I '{}' find '{}' -xdev -nouser 2>/dev/null", python_shell=True).strip()
unowned_files = unowned_files.split('\n') if unowned_files != "" else []
# The command above only searches local filesystems, there may still be compromised items on network
# mounted partitions.
# Following command will check each partition for unowned files
unowned_partition_files = _execute_shell_command("mount | awk '{print $3}' | xargs -I '{}' find '{}' -xdev -nouser 2>/dev/null", python_shell=True).strip()
unowned_partition_files = unowned_partition_files.split('\n') if unowned_partition_files != "" else []
unowned_files = unowned_files + unowned_partition_files
return True if unowned_files == [] else str(list(set(unowned_files)))
def check_ungrouped_files(reason=''):
'''
Ensure no ungrouped files or directories exist
'''
raise CommandExecutionError('Module disabled due to performance concerns')
ungrouped_files = _execute_shell_command("df --local -P | awk 'NR!=1 {print $6}' | xargs -I '{}' find '{}' -xdev -nogroup 2>/dev/null", python_shell=True).strip()
ungrouped_files = ungrouped_files.split('\n') if ungrouped_files != "" else []
# The command above only searches local filesystems, there may still be compromised items on network
# mounted partitions.
# Following command will check each partition for unowned files
ungrouped_partition_files = _execute_shell_command("mount | awk '{print $3}' | xargs -I '{}' find '{}' -xdev -nogroup 2>/dev/null", python_shell=True).strip()
ungrouped_partition_files = ungrouped_partition_files.split('\n') if ungrouped_partition_files != "" else []
ungrouped_files = ungrouped_files + ungrouped_partition_files
return True if ungrouped_files == [] else str(list(set(ungrouped_files)))
def check_all_users_home_directory(max_system_uid):
'''
Ensure all users' home directories exist
'''
max_system_uid = int(max_system_uid)
users_uids_dirs = _execute_shell_command("cat /etc/passwd | awk -F: '{ print $1 \" \" $3 \" \" $6 \" \" $7}'", python_shell=True).strip()
users_uids_dirs = users_uids_dirs.split('\n') if users_uids_dirs else []
error = []
for user_data in users_uids_dirs:
user_uid_dir = user_data.strip().split(" ")
if len(user_uid_dir) < 4:
user_uid_dir = user_uid_dir + [''] * (4 - len(user_uid_dir))
if user_uid_dir[1].isdigit():
if not _is_valid_home_directory(user_uid_dir[2], True) and int(user_uid_dir[1]) >= max_system_uid and user_uid_dir[0] != "nfsnobody" \
and 'nologin' not in user_uid_dir[3] and 'false' not in user_uid_dir[3]:
error += ["Either home directory " + user_uid_dir[2] + " of user " + user_uid_dir[0] + " is invalid or does not exist."]
else:
error += ["User " + user_uid_dir[0] + " has invalid uid " + user_uid_dir[1]]
return True if not error else str(error)
def check_users_home_directory_permissions(max_allowed_permission='750', except_for_users=''):
'''
Ensure users' home directories permissions are 750 or more restrictive
'''
users_list = ['root','halt','sync','shutdown']
for user in except_for_users.split(","):
if user.strip() != "":
users_list.append(user.strip())
users_dirs = []
cmd = __salt__["cmd.run_all"]('egrep -v "^\+" /etc/passwd ')
for line in cmd['stdout'].split('\n'):
tokens = line.split(':')
if tokens[0] not in users_list and 'nologin' not in tokens[6] and 'false' not in tokens[6]:
users_dirs.append(tokens[0] + " " + tokens[5])
error = []
for user_dir in users_dirs:
user_dir = user_dir.split(" ")
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
result = restrict_permissions(user_dir[1], max_allowed_permission)
if result is not True:
error += ["permission on home directory " + user_dir[1] + " of user " + user_dir[0] + " is wrong: " + result]
return True if error == [] else str(error)
def check_users_own_their_home(max_system_uid):
'''
Ensure users own their home directories
'''
max_system_uid = int(max_system_uid)
users_uids_dirs = _execute_shell_command("cat /etc/passwd | awk -F: '{ print $1 \" \" $3 \" \" $6 \" \" $7}'", python_shell=True).strip()
users_uids_dirs = users_uids_dirs.split('\n') if users_uids_dirs != "" else []
error = []
for user_data in users_uids_dirs:
user_uid_dir = user_data.strip().split(" ")
if len(user_uid_dir) < 4:
user_uid_dir = user_uid_dir + [''] * (4 - len(user_uid_dir))
if user_uid_dir[1].isdigit():
if not _is_valid_home_directory(user_uid_dir[2]):
if int(user_uid_dir[1]) >= max_system_uid and 'nologin' not in user_uid_dir[3] and 'false' not in user_uid_dir[3]:
error += ["Either home directory " + user_uid_dir[2] + " of user " + user_uid_dir[0] + " is invalid or does not exist."]
elif int(user_uid_dir[1]) >= max_system_uid and user_uid_dir[0] != "nfsnobody" and 'nologin' not in user_uid_dir[3] \
and 'false' not in user_uid_dir[3]:
owner = __salt__['cmd.run']("stat -L -c \"%U\" \"" + user_uid_dir[2] + "\"")
if owner != user_uid_dir[0]:
error += ["The home directory " + user_uid_dir[2] + " of user " + user_uid_dir[0] + " is owned by " + owner]
else:
error += ["User " + user_uid_dir[0] + " has invalid uid " + user_uid_dir[1]]
return True if not error else str(error)
def check_users_dot_files(reason=''):
'''
Ensure users' dot files are not group or world writable
'''
users_dirs = _execute_shell_command("cat /etc/passwd | egrep -v '(root|halt|sync|shutdown)' | awk -F: '($7 != \"/sbin/nologin\") {print $1\" \"$6}'", python_shell=True).strip()
users_dirs = users_dirs.split('\n') if users_dirs != "" else []
error = []
for user_dir in users_dirs:
user_dir = user_dir.split()
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
dot_files = _execute_shell_command("find " + user_dir[1] + " -name \".*\"").strip()
dot_files = dot_files.split('\n') if dot_files != "" else []
for dot_file in dot_files:
if os.path.isfile(dot_file):
path_details = __salt__['file.stats'](dot_file)
given_permission = path_details.get('mode')
file_permission = given_permission[-3:]
if file_permission[1] in ["2", "3", "6", "7"]:
error += ["Group Write permission set on file " + dot_file + " for user " + user_dir[0]]
if file_permission[2] in ["2", "3", "6", "7"]:
error += ["Other Write permission set on file " + dot_file + " for user " + user_dir[0]]
return True if error == [] else str(error)
def check_users_forward_files(reason=''):
'''
Ensure no users have .forward files
'''
users_dirs = _execute_shell_command("cat /etc/passwd | awk -F: '{ print $1\" \"$6 }'", python_shell=True).strip()
users_dirs = users_dirs.split('\n') if users_dirs != "" else []
error = []
for user_dir in users_dirs:
user_dir = user_dir.split()
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
forward_file = _execute_shell_command("find " + user_dir[1] + " -maxdepth 1 -name \".forward\"").strip()
if forward_file is not None and os.path.isfile(forward_file):
error += ["Home directory: " + user_dir[1] + ", for user: " + user_dir[0] + " has " + forward_file + " file"]
return True if error == [] else str(error)
def check_users_netrc_files(reason=''):
'''
Ensure no users have .netrc files
'''
users_dirs = _execute_shell_command("cat /etc/passwd | awk -F: '{ print $1\" \"$6 }'", python_shell=True).strip()
users_dirs = users_dirs.split('\n') if users_dirs != "" else []
error = []
for user_dir in users_dirs:
user_dir = user_dir.split()
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
netrc_file = _execute_shell_command("find " + user_dir[1] + " -maxdepth 1 -name \".netrc\"").strip()
if netrc_file is not None and os.path.isfile(netrc_file):
error += ["Home directory: " + user_dir[1] + ", for user: " + user_dir[0] + " has .netrc file"]
return True if error == [] else str(error)
def check_groups_validity(reason=''):
'''
Ensure all groups in /etc/passwd exist in /etc/group
'''
group_ids_in_passwd = _execute_shell_command("cut -s -d: -f4 /etc/passwd 2>/dev/null", python_shell=True).strip()
group_ids_in_passwd = group_ids_in_passwd.split('\n') if group_ids_in_passwd != "" else []
group_ids_in_passwd = list(set(group_ids_in_passwd))
invalid_groups = []
for group_id in group_ids_in_passwd:
group_presence_validity = _execute_shell_command("getent group " + group_id + " 2>/dev/null 1>/dev/null; echo $?", python_shell=True).strip()
if str(group_presence_validity) != "0":
invalid_groups += ["Invalid groupid: " + group_id + " in /etc/passwd file"]
return True if invalid_groups == [] else str(invalid_groups)
def ensure_reverse_path_filtering(reason=''):
'''
Ensure Reverse Path Filtering is enabled
'''
error_list = []
command = "sysctl net.ipv4.conf.all.rp_filter 2> /dev/null"
output = _execute_shell_command(command, python_shell=True)
if output.strip() == '':
error_list.append("net.ipv4.conf.all.rp_filter not found")
search_results = re.findall("rp_filter = (\d+)", output)
result = int(search_results[0])
if result < 1:
error_list.append("net.ipv4.conf.all.rp_filter value set to " + str(result))
command = "sysctl net.ipv4.conf.default.rp_filter 2> /dev/null"
output = _execute_shell_command(command, python_shell=True)
if output.strip() == '':
error_list.append("net.ipv4.conf.default.rp_filter not found")
search_results = re.findall("rp_filter = (\d+)", output)
result = int(search_results[0])
if result < 1:
error_list.append("net.ipv4.conf.default.rp_filter value set to " + str(result))
if len(error_list) > 0:
return str(error_list)
else:
return True
def check_users_rhosts_files(reason=''):
'''
Ensure no users have .rhosts files
'''
users_dirs = _execute_shell_command("cat /etc/passwd | egrep -v '(root|halt|sync|shutdown)' | awk -F: '($7 != \"/sbin/nologin\") {print $1\" \"$6}'", python_shell=True).strip()
users_dirs = users_dirs.split('\n') if users_dirs != "" else []
error = []
for user_dir in users_dirs:
user_dir = user_dir.split()
if len(user_dir) < 2:
user_dir = user_dir + [''] * (2 - len(user_dir))
if _is_valid_home_directory(user_dir[1]):
rhosts_file = _execute_shell_command("find " + user_dir[1] + " -maxdepth 1 -name \".rhosts\"").strip()
if rhosts_file is not None and os.path.isfile(rhosts_file):
error += ["Home directory: " + user_dir[1] + ", for user: " + user_dir[0] + " has .rhosts file"]
return True if error == [] else str(error)
def check_netrc_files_accessibility(reason=''):
'''
Ensure users' .netrc Files are not group or world accessible
'''
script = """
for dir in `cat /etc/passwd | egrep -v '(root|sync|halt|shutdown)' | awk -F: '($7 != "/sbin/nologin") { print $6 }'`; do
for file in $dir/.netrc; do
if [ ! -h "$file" -a -f "$file" ]; then
fileperm=`ls -ld $file | cut -f1 -d" "`
if [ `echo $fileperm | cut -c5` != "-" ]; then
echo "Group Read set on $file"
fi
if [ `echo $fileperm | cut -c6` != "-" ]; then
echo "Group Write set on $file"
fi
if [ `echo $fileperm | cut -c7` != "-" ]; then
echo "Group Execute set on $file"
fi
if [ `echo $fileperm | cut -c8` != "-" ]; then
echo "Other Read set on $file"
fi
if [ `echo $fileperm | cut -c9` != "-" ]; then
echo "Other Write set on $file"
fi
if [ `echo $fileperm | cut -c10` != "-" ]; then
echo "Other Execute set on $file"
fi
fi
done
done
"""
output = _execute_shell_command(script, python_shell=True)
return True if output.strip() == '' else output
def _grep(path,
pattern,
*args):
'''
Grep for a string in the specified file
.. note::
This function's return value is slated for refinement in future
versions of Salt
path
Path to the file to be searched
.. note::
Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
is being used then the path should be quoted to keep the shell from
attempting to expand the glob expression.
pattern
Pattern to match. For example: ``test``, or ``a[0-5]``
opts
Additional command-line flags to pass to the grep command. For example:
``-v``, or ``-i -B2``
.. note::
The options should come after a double-dash (as shown in the
examples below) to keep Salt's own argument parser from
interpreting them.
CLI Example:
.. code-block:: bash
salt '*' file.grep /etc/passwd nobody
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
'''
path = os.path.expanduser(path)
if args:
options = ' '.join(args)
else:
options = ''
cmd = (
r'''grep {options} {pattern} {path}'''
.format(
options=options,
pattern=pattern,
path=path,
)
)
try:
ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
except (IOError, OSError) as exc:
raise CommandExecutionError(exc.strerror)
return ret
def check_list_values(file_path, match_pattern, value_pattern, grep_arg, white_list, black_list, value_delimter):
'''
    This function will first get the line matching the given match_pattern.
    After this, the value pattern will be extracted from that line.
    The extracted value string will be split by value_delimter to get the list of values.
    match_pattern is the regex pattern passed to the grep command.
    value_pattern is the regex used with Python's re module to extract the matched values.
    Only one of white_list and black_list is allowed.
    white_list and black_list should contain comma-separated values.
Example for CIS-2.2.1.2
ensure_ntp_configured:
data:
CentOS Linux-7:
tag: 2.2.1.2
function: check_list_values
args:
- /etc/ntp.conf
- '^restrict.*default'
- '^restrict.*default(.*)$'
- null
- kod,nomodify,notrap,nopeer,noquery
- null
- ' '
description: Ensure ntp is configured
'''
list_delimter = ","
if black_list is not None and white_list is not None:
return "Both black_list and white_list values are not allowed."
grep_args = [] if grep_arg is None else [grep_arg]
matched_lines = _grep(file_path, match_pattern, *grep_args).get('stdout')
if not matched_lines:
return "No match found for the given pattern: " + str(match_pattern)
matched_lines = matched_lines.split('\n') if matched_lines is not None else []
error = []
for matched_line in matched_lines:
regexp = re.compile(value_pattern)
matched_values = regexp.search(matched_line).group(1)
matched_values = matched_values.strip().split(value_delimter) if matched_values is not None else []
if white_list is not None:
values_not_in_white_list = list(set(matched_values) - set(white_list.strip().split(list_delimter)))
if values_not_in_white_list != []:
error += ["values not in whitelist: " + str(values_not_in_white_list)]
else:
values_in_black_list = list(set(matched_values).intersection(set(black_list.strip().split(list_delimter))))
if values_in_black_list != []:
error += ["values in blacklist: " + str(values_in_black_list)]
return True if error == [] else str(error)
def mail_conf_check(reason=''):
'''
Ensure mail transfer agent is configured for local-only mode
'''
valid_addresses = ["localhost", "127.0.0.1", "::1"]
mail_addresses = _execute_shell_command("grep '^[[:blank:]]*inet_interfaces' /etc/postfix/main.cf | awk -F'=' '{print $2}'", python_shell=True).strip()
mail_addresses = str(mail_addresses)
mail_addresses = mail_addresses.split(',') if mail_addresses != "" else []
mail_addresses = map(str.strip, mail_addresses)
invalid_addresses = list(set(mail_addresses) - set(valid_addresses))
return str(invalid_addresses) if invalid_addresses != [] else True
def check_if_any_pkg_installed(args):
'''
:param args: Comma separated list of packages those needs to be verified
:return: True if any of the input package is installed else False
'''
result = False
for pkg in args.split(','):
if __salt__['pkg.version'](pkg):
result = True
break
return result
def ensure_max_password_expiration(allow_max_days, except_for_users=''):
'''
Ensure max password expiration days is set to the value less than or equal to that given in args
'''
grep_args = []
pass_max_days_output = _grep('/etc/login.defs', '^PASS_MAX_DAYS', *grep_args).get('stdout')
if not pass_max_days_output:
return "PASS_MAX_DAYS must be set"
system_pass_max_days = pass_max_days_output.split()[1]
if not _is_int(system_pass_max_days):
return "PASS_MAX_DAYS must be set properly"
if int(system_pass_max_days) > allow_max_days:
return "PASS_MAX_DAYS must be less than or equal to " + str(allow_max_days)
#fetch all users with passwords
grep_args.append('-E')
all_users = _grep('/etc/shadow', '^[^:]+:[^\!*]', *grep_args).get('stdout')
except_for_users_list=[]
for user in except_for_users.split(","):
if user.strip() != "":
except_for_users_list.append(user.strip())
result = []
for line in all_users.split('\n'):
user = line.split(':')[0]
#As per CIS doc, 5th field is the password max expiry days
user_passwd_expiry = line.split(':')[4]
if not user in except_for_users_list and _is_int(user_passwd_expiry) and int(user_passwd_expiry) > allow_max_days:
result.append('User ' + user + ' has max password expiry days ' + user_passwd_expiry + ', which is more than ' + str(allow_max_days))
return True if result == [] else str(result)
def _is_int(input):
try:
num = int(input)
except ValueError:
return False
return True
def check_sshd_paramters(pattern, values=None, comparetype='regex'):
'''
    This function will check whether the given pattern is present in the running sshd configuration.
    The values configured for that pattern can also be checked.
    To check for values in any order, use comparetype 'only'.
Example:
1) To check for INFO for LogLevel
check_log_level:
data:
'*':
tag: CIS-1.1.1
function: check_sshd_paramters
args:
- '^LogLevel\s+INFO'
description: Ensure SSH LogLevel is set to INFO
2) To check for only approved ciphers in any order
sshd_approved_cipher:
data:
'*':
tag: CIS-1.1.2
function: check_sshd_paramters
args:
- '^Ciphers'
kwargs:
values: aes256-ctr,aes192-ctr,aes128-ctr
comparetype: only
description: Ensure only approved ciphers are used
'''
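    # 'sshd -T' dumps the effective sshd configuration with all defaults applied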
output = __salt__['cmd.run']('sshd -T')
if comparetype == 'only':
if not values:
return "You need to provide values for comparetype 'only'."
else:
for line in output.splitlines():
if re.match(pattern, line, re.I):
expected_values = values.split(',')
found_values = line[len(pattern):].strip().split(',')
for found_value in found_values:
if found_value in expected_values:
continue
else:
return "Allowed values for pattern: " + pattern + " are " + values
return True
return "Looks like pattern i.e. " + pattern + " not found in sshd -T. Please check."
elif comparetype == 'regex':
if re.search(pattern, output, re.M | re.I):
return True
else:
return "Looks like pattern i.e. " + pattern + " not found in sshd -T. Please check."
else:
return "The comparetype: " + comparetype + " not found. It can be 'regex' or 'only'. Please check."
def test_success():
'''
Automatically returns success
'''
return True
def test_failure():
'''
Automatically returns failure, no reason
'''
return False
def test_failure_reason(reason):
'''
Automatically returns failure, with a reason (first arg)
'''
return reason
FUNCTION_MAP = {
'check_all_ports_firewall_rules': check_all_ports_firewall_rules,
'check_password_fields_not_empty': check_password_fields_not_empty,
'ungrouped_files_or_dir': ungrouped_files_or_dir,
'unowned_files_or_dir': unowned_files_or_dir,
'world_writable_file': world_writable_file,
'system_account_non_login': system_account_non_login,
'sticky_bit_on_world_writable_dirs': sticky_bit_on_world_writable_dirs,
'default_group_for_root': default_group_for_root,
'root_is_only_uid_0_account': root_is_only_uid_0_account,
'test_success': test_success,
'test_failure': test_failure,
'test_failure_reason': test_failure_reason,
'test_mount_attrs': test_mount_attrs,
'check_path_integrity': check_path_integrity,
'restrict_permissions': restrict_permissions,
'check_time_synchronization': check_time_synchronization,
'check_core_dumps': check_core_dumps,
'check_directory_files_permission': check_directory_files_permission,
'check_duplicate_gnames': check_duplicate_gnames,
'check_duplicate_unames': check_duplicate_unames,
'check_duplicate_gids': check_duplicate_gids,
'check_duplicate_uids': check_duplicate_uids,
'check_service_status': check_service_status,
'check_ssh_timeout_config': check_ssh_timeout_config,
'check_unowned_files': check_unowned_files,
'check_ungrouped_files': check_ungrouped_files,
'check_all_users_home_directory': check_all_users_home_directory,
'check_users_home_directory_permissions': check_users_home_directory_permissions,
'check_users_own_their_home': check_users_own_their_home,
'check_users_dot_files': check_users_dot_files,
'check_users_forward_files': check_users_forward_files,
'check_users_netrc_files': check_users_netrc_files,
'check_groups_validity': check_groups_validity,
'ensure_reverse_path_filtering': ensure_reverse_path_filtering,
'check_users_rhosts_files': check_users_rhosts_files,
'check_netrc_files_accessibility': check_netrc_files_accessibility,
'check_list_values': check_list_values,
'mail_conf_check': mail_conf_check,
'check_if_any_pkg_installed': check_if_any_pkg_installed,
'ensure_max_password_expiration': ensure_max_password_expiration,
'check_sshd_paramters': check_sshd_paramters,
}
| apache-2.0 |
sschiau/swift | utils/swift-bench.py | 28 | 15119 | #!/usr/bin/env python
# ===--- swift-bench.py ------------------------------*- coding: utf-8 -*-===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
# This file implements a test harness for running Swift performance benchmarks.
#
# Its input is a set of swift files, containing functions named 'bench_*' that
# take no arguments and returns Int. The harness makes a separate test from
# each of these functions, runs all the tests and reports aggregate results.
#
# The workflow of the harness is the following:
# o Basing on the input files, generate 'processed' files. These files
# contain a main function with simple arguments parsing, time measurement
# utilities and a loop in which the bench-functions are called.
# o When all files are processed, the harness begins to compile them, keeping
# track of all compile fails for later results reporting.
# o When all files are compiled, the harness begins to run the tests. The
# harness chooses a number of iterations for each tests to achieve the best
# accuracy in the given time limit (in order to do that, it performs
# several auxiliary test runs). When the iteration number is chosen, the
# measurement of execution time is actually performed.
# o At this point everything is ready, and the harness simply reports the
# results.
#
# Ideas for the harness improvement and development are welcomed here:
# rdar://problem/18072938
from __future__ import print_function
import argparse
import math
import os
import re
import subprocess
import sys
# This regular expression is looking for Swift functions named `bench_*`
# that take no arguments and return an Int. The Swift code for such
# a function is:
#
# func bench_myname() {
# // function body goes here
# }
BENCH_RE = re.compile(
r"^\s*" # whitespace at the start of the line
r"func\s+" # 'func' keyword, which must be followed by
# at least one space
r"bench_([a-zA-Z0-9_]+)\s*"
# name of the function
r"\s*\(\s*\)" # argument list
r"\s*->\s*Int\s*" # return type
r"({)?" # opening brace of the function body
r"\s*$" # whitespace ot the end of the line
)
def pstdev(sample):
"""Given a list of numbers, return the population standard deviation.
For a population x_1, x_2, ..., x_N with mean M, the standard deviation
is defined as
sqrt( 1/N * [ (x_1 - M)^2 + (x_2 - M)^2 + ... + (x_N - M)^2 ] )
"""
if len(sample) == 0:
raise ValueError("Cannot calculate the standard deviation of an "
"empty list!")
mean = sum(sample) / float(len(sample))
inner = 1.0 / len(sample) * (sum((x - mean) ** 2 for x in sample))
return math.sqrt(inner)
class SwiftBenchHarness(object):
sources = []
verbose_level = 0
compiler = ""
tests = {}
time_limit = 1000
min_sample_time = 100
min_iter_time = 1
opt_flags = []
def log(self, str, level):
if self.verbose_level >= level:
for _ in range(1, level):
sys.stdout.write(' ')
print(str)
def run_command(self, cmd):
self.log(' Executing: ' + ' '.join(cmd), 1)
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
def parse_arguments(self):
self.log("Parsing arguments.", 2)
parser = argparse.ArgumentParser()
parser.add_argument(
"-v", "--verbosity",
help="increase output verbosity", type=int)
parser.add_argument("files", help="input files", nargs='+')
parser.add_argument(
'-c', '--compiler',
help="compiler to use", default="swiftc")
parser.add_argument(
'-t', '--timelimit',
help="Time limit for every test", type=int)
parser.add_argument(
'-s', '--sampletime',
help="Minimum time for every sample", type=int)
parser.add_argument(
'-f', '--flags', help="Compilation flags", nargs='+')
args = parser.parse_args()
if args.verbosity:
self.verbose_level = args.verbosity
self.sources = args.files
self.compiler = args.compiler
if args.flags:
self.opt_flags = args.flags
if args.timelimit and args.timelimit > 0:
self.time_limit = args.timelimit
if args.sampletime and args.sampletime > 0:
self.min_sample_time = args.sampletime
self.log("Sources: %s." % ', '.join(self.sources), 3)
self.log("Compiler: %s." % self.compiler, 3)
self.log("Opt flags: %s." % ', '.join(self.opt_flags), 3)
self.log("Verbosity: %s." % self.verbose_level, 3)
self.log("Time limit: %s." % self.time_limit, 3)
self.log("Min sample time: %s." % self.min_sample_time, 3)
def process_source(self, name):
self.log("Processing source file: %s." % name, 2)
header = """
@_silgen_name("mach_absolute_time") func __mach_absolute_time__() -> UInt64
@_silgen_name("opaqueGetInt32")
func _opaqueGetInt32(x: Int) -> Int
@_silgen_name("opaqueGetInt64")
func _opaqueGetInt64(x: Int) -> Int
@inline(never)
public func getInt(x: Int) -> Int {
#if arch(i386) || arch(arm)
return _opaqueGetInt32(x)
#elseif arch(x86_64) || arch(arm64) || arch(powerpc64) || \
arch(powerpc64le) || arch(s390x)
return _opaqueGetInt64(x)
#else
return x
#endif
}
@inline(never)
func False() -> Bool { return getInt(1) == 0 }
@inline(never)
func Consume(x: Int) { if False() { println(x) } }
"""
before_bench = """
@inline(never)
"""
into_bench = """
if False() { return 0 }
"""
main_begin = """
func main() {
var N = 1
var name = ""
if CommandLine.arguments.count > 1 {
N = CommandLine.arguments[1].toInt()!
}
"""
main_body = r"""
name = "%s"
if CommandLine.arguments.count <= 2 || CommandLine.arguments[2] == name {
let start = __mach_absolute_time__()
for _ in 1...N {
bench_%s()
}
let end = __mach_absolute_time__()
println("\(name),\(N),\(end - start)")
}
"""
main_end = """
}
main()
"""
with open(name) as f:
lines = list(f)
output = header
looking_for_curly_brace = False
test_names = []
for lineno, l in enumerate(lines, start=1):
if looking_for_curly_brace:
output += l
if "{" not in l:
continue
looking_for_curly_brace = False
output += into_bench
continue
m = BENCH_RE.match(l)
if m:
output += before_bench
output += l
bench_name = m.group(1)
self.log("Benchmark found: %s (line %d)" %
(bench_name, lineno), 3)
self.tests[
name + ":" +
bench_name] = Test(bench_name, name, "", "")
test_names.append(bench_name)
if m.group(2):
output += into_bench
else:
looking_for_curly_brace = True
else:
output += l
output += main_begin
for n in test_names:
output += main_body % (n, n)
processed_name = 'processed_' + os.path.basename(name)
output += main_end
with open(processed_name, 'w') as f:
f.write(output)
for n in test_names:
self.tests[name + ":" + n].processed_source = processed_name
def process_sources(self):
self.log("Processing sources: %s." % self.sources, 2)
for s in self.sources:
self.process_source(s)
def compile_opaque_cfile(self):
self.log("Generating and compiling C file with opaque functions.", 3)
file_body = """
#include <stdint.h>
extern "C" int32_t opaqueGetInt32(int32_t x) { return x; }
extern "C" int64_t opaqueGetInt64(int64_t x) { return x; }
"""
with open('opaque.cpp', 'w') as f:
f.write(file_body)
# TODO: Handle subprocess.CalledProcessError for this call:
self.run_command(
['clang++', 'opaque.cpp', '-o', 'opaque.o', '-c', '-O2'])
compiled_files = {}
def compile_source(self, name):
self.tests[name].binary = "./" + \
self.tests[name].processed_source.split(os.extsep)[0]
if not self.tests[name].processed_source in self.compiled_files:
try:
self.run_command([
self.compiler,
self.tests[name].processed_source,
"-o",
self.tests[name].binary + '.o',
'-c'
] + self.opt_flags)
self.run_command([
self.compiler,
'-o',
self.tests[name].binary,
self.tests[name].binary + '.o',
'opaque.o'
])
self.compiled_files[
self.tests[name].processed_source] = ('', '')
except subprocess.CalledProcessError as e:
self.compiled_files[self.tests[name].processed_source] = (
'COMPFAIL', e.output)
(status, output) = self.compiled_files[
self.tests[name].processed_source]
self.tests[name].status = status
self.tests[name].output = output
def compile_sources(self):
self.log("Compiling processed sources.", 2)
self.compile_opaque_cfile()
for t in self.tests:
self.compile_source(t)
def run_benchmarks(self):
self.log("Running benchmarks.", 2)
for t in self.tests:
self.run_bench(t)
def parse_benchmark_output(self, res):
# Parse lines like
# TestName,NNN,MMM
# where NNN - performed iterations number, MMM - execution time (in ns)
results_re = re.compile(r"(\w+),[ \t]*(\d+),[ \t]*(\d+)")
m = results_re.match(res)
if not m:
return ("", 0, 0)
return (m.group(1), m.group(2), m.group(3))
def compute_iters_number(self, name):
scale = 1
spent = 0
# Measure time for one iteration
# If it's too small, increase number of iteration until it's measurable
while (spent <= self.min_iter_time):
try:
r = self.run_command([
self.tests[name].binary, str(scale),
self.tests[name].name])
(test_name, iters_computed, exec_time) = \
self.parse_benchmark_output(r)
# Convert ns to ms
spent = int(exec_time) / 1000000
if spent <= self.min_iter_time:
scale *= 2
if scale > sys.maxint:
return (0, 0)
except subprocess.CalledProcessError as e:
r = e.output
break
if spent == 0:
spent = 1
# Now compute number of samples we can take in the given time limit
mult = int(self.min_sample_time / spent)
if mult == 0:
mult = 1
scale *= mult
spent *= mult
samples = int(self.time_limit / spent)
if samples == 0:
samples = 1
return (samples, scale)
def run_bench(self, name):
if not self.tests[name].status == "":
return
(num_samples, iter_scale) = self.compute_iters_number(name)
if (num_samples, iter_scale) == (0, 0):
self.tests[name].status = "CAN'T MEASURE"
self.tests[name].output = (
"Can't find number of iterations for the test to last " +
"longer than %d ms." % self.min_iter_time)
return
samples = []
self.log("Running bench: %s, numsamples: %d" % (name, num_samples), 2)
for _ in range(0, num_samples):
try:
r = self.run_command([self.tests[name].binary, str(iter_scale),
self.tests[name].name])
(test_name, iters_computed, exec_time) = \
self.parse_benchmark_output(r)
# TODO: Verify test_name and iters_computed
samples.append(int(exec_time) / iter_scale)
self.tests[name].output = r
except subprocess.CalledProcessError as e:
self.tests[name].status = "RUNFAIL"
self.tests[name].output = e.output
break
res = TestResults(name, samples)
self.tests[name].results = res
def report_results(self):
self.log("\nReporting results.", 2)
print("==================================================")
for t in self.tests:
self.tests[t].do_print()
class Test(object):
def __init__(self, name, source, processed_source, binary):
self.name = name
self.source = source
self.processed_source = processed_source
self.binary = binary
self.status = ""
self.results = None
self.output = None
def do_print(self):
print("NAME: %s" % self.name)
print("SOURCE: %s" % self.source)
if self.status == "":
if self.results is not None:
self.results.do_print()
else:
print("STATUS: %s" % self.status)
print("OUTPUT:")
print(self.output)
print("END OF OUTPUT")
print("")
class TestResults(object):
def __init__(self, name, samples):
self.name = name
self.samples = samples
if len(samples) > 0:
self.process()
def process(self):
self.minimum = min(self.samples)
self.maximum = max(self.samples)
self.avg = sum(self.samples) / len(self.samples)
self.std = pstdev(self.samples)
self.err = self.std / math.sqrt(len(self.samples))
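        # 1.96 is the two-sided z-score for a 95% confidence interval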
self.int_min = self.avg - self.err * 1.96
self.int_max = self.avg + self.err * 1.96
def do_print(self):
print("SAMPLES: %d" % len(self.samples))
print("MIN: %3.2e" % self.minimum)
print("MAX: %3.2e" % self.maximum)
print("AVG: %3.2e" % self.avg)
print("STD: %3.2e" % self.std)
print("ERR: %3.2e (%2.1f%%)" % (self.err, self.err * 100 / self.avg))
print("CONF INT 0.95: (%3.2e, %3.2e)" % (self.int_min, self.int_max))
print("")
def main():
harness = SwiftBenchHarness()
harness.parse_arguments()
harness.process_sources()
harness.compile_sources()
harness.run_benchmarks()
harness.report_results()
main()
| apache-2.0 |
jumpstarter-io/nova | nova/cmd/api_ec2.py | 26 | 1537 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova EC2 API."""
import sys
from oslo.config import cfg
from nova import config
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
config.parse_args(sys.argv)
logging.setup("nova")
utils.monkey_patch()
objects.register_all()
gmr.TextGuruMeditation.setup_autorun(version)
should_use_ssl = 'ec2' in CONF.enabled_ssl_apis
server = service.WSGIService('ec2', use_ssl=should_use_ssl,
max_url_len=16384)
service.serve(server, workers=server.workers)
service.wait()
| apache-2.0 |
godfather1103/WeiboRobot | python27/1.0/lib/test/test_scope.py | 114 | 15536 | import unittest
from test.test_support import check_syntax_error, check_py3k_warnings, \
check_warnings, run_unittest
class ScopeTests(unittest.TestCase):
def testSimpleNesting(self):
def make_adder(x):
def adder(y):
return x + y
return adder
inc = make_adder(1)
plus10 = make_adder(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testExtraNesting(self):
def make_adder2(x):
def extra(): # check freevars passing through non-use scopes
def adder(y):
return x + y
return adder
return extra()
inc = make_adder2(1)
plus10 = make_adder2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testSimpleAndRebinding(self):
def make_adder3(x):
def adder(y):
return x + y
x = x + 1 # check tracking of assignment to x in defining scope
return adder
inc = make_adder3(0)
plus10 = make_adder3(9)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingGlobalNoFree(self):
        def make_adder4(): # XXX add extra level of indirection
def nest():
def nest():
def adder(y):
return global_x + y # check that plain old globals work
return adder
return nest()
return nest()
global_x = 1
adder = make_adder4()
self.assertEqual(adder(1), 2)
global_x = 10
self.assertEqual(adder(-2), 8)
def testNestingThroughClass(self):
def make_adder5(x):
class Adder:
def __call__(self, y):
return x + y
return Adder()
inc = make_adder5(1)
plus10 = make_adder5(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(-2), 8)
def testNestingPlusFreeRefToGlobal(self):
def make_adder6(x):
global global_nest_x
def adder(y):
return global_nest_x + y
global_nest_x = x
return adder
inc = make_adder6(1)
plus10 = make_adder6(10)
self.assertEqual(inc(1), 11) # there's only one global
self.assertEqual(plus10(-2), 8)
def testNearestEnclosingScope(self):
def f(x):
def g(y):
x = 42 # check that this masks binding in f()
def h(z):
return x + z
return h
return g(2)
test_func = f(10)
self.assertEqual(test_func(5), 47)
def testMixedFreevarsAndCellvars(self):
def identity(x):
return x
def f(x, y, z):
def g(a, b, c):
a = a + x # 3
def h():
# z * (4 + 9)
# 3 * 13
return identity(z * (b + y))
y = c + z # 9
return h
return g
g = f(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 39)
def testFreeVarInMethod(self):
def test():
method_and_var = "var"
class Test:
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
return Test()
t = test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
method_and_var = "var"
class Test:
# this class is not nested, so the rules are different
def method_and_var(self):
return "method"
def test(self):
return method_and_var
def actual_global(self):
return str("global")
def str(self):
return str(self)
t = Test()
self.assertEqual(t.test(), "var")
self.assertEqual(t.method_and_var(), "method")
self.assertEqual(t.actual_global(), "global")
def testRecursion(self):
def f(x):
def fact(n):
if n == 0:
return 1
else:
return n * fact(n - 1)
if x >= 0:
return fact(x)
else:
raise ValueError, "x must be >= 0"
self.assertEqual(f(6), 720)
def testUnoptimizedNamespaces(self):
check_syntax_error(self, """\
def unoptimized_clash1(strip):
    def f(s):
        from string import *
        return strip(s) # ambiguity: free or local
    return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
    from string import *
    def f(s):
        return strip(s) # ambiguity: global or local
    return f
""")
check_syntax_error(self, """\
def unoptimized_clash2():
    from string import *
    def g():
        def f(s):
            return strip(s) # ambiguity: global or local
        return f
""")
# XXX could allow this for exec with const argument, but what's the point
check_syntax_error(self, """\
def error(y):
    exec "a = 1"
    def f(x):
        return x + y
    return f
""")
check_syntax_error(self, """\
def f(x):
    def g():
        return x
    del x # can't del name
""")
check_syntax_error(self, """\
def f():
    def g():
        from string import *
        return strip # global or local?
""")
# and verify a few cases that should work
exec """
def noproblem1():
    from string import *
    f = lambda x:x
def noproblem2():
    from string import *
    def f(x):
        return x + 1
def noproblem3():
    from string import *
    def f(x):
        global y
        y = x
"""
def testLambdas(self):
f1 = lambda x: lambda y: x + y
inc = f1(1)
plus10 = f1(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f2 = lambda x: (lambda : lambda y: x + y)()
inc = f2(1)
plus10 = f2(10)
self.assertEqual(inc(1), 2)
self.assertEqual(plus10(5), 15)
f3 = lambda x: lambda y: global_x + y
global_x = 1
inc = f3(None)
self.assertEqual(inc(2), 3)
f8 = lambda x, y, z: lambda a, b, c: lambda : z * (b + y)
g = f8(1, 2, 3)
h = g(2, 4, 6)
self.assertEqual(h(), 18)
def testUnboundLocal(self):
def errorInOuter():
print y
def inner():
return y
y = 1
def errorInInner():
def inner():
return y
inner()
y = 1
self.assertRaises(UnboundLocalError, errorInOuter)
self.assertRaises(NameError, errorInInner)
# test for bug #1501934: incorrect LOAD/STORE_GLOBAL generation
exec """
global_x = 1
def f():
    global_x += 1
try:
    f()
except UnboundLocalError:
    pass
else:
    fail('scope of global_x not correctly determined')
""" in {'fail': self.fail}
def testComplexDefinitions(self):
def makeReturner(*lst):
def returner():
return lst
return returner
self.assertEqual(makeReturner(1,2,3)(), (1,2,3))
def makeReturner2(**kwargs):
def returner():
return kwargs
return returner
self.assertEqual(makeReturner2(a=11)()['a'], 11)
with check_py3k_warnings(("tuple parameter unpacking has been removed",
SyntaxWarning)):
exec """\
def makeAddPair((a, b)):
    def addPair((c, d)):
        return (a + c, b + d)
    return addPair
""" in locals()
self.assertEqual(makeAddPair((1, 2))((100, 200)), (101,202))
def testScopeOfGlobalStmt(self):
# Examples posted by Samuele Pedroni to python-dev on 3/1/2001
exec """\
# I
x = 7
def f():
    x = 1
    def g():
        global x
        def i():
            def h():
                return x
            return h()
        return i()
    return g()
self.assertEqual(f(), 7)
self.assertEqual(x, 7)
# II
x = 7
def f():
    x = 1
    def g():
        x = 2
        def i():
            def h():
                return x
            return h()
        return i()
    return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 7)
# III
x = 7
def f():
    x = 1
    def g():
        global x
        x = 2
        def i():
            def h():
                return x
            return h()
        return i()
    return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# IV
x = 7
def f():
    x = 3
    def g():
        global x
        x = 2
        def i():
            def h():
                return x
            return h()
        return i()
    return g()
self.assertEqual(f(), 2)
self.assertEqual(x, 2)
# XXX what about global statements in class blocks?
# do they affect methods?
x = 12
class Global:
    global x
    x = 13
    def set(self, val):
        x = val
    def get(self):
        return x
g = Global()
self.assertEqual(g.get(), 13)
g.set(15)
self.assertEqual(g.get(), 13)
"""
def testLeaks(self):
class Foo:
count = 0
def __init__(self):
Foo.count += 1
def __del__(self):
Foo.count -= 1
def f1():
x = Foo()
def f2():
return x
f2()
for i in range(100):
f1()
self.assertEqual(Foo.count, 0)
def testClassAndGlobal(self):
exec """\
def test(x):
    class Foo:
        global x
        def __call__(self, y):
            return x + y
    return Foo()
x = 0
self.assertEqual(test(6)(2), 8)
x = -1
self.assertEqual(test(3)(2), 5)
looked_up_by_load_name = False
class X:
    # Implicit globals inside classes are looked up by LOAD_NAME, not
    # LOAD_GLOBAL.
    locals()['looked_up_by_load_name'] = True
    passed = looked_up_by_load_name
self.assertTrue(X.passed)
"""
def testLocalsFunction(self):
def f(x):
def g(y):
def h(z):
return y + z
w = x + y
y += 3
return locals()
return g
d = f(2)(4)
self.assertIn('h', d)
del d['h']
self.assertEqual(d, {'x': 2, 'y': 7, 'w': 6})
def testLocalsClass(self):
# This test verifies that calling locals() does not pollute
# the local namespace of the class with free variables. Old
        # versions of Python had a bug where a free variable passed
        # through a class namespace would be inserted into locals()
        # by a call to locals(), by exec, or by a trace function.
#
# The real bug lies in frame code that copies variables
# between fast locals and the locals dict, e.g. when executing
# a trace function.
def f(x):
class C:
x = 12
def m(self):
return x
locals()
return C
self.assertEqual(f(1).x, 12)
def f(x):
class C:
y = x
def m(self):
return x
z = list(locals())
return C
varnames = f(1).z
self.assertNotIn("x", varnames)
self.assertIn("y", varnames)
def testLocalsClass_WithTrace(self):
# Issue23728: after the trace function returns, the locals()
        # dictionary is used to update all variables; this used to
# include free variables. But in class statements, free
# variables are not inserted...
import sys
sys.settrace(lambda a,b,c:None)
try:
x = 12
class C:
def f(self):
return x
self.assertEqual(x, 12) # Used to raise UnboundLocalError
finally:
sys.settrace(None)
def testBoundAndFree(self):
# var is bound and free in class
def f(x):
class C:
def m(self):
return x
a = x
return C
inst = f(3)()
self.assertEqual(inst.a, inst.m())
def testInteractionWithTraceFunc(self):
import sys
def tracer(a,b,c):
return tracer
def adaptgetter(name, klass, getter):
kind, des = getter
if kind == 1: # AV happens when stepping from this line to next
if des == "":
des = "_%s__%s" % (klass.__name__, name)
return lambda obj: getattr(obj, des)
class TestClass:
pass
sys.settrace(tracer)
adaptgetter("foo", TestClass, (1, ""))
sys.settrace(None)
self.assertRaises(TypeError, sys.settrace)
def testEvalExecFreeVars(self):
def f(x):
return lambda: x + 1
g = f(3)
self.assertRaises(TypeError, eval, g.func_code)
try:
exec g.func_code in {}
except TypeError:
pass
else:
self.fail("exec should have failed, because code contained free vars")
def testListCompLocalVars(self):
try:
print bad
except NameError:
pass
else:
print "bad should not be defined"
def x():
[bad for s in 'a b' for bad in s.split()]
x()
try:
print bad
except NameError:
pass
def testEvalFreeVars(self):
def f(x):
def g():
x
eval("x + 1")
return g
f(4)()
def testFreeingCell(self):
# Test what happens when a finalizer accesses
# the cell where the object was stored.
class Special:
def __del__(self):
nestedcell_get()
def f():
global nestedcell_get
def nestedcell_get():
return c
c = (Special(),)
c = 2
f() # used to crash the interpreter...
def testGlobalInParallelNestedFunctions(self):
# A symbol table bug leaked the global statement from one
# function to other nested functions in the same block.
# This test verifies that a global statement in the first
# function does not affect the second function.
CODE = """def f():
    y = 1
    def g():
        global y
        return y
    def h():
        return y + 1
    return g, h
y = 9
g, h = f()
result9 = g()
result2 = h()
"""
local_ns = {}
global_ns = {}
exec CODE in local_ns, global_ns
self.assertEqual(2, global_ns["result2"])
self.assertEqual(9, global_ns["result9"])
def testTopIsNotSignificant(self):
# See #9997.
def top(a):
pass
def b():
global a
def test_main():
with check_warnings(("import \* only allowed at module level",
SyntaxWarning)):
run_unittest(ScopeTests)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
fevangelou/namebench | nb_third_party/dns/query.py | 215 | 15983 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Talk to a DNS server."""
from __future__ import generators
import errno
import select
import socket
import struct
import sys
import time
import dns.exception
import dns.inet
import dns.name
import dns.message
import dns.rdataclass
import dns.rdatatype
class UnexpectedSource(dns.exception.DNSException):
"""Raised if a query response comes from an unexpected address or port."""
pass
class BadResponse(dns.exception.FormError):
"""Raised if a query response does not respond to the question asked."""
pass
def _compute_expiration(timeout):
if timeout is None:
return None
else:
return time.time() + timeout
def _wait_for(ir, iw, ix, expiration):
done = False
while not done:
if expiration is None:
timeout = None
else:
timeout = expiration - time.time()
if timeout <= 0.0:
raise dns.exception.Timeout
try:
if timeout is None:
(r, w, x) = select.select(ir, iw, ix)
else:
(r, w, x) = select.select(ir, iw, ix, timeout)
except select.error, e:
if e.args[0] != errno.EINTR:
raise e
done = True
if len(r) == 0 and len(w) == 0 and len(x) == 0:
raise dns.exception.Timeout
def _wait_for_readable(s, expiration):
_wait_for([s], [], [s], expiration)
def _wait_for_writable(s, expiration):
_wait_for([], [s], [s], expiration)
def _addresses_equal(af, a1, a2):
# Convert the first value of the tuple, which is a textual format
# address into binary form, so that we are not confused by different
# textual representations of the same address
n1 = dns.inet.inet_pton(af, a1[0])
n2 = dns.inet.inet_pton(af, a2[0])
return n1 == n2 and a1[1:] == a2[1:]
def udp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
ignore_unexpected=False, one_rr_per_rrset=False):
"""Return the response obtained after sending a query via UDP.
@param q: the query
@type q: dns.message.Message
@param where: where to send the message
@type where: string containing an IPv4 or IPv6 address
@param timeout: The number of seconds to wait before the query times out.
If None, the default, wait forever.
@type timeout: float
@param port: The port to which to send the message. The default is 53.
@type port: int
@param af: the address family to use. The default is None, which
    causes the address family to be inferred from the form of where.
If the inference attempt fails, AF_INET is used.
@type af: int
@rtype: dns.message.Message object
@param source: source address. The default is the IPv4 wildcard address.
@type source: string
@param source_port: The port from which to send the message.
The default is 0.
@type source_port: int
@param ignore_unexpected: If True, ignore responses from unexpected
sources. The default is False.
@type ignore_unexpected: bool
@param one_rr_per_rrset: Put each RR into its own RRset
@type one_rr_per_rrset: bool
"""
wire = q.to_wire()
if af is None:
try:
af = dns.inet.af_for_address(where)
except:
af = dns.inet.AF_INET
if af == dns.inet.AF_INET:
destination = (where, port)
if source is not None:
source = (source, source_port)
elif af == dns.inet.AF_INET6:
destination = (where, port, 0, 0)
if source is not None:
source = (source, source_port, 0, 0)
s = socket.socket(af, socket.SOCK_DGRAM, 0)
try:
expiration = _compute_expiration(timeout)
s.setblocking(0)
if source is not None:
s.bind(source)
_wait_for_writable(s, expiration)
s.sendto(wire, destination)
while 1:
_wait_for_readable(s, expiration)
(wire, from_address) = s.recvfrom(65535)
if _addresses_equal(af, from_address, destination) or \
(dns.inet.is_multicast(where) and \
from_address[1:] == destination[1:]):
break
if not ignore_unexpected:
raise UnexpectedSource('got a response from '
'%s instead of %s' % (from_address,
destination))
finally:
s.close()
r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
one_rr_per_rrset=one_rr_per_rrset)
if not q.is_response(r):
raise BadResponse
return r
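# Illustrative usage of udp() -- an editorial example, not part of the
# original module; the server address and query name are assumptions:
#
#   q = dns.message.make_query('example.com', dns.rdatatype.A)
#   r = udp(q, '8.8.8.8', timeout=5)
#   for rrset in r.answer:
#       print rrset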
def _net_read(sock, count, expiration):
"""Read the specified number of bytes from sock. Keep trying until we
either get the desired amount, or we hit EOF.
A Timeout exception will be raised if the operation is not completed
by the expiration time.
"""
s = ''
while count > 0:
_wait_for_readable(sock, expiration)
n = sock.recv(count)
if n == '':
raise EOFError
count = count - len(n)
s = s + n
return s
def _net_write(sock, data, expiration):
"""Write the specified data to the socket.
A Timeout exception will be raised if the operation is not completed
by the expiration time.
"""
current = 0
l = len(data)
while current < l:
_wait_for_writable(sock, expiration)
current += sock.send(data[current:])
def _connect(s, address):
try:
s.connect(address)
except socket.error:
(ty, v) = sys.exc_info()[:2]
if v[0] != errno.EINPROGRESS and \
v[0] != errno.EWOULDBLOCK and \
v[0] != errno.EALREADY:
raise v
def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,
one_rr_per_rrset=False):
"""Return the response obtained after sending a query via TCP.
@param q: the query
@type q: dns.message.Message object
@param where: where to send the message
@type where: string containing an IPv4 or IPv6 address
@param timeout: The number of seconds to wait before the query times out.
If None, the default, wait forever.
@type timeout: float
@param port: The port to which to send the message. The default is 53.
@type port: int
@param af: the address family to use. The default is None, which
    causes the address family to be inferred from the form of where.
If the inference attempt fails, AF_INET is used.
@type af: int
@rtype: dns.message.Message object
@param source: source address. The default is the IPv4 wildcard address.
@type source: string
@param source_port: The port from which to send the message.
The default is 0.
@type source_port: int
@param one_rr_per_rrset: Put each RR into its own RRset
@type one_rr_per_rrset: bool
"""
wire = q.to_wire()
if af is None:
try:
af = dns.inet.af_for_address(where)
except:
af = dns.inet.AF_INET
if af == dns.inet.AF_INET:
destination = (where, port)
if source is not None:
source = (source, source_port)
elif af == dns.inet.AF_INET6:
destination = (where, port, 0, 0)
if source is not None:
source = (source, source_port, 0, 0)
s = socket.socket(af, socket.SOCK_STREAM, 0)
try:
expiration = _compute_expiration(timeout)
s.setblocking(0)
if source is not None:
s.bind(source)
_connect(s, destination)
l = len(wire)
# copying the wire into tcpmsg is inefficient, but lets us
# avoid writev() or doing a short write that would get pushed
# onto the net
tcpmsg = struct.pack("!H", l) + wire
_net_write(s, tcpmsg, expiration)
ldata = _net_read(s, 2, expiration)
(l,) = struct.unpack("!H", ldata)
wire = _net_read(s, l, expiration)
finally:
s.close()
r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
one_rr_per_rrset=one_rr_per_rrset)
if not q.is_response(r):
raise BadResponse
return r
def xfr(where, zone, rdtype=dns.rdatatype.AXFR, rdclass=dns.rdataclass.IN,
timeout=None, port=53, keyring=None, keyname=None, relativize=True,
af=None, lifetime=None, source=None, source_port=0, serial=0,
use_udp=False, keyalgorithm=dns.tsig.default_algorithm):
"""Return a generator for the responses to a zone transfer.
@param where: where to send the message
@type where: string containing an IPv4 or IPv6 address
@param zone: The name of the zone to transfer
@type zone: dns.name.Name object or string
@param rdtype: The type of zone transfer. The default is
dns.rdatatype.AXFR.
@type rdtype: int or string
@param rdclass: The class of the zone transfer. The default is
dns.rdatatype.IN.
@type rdclass: int or string
@param timeout: The number of seconds to wait for each response message.
If None, the default, wait forever.
@type timeout: float
@param port: The port to which to send the message. The default is 53.
@type port: int
@param keyring: The TSIG keyring to use
@type keyring: dict
@param keyname: The name of the TSIG key to use
@type keyname: dns.name.Name object or string
@param relativize: If True, all names in the zone will be relativized to
the zone origin. It is essential that the relativize setting matches
the one specified to dns.zone.from_xfr().
@type relativize: bool
@param af: the address family to use. The default is None, which
    causes the address family to be inferred from the form of where.
If the inference attempt fails, AF_INET is used.
@type af: int
@param lifetime: The total number of seconds to spend doing the transfer.
If None, the default, then there is no limit on the time the transfer may
take.
@type lifetime: float
@rtype: generator of dns.message.Message objects.
@param source: source address. The default is the IPv4 wildcard address.
@type source: string
@param source_port: The port from which to send the message.
The default is 0.
@type source_port: int
@param serial: The SOA serial number to use as the base for an IXFR diff
sequence (only meaningful if rdtype == dns.rdatatype.IXFR).
@type serial: int
@param use_udp: Use UDP (only meaningful for IXFR)
@type use_udp: bool
@param keyalgorithm: The TSIG algorithm to use; defaults to
dns.tsig.default_algorithm
@type keyalgorithm: string
"""
if isinstance(zone, (str, unicode)):
zone = dns.name.from_text(zone)
if isinstance(rdtype, str):
rdtype = dns.rdatatype.from_text(rdtype)
q = dns.message.make_query(zone, rdtype, rdclass)
if rdtype == dns.rdatatype.IXFR:
rrset = dns.rrset.from_text(zone, 0, 'IN', 'SOA',
'. . %u 0 0 0 0' % serial)
q.authority.append(rrset)
if not keyring is None:
q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
wire = q.to_wire()
if af is None:
try:
af = dns.inet.af_for_address(where)
except:
af = dns.inet.AF_INET
if af == dns.inet.AF_INET:
destination = (where, port)
if source is not None:
source = (source, source_port)
elif af == dns.inet.AF_INET6:
destination = (where, port, 0, 0)
if source is not None:
source = (source, source_port, 0, 0)
if use_udp:
if rdtype != dns.rdatatype.IXFR:
raise ValueError('cannot do a UDP AXFR')
s = socket.socket(af, socket.SOCK_DGRAM, 0)
else:
s = socket.socket(af, socket.SOCK_STREAM, 0)
s.setblocking(0)
if source is not None:
s.bind(source)
expiration = _compute_expiration(lifetime)
_connect(s, destination)
l = len(wire)
if use_udp:
_wait_for_writable(s, expiration)
s.send(wire)
else:
tcpmsg = struct.pack("!H", l) + wire
_net_write(s, tcpmsg, expiration)
done = False
soa_rrset = None
soa_count = 0
if relativize:
origin = zone
oname = dns.name.empty
else:
origin = None
oname = zone
tsig_ctx = None
first = True
while not done:
mexpiration = _compute_expiration(timeout)
if mexpiration is None or mexpiration > expiration:
mexpiration = expiration
if use_udp:
_wait_for_readable(s, expiration)
(wire, from_address) = s.recvfrom(65535)
else:
ldata = _net_read(s, 2, mexpiration)
(l,) = struct.unpack("!H", ldata)
wire = _net_read(s, l, mexpiration)
r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
xfr=True, origin=origin, tsig_ctx=tsig_ctx,
multi=True, first=first,
one_rr_per_rrset=(rdtype==dns.rdatatype.IXFR))
tsig_ctx = r.tsig_ctx
first = False
answer_index = 0
delete_mode = False
expecting_SOA = False
if soa_rrset is None:
if not r.answer or r.answer[0].name != oname:
raise dns.exception.FormError
rrset = r.answer[0]
if rrset.rdtype != dns.rdatatype.SOA:
raise dns.exception.FormError("first RRset is not an SOA")
answer_index = 1
soa_rrset = rrset.copy()
if rdtype == dns.rdatatype.IXFR:
if soa_rrset[0].serial == serial:
#
# We're already up-to-date.
#
done = True
else:
expecting_SOA = True
#
# Process SOAs in the answer section (other than the initial
# SOA in the first message).
#
for rrset in r.answer[answer_index:]:
if done:
raise dns.exception.FormError("answers after final SOA")
if rrset.rdtype == dns.rdatatype.SOA and rrset.name == oname:
if expecting_SOA:
if rrset[0].serial != serial:
raise dns.exception.FormError("IXFR base serial mismatch")
expecting_SOA = False
elif rdtype == dns.rdatatype.IXFR:
delete_mode = not delete_mode
if rrset == soa_rrset and not delete_mode:
done = True
elif expecting_SOA:
#
# We made an IXFR request and are expecting another
# SOA RR, but saw something else, so this must be an
# AXFR response.
#
rdtype = dns.rdatatype.AXFR
expecting_SOA = False
if done and q.keyring and not r.had_tsig:
raise dns.exception.FormError("missing TSIG")
yield r
s.close()
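# Illustrative usage of xfr() -- an editorial example, not part of the
# original module; the master address and zone name are assumptions:
#
#   import dns.zone
#   z = dns.zone.from_xfr(xfr('10.0.0.1', 'example.com'))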
| apache-2.0 |
mne-tools/mne-python | mne/viz/circle.py | 14 | 15879 | """Functions to plot on circle as for connectivity."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
#
# License: Simplified BSD
from itertools import cycle
from functools import partial
import numpy as np
from .utils import plt_show
def circular_layout(node_names, node_order, start_pos=90, start_between=True,
group_boundaries=None, group_sep=10):
"""Create layout arranging nodes on a circle.
Parameters
----------
node_names : list of str
Node names.
node_order : list of str
List with node names defining the order in which the nodes are
        arranged. Must have the same elements as node_names, but the order can
        be different. The nodes are arranged clockwise starting at "start_pos"
different. The nodes are arranged clockwise starting at "start_pos"
degrees.
start_pos : float
Angle in degrees that defines where the first node is plotted.
start_between : bool
If True, the layout starts with the position between the nodes. This is
the same as adding "180. / len(node_names)" to start_pos.
group_boundaries : None | array-like
        List of boundaries between groups at which point a "group_sep" will
be inserted. E.g. "[0, len(node_names) / 2]" will create two groups.
group_sep : float
Group separation angle in degrees. See "group_boundaries".
Returns
-------
node_angles : array, shape=(n_node_names,)
Node angles in degrees.
"""
n_nodes = len(node_names)
if len(node_order) != n_nodes:
raise ValueError('node_order has to be the same length as node_names')
if group_boundaries is not None:
boundaries = np.array(group_boundaries, dtype=np.int64)
if np.any(boundaries >= n_nodes) or np.any(boundaries < 0):
raise ValueError('"group_boundaries" has to be between 0 and '
'n_nodes - 1.')
if len(boundaries) > 1 and np.any(np.diff(boundaries) <= 0):
            raise ValueError('"group_boundaries" must have strictly '
                             'increasing values.')
n_group_sep = len(group_boundaries)
else:
n_group_sep = 0
boundaries = None
# convert it to a list with indices
node_order = [node_order.index(name) for name in node_names]
node_order = np.array(node_order)
if len(np.unique(node_order)) != n_nodes:
raise ValueError('node_order has repeated entries')
node_sep = (360. - n_group_sep * group_sep) / n_nodes
if start_between:
start_pos += node_sep / 2
if boundaries is not None and boundaries[0] == 0:
# special case when a group separator is at the start
start_pos += group_sep / 2
boundaries = boundaries[1:] if n_group_sep > 1 else None
node_angles = np.ones(n_nodes, dtype=np.float64) * node_sep
node_angles[0] = start_pos
if boundaries is not None:
node_angles[boundaries] += group_sep
node_angles = np.cumsum(node_angles)[node_order]
return node_angles
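# Illustrative usage of circular_layout() -- an editorial example, not part of
# the original module: four nodes split into two groups of two, starting at
# 90 degrees.
#
#   >>> names = ['a', 'b', 'c', 'd']
#   >>> angles = circular_layout(names, names, group_boundaries=[0, 2])
#   >>> len(angles)
#   4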
def _plot_connectivity_circle_onpick(event, fig=None, axes=None, indices=None,
n_nodes=0, node_angles=None,
ylim=[9, 10]):
"""Isolate connections around a single node when user left clicks a node.
On right click, resets all connections.
"""
if event.inaxes != axes:
return
if event.button == 1: # left click
# click must be near node radius
if not ylim[0] <= event.ydata <= ylim[1]:
return
# all angles in range [0, 2*pi]
node_angles = node_angles % (np.pi * 2)
node = np.argmin(np.abs(event.xdata - node_angles))
patches = event.inaxes.patches
for ii, (x, y) in enumerate(zip(indices[0], indices[1])):
patches[ii].set_visible(node in [x, y])
fig.canvas.draw()
elif event.button == 3: # right click
patches = event.inaxes.patches
for ii in range(np.size(indices, axis=1)):
patches[ii].set_visible(True)
fig.canvas.draw()
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of array | None
Two arrays with indices of connections for which the connections
strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape (n_node_names,) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuple | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str | instance of matplotlib.colors.LinearSegmentedColormap
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : tuple, shape (2,)
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.figure.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | tuple, shape (3,)
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
        Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure handle.
axes : instance of matplotlib.projections.polar.PolarAxes
The subplot handle.
Notes
-----
    This code is based on a circle graph example by Nicolas P. Rougier.
By default, :func:`matplotlib.pyplot.savefig` does not take ``facecolor``
into account when saving, even if set when a figure is generated. This
can be addressed via, e.g.::
>>> fig.savefig(fname_fig, facecolor='black') # doctest:+SKIP
If ``facecolor`` is not set via :func:`matplotlib.pyplot.savefig`, the
figure labels, title, and legend may be cut off in the output figure.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
try:
spectral = plt.cm.spectral
except AttributeError:
spectral = plt.cm.Spectral
node_colors = [spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = np.tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, str):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True)
axes.set_facecolor(facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
# Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
del con_abs
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
nodes_n_con = np.zeros((n_nodes), dtype=np.int64)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
sm = plt.cm.ScalarMappable(cmap=colormap,
norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
plt_show(show)
return fig, axes
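# Illustrative usage of plot_connectivity_circle() -- an editorial example,
# not part of the original module: plot a random 5-node connectivity matrix
# with node positions computed by circular_layout().
#
#   import numpy as np
#   names = ['n%d' % i for i in range(5)]
#   con = np.random.rand(5, 5)
#   angles = circular_layout(names, names)
#   fig, axes = plot_connectivity_circle(con, names, node_angles=angles)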
| bsd-3-clause |
seaotterman/tensorflow | tensorflow/contrib/factorization/python/ops/clustering_ops.py | 22 | 25599 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Clustering Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.factorization.python.ops import gen_clustering_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.factorization.python.ops.gen_clustering_ops import *
# pylint: enable=wildcard-import
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.platform import resource_loader
_clustering_ops = loader.load_op_library(
resource_loader.get_path_to_datafile('_clustering_ops.so'))
# Euclidean distance between vectors U and V is defined as ||U - V||_F which is
# the square root of the sum of the squared element-wise differences.
SQUARED_EUCLIDEAN_DISTANCE = 'squared_euclidean'
# Cosine distance between vectors U and V is defined as
# 1 - (U \dot V) / (||U||_F ||V||_F)
COSINE_DISTANCE = 'cosine'
RANDOM_INIT = 'random'
KMEANS_PLUS_PLUS_INIT = 'kmeans_plus_plus'
class KMeans(object):
"""Creates the graph for k-means clustering."""
def __init__(self,
inputs,
num_clusters,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=False,
mini_batch_steps_per_iteration=1,
random_seed=0,
kmeans_plus_plus_num_retries=2):
"""Creates an object for generating KMeans clustering graph.
This class implements the following variants of K-means algorithm:
If use_mini_batch is False, it runs standard full batch K-means. Each step
runs a single iteration of K-Means. This step can be run sharded across
multiple workers by passing a list of sharded inputs to this class. Note
however that a single step needs to process the full input at once.
If use_mini_batch is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of mini_batch_steps_per_iteration steps. Two copies of cluster
centers are maintained: one that is updated at the end of each iteration,
and one that is updated every step. The first copy is used to compute
cluster allocations for each step, and for inference, while the second copy
is the one updated each step using the mini-batch update rule. After each
    iteration is complete, this second copy is copied back to the first copy.
Note that for use_mini_batch=True, when mini_batch_steps_per_iteration=1,
the algorithm reduces to the standard mini-batch algorithm. Also by setting
mini_batch_steps_per_iteration = num_inputs / batch_size, the algorithm
becomes an asynchronous version of the full-batch algorithm. Note however
that there is no guarantee by this implementation that each input is seen
exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not behave
exactly like a full-batch version.
Args:
inputs: An input tensor or list of input tensors
num_clusters: number of clusters.
initial_clusters: Specifies the clusters used during initialization. Can
be a tensor or numpy array, or a function that generates the clusters.
Can also be "random" to specify that clusters should be chosen randomly
from input data.
distance_metric: distance metric used for clustering.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy.
random_seed: Seed for PRNG used to initialize seeds.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
"""
self._inputs = inputs if isinstance(inputs, list) else [inputs]
assert num_clusters > 0, num_clusters
self._num_clusters = num_clusters
if initial_clusters is None:
initial_clusters = RANDOM_INIT
self._initial_clusters = initial_clusters
assert distance_metric in [SQUARED_EUCLIDEAN_DISTANCE, COSINE_DISTANCE]
self._distance_metric = distance_metric
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = int(mini_batch_steps_per_iteration)
self._random_seed = random_seed
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
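  # Illustrative usage -- an editorial example, not part of the original
  # module. Assuming `points` is a float Tensor of shape [num_points, dims]:
  #
  #   kmeans = KMeans(points, num_clusters=3,
  #                   initial_clusters=KMEANS_PLUS_PLUS_INIT)
  #   (_, cluster_idx, scores, initialized, init_op, train_op) = (
  #       kmeans.training_graph())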
@classmethod
def _distance_graph(cls, inputs, clusters, distance_metric):
"""Computes distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
distance_metric: distance metric used for clustering
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
Currently only Euclidean distance and cosine distance are supported.
"""
assert isinstance(inputs, list)
if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:
return cls._compute_euclidean_distance(inputs, clusters)
elif distance_metric == COSINE_DISTANCE:
return cls._compute_cosine_distance(
inputs, clusters, inputs_normalized=True)
else:
assert False, ('Unsupported distance metric passed to Kmeans %s' %
str(distance_metric))
@classmethod
def _compute_euclidean_distance(cls, inputs, clusters):
"""Computes Euclidean distance between each input and each cluster center.
Args:
inputs: list of input Tensors.
clusters: cluster Tensor.
Returns:
list of Tensors, where each element corresponds to each element in inputs.
The value is the distance of each row to all the cluster centers.
"""
output = []
for inp in inputs:
with ops.colocate_with(inp):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
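        # (Comment added for clarity) This evaluates
        # ||x - c||^2 = ||x||^2 - 2*x.c + ||c||^2 for every (row, center)
        # pair in a single matrix expression.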
squared_distance = (math_ops.reduce_sum(
math_ops.square(inp), 1, keep_dims=True) - 2 * math_ops.matmul(
inp, clusters, transpose_b=True) + array_ops.transpose(
math_ops.reduce_sum(
math_ops.square(clusters), 1, keep_dims=True)))
output.append(squared_distance)
return output
@classmethod
def _compute_cosine_distance(cls, inputs, clusters, inputs_normalized=True):
"""Computes cosine distance between each input and each cluster center.
Args:
inputs: list of input Tensor.
clusters: cluster Tensor
inputs_normalized: if True, it assumes that inp and clusters are
normalized and computes the dot product which is equivalent to the cosine
distance. Else it L2 normalizes the inputs first.
Returns:
list of Tensors, where each element corresponds to each element in inp.
The value is the distance of each row to all the cluster centers.
"""
output = []
if not inputs_normalized:
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp in inputs:
with ops.colocate_with(inp):
if not inputs_normalized:
inp = nn_impl.l2_normalize(inp, dim=1)
output.append(1 - math_ops.matmul(inp, clusters, transpose_b=True))
return output
def _infer_graph(self, inputs, clusters):
"""Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
      List of tuples, where each tuple corresponds to an element in inputs.
      Each tuple has the following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
"""
assert isinstance(inputs, list)
# Pairwise distances are used only by transform(). In all other cases, this
# sub-graph is not evaluated.
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if (self._distance_metric == COSINE_DISTANCE and
not self._clusters_l2_normalized()):
# The cosine distance between normalized vectors x and y is the same as
      # 2 * squared_euclidean_distance. We are using this fact and reusing the
# nearest_neighbors op.
# TODO(ands): Support COSINE distance in nearest_neighbors and remove
# this.
with ops.colocate_with(clusters):
clusters = nn_impl.l2_normalize(clusters, dim=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp):
(indices,
distances) = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append(
(score, array_ops.squeeze(distances), array_ops.squeeze(indices)))
return zip(*output)
def _init_clusters_random(self):
"""Does random initialization of clusters.
Returns:
Tensor of randomly initialized clusters.
"""
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
# Note that for mini-batch k-means, we should ensure that the batch size of
# data used during initialization is sufficiently large to avoid duplicated
# clusters.
with ops.control_dependencies(
[check_ops.assert_less_equal(self._num_clusters, num_data)]):
indices = random_ops.random_uniform(
array_ops.reshape(self._num_clusters, [-1]),
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=self._random_seed,
dtype=dtypes.int64)
clusters_init = embedding_lookup(
self._inputs, indices, partition_strategy='div')
return clusters_init
def _clusters_l2_normalized(self):
"""Returns True if clusters centers are kept normalized."""
return (self._distance_metric == COSINE_DISTANCE and
(not self._use_mini_batch or
self._mini_batch_steps_per_iteration > 1))
def _initialize_clusters(self,
cluster_centers,
cluster_centers_initialized,
cluster_centers_updated):
"""Returns an op to initialize the cluster centers."""
init = self._initial_clusters
if init == RANDOM_INIT:
clusters_init = self._init_clusters_random()
elif init == KMEANS_PLUS_PLUS_INIT:
# Points from only the first shard are used for initializing centers.
# TODO(ands): Use all points.
inp = self._inputs[0]
if self._distance_metric == COSINE_DISTANCE:
inp = nn_impl.l2_normalize(inp, dim=1)
clusters_init = gen_clustering_ops.kmeans_plus_plus_initialization(
inp, self._num_clusters, self._random_seed,
self._kmeans_plus_plus_num_retries)
elif callable(init):
clusters_init = init(self._inputs, self._num_clusters)
elif not isinstance(init, str):
clusters_init = init
else:
assert False, 'Unsupported init passed to Kmeans %s' % str(init)
if self._distance_metric == COSINE_DISTANCE and clusters_init is not None:
clusters_init = nn_impl.l2_normalize(clusters_init, dim=1)
with ops.colocate_with(cluster_centers_initialized):
initialized = control_flow_ops.with_dependencies(
[clusters_init],
array_ops.identity(cluster_centers_initialized))
with ops.colocate_with(cluster_centers):
assign_centers = state_ops.assign(cluster_centers, clusters_init,
validate_shape=False)
if cluster_centers_updated != cluster_centers:
assign_centers = control_flow_ops.group(
assign_centers,
state_ops.assign(cluster_centers_updated, clusters_init,
validate_shape=False))
assign_centers = control_flow_ops.with_dependencies(
[assign_centers],
state_ops.assign(cluster_centers_initialized, True))
return control_flow_ops.cond(initialized,
control_flow_ops.no_op,
lambda: assign_centers).op
def _create_variables(self):
"""Creates variables.
Returns:
Tuple with following elements:
cluster_centers: a Tensor for storing cluster centers
cluster_centers_initialized: bool Variable indicating whether clusters
are initialized.
cluster_counts: a Tensor for storing counts of points assigned to this
cluster. This is used by mini-batch training.
cluster_centers_updated: Tensor representing copy of cluster centers that
are updated every step.
      update_in_steps: number of steps left before we sync
cluster_centers_updated back to cluster_centers.
"""
init_value = array_ops.constant([], dtype=dtypes.float32)
cluster_centers = variable_scope.variable(init_value,
name='clusters',
validate_shape=False)
cluster_centers_initialized = variable_scope.variable(False,
dtype=dtypes.bool,
name='initialized')
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
# Copy of cluster centers actively updated each step according to
# mini-batch update rule.
cluster_centers_updated = variable_scope.variable(init_value,
name='clusters_updated',
validate_shape=False)
# How many steps till we copy the updated clusters to cluster_centers.
update_in_steps = variable_scope.variable(
self._mini_batch_steps_per_iteration,
dtype=dtypes.int64,
name='update_in_steps')
# Count of points assigned to cluster_centers_updated.
cluster_counts = variable_scope.variable(
array_ops.zeros([self._num_clusters],
dtype=dtypes.int64))
else:
cluster_centers_updated = cluster_centers
update_in_steps = None
cluster_counts = (variable_scope.variable(array_ops.ones(
[self._num_clusters],
dtype=dtypes.int64))
if self._use_mini_batch else None)
return (cluster_centers,
cluster_centers_initialized,
cluster_counts,
cluster_centers_updated,
update_in_steps)
@classmethod
def _l2_normalize_data(cls, inputs):
"""Normalized the input data."""
output = []
for inp in inputs:
with ops.colocate_with(inp):
output.append(nn_impl.l2_normalize(inp, dim=1))
return output
def training_graph(self):
"""Generate a training graph for kmeans algorithm.
Returns:
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
cluster_idx: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to cluster_idx but specifies the distance to the
assigned cluster instead.
cluster_centers_initialized: scalar indicating whether clusters have been
initialized.
init_op: an op to initialize the clusters.
training_op: an op that runs an iteration of training.
"""
# Implementation of kmeans.
inputs = self._inputs
(cluster_centers_var,
cluster_centers_initialized,
total_counts,
cluster_centers_updated,
update_in_steps) = self._create_variables()
init_op = self._initialize_clusters(cluster_centers_var,
cluster_centers_initialized,
cluster_centers_updated)
cluster_centers = cluster_centers_var
if self._distance_metric == COSINE_DISTANCE:
inputs = self._l2_normalize_data(inputs)
if not self._clusters_l2_normalized():
cluster_centers = nn_impl.l2_normalize(cluster_centers, dim=1)
all_scores, scores, cluster_idx = self._infer_graph(inputs, cluster_centers)
if self._use_mini_batch:
sync_updates_op = self._mini_batch_sync_updates_op(
update_in_steps,
cluster_centers_var, cluster_centers_updated,
total_counts)
assert sync_updates_op is not None
with ops.control_dependencies([sync_updates_op]):
training_op = self._mini_batch_training_op(
inputs, cluster_idx, cluster_centers_updated, total_counts)
else:
assert cluster_centers == cluster_centers_var
training_op = self._full_batch_training_op(inputs, cluster_idx,
cluster_centers_var)
return (all_scores, cluster_idx, scores,
cluster_centers_initialized, init_op, training_op)
def _mini_batch_sync_updates_op(self, update_in_steps,
cluster_centers_var, cluster_centers_updated,
total_counts):
if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
assert update_in_steps is not None
with ops.colocate_with(update_in_steps):
def _f():
# Note that there is a race condition here, so we do a best effort
# updates here. We reset update_in_steps first so that other workers
# don't duplicate the updates. Also we update cluster_center_vars
# before resetting total_counts to avoid large updates to
# cluster_centers_updated based on partially updated
# cluster_center_vars.
with ops.control_dependencies([state_ops.assign(
update_in_steps,
self._mini_batch_steps_per_iteration - 1)]):
with ops.colocate_with(cluster_centers_updated):
if self._distance_metric == COSINE_DISTANCE:
cluster_centers = nn_impl.l2_normalize(cluster_centers_updated,
dim=1)
else:
cluster_centers = cluster_centers_updated
with ops.colocate_with(cluster_centers_var):
with ops.control_dependencies([state_ops.assign(
cluster_centers_var,
cluster_centers)]):
with ops.colocate_with(cluster_centers_var):
with ops.control_dependencies([
state_ops.assign(total_counts,
array_ops.zeros_like(total_counts))]):
return array_ops.identity(update_in_steps)
return control_flow_ops.cond(
update_in_steps <= 0,
_f,
lambda: state_ops.assign_sub(update_in_steps, 1))
else:
return control_flow_ops.no_op()
def _mini_batch_training_op(self, inputs, cluster_idx_list,
cluster_centers, total_counts):
"""Creates an op for training for mini batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
total_counts: Tensor Ref of cluster counts.
Returns:
An op for doing an update of mini-batch k-means.
"""
update_ops = []
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
assert total_counts is not None
cluster_idx = array_ops.reshape(cluster_idx, [-1])
# Dedupe the unique ids of cluster_centers being updated so that updates
# can be locally aggregated.
unique_ids, unique_idx = array_ops.unique(cluster_idx)
num_unique_cluster_idx = array_ops.size(unique_ids)
# Fetch the old values of counts and cluster_centers.
with ops.colocate_with(total_counts):
old_counts = array_ops.gather(total_counts, unique_ids)
# TODO(agarwal): This colocation seems to run into problems. Fix it.
# with ops.colocate_with(cluster_centers):
old_cluster_centers = array_ops.gather(cluster_centers, unique_ids)
# Locally aggregate the increment to counts.
count_updates = math_ops.unsorted_segment_sum(
array_ops.ones_like(
unique_idx, dtype=total_counts.dtype),
unique_idx,
num_unique_cluster_idx)
# Locally compute the sum of inputs mapped to each id.
# For a cluster with old cluster value x, old count n, and with data
# d_1,...d_k newly assigned to it, we recompute the new value as
# x += (sum_i(d_i) - k * x) / (n + k).
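        # Worked example (illustrative values): with old value x = 2, old count n = 3
        # and new points d = {4, 8} (so k = 2), sum_i(d_i) = 12 and the update adds
        # (12 - 2 * 2) / (3 + 2) = 1.6, giving x = 3.6 -- exactly the running mean.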
# Compute sum_i(d_i), see comment above.
cluster_center_updates = math_ops.unsorted_segment_sum(
inp, unique_idx, num_unique_cluster_idx)
# Shape to enable broadcasting count_updates and learning_rate to inp.
# It extends the shape with 1's to match the rank of inp.
broadcast_shape = array_ops.concat(
[
array_ops.reshape(num_unique_cluster_idx, [1]), array_ops.ones(
array_ops.reshape(array_ops.rank(inp) - 1, [1]),
dtype=dtypes.int32)
],
0)
# Subtract k * x, see comment above.
cluster_center_updates -= math_ops.cast(
array_ops.reshape(count_updates, broadcast_shape),
inp.dtype) * old_cluster_centers
learning_rate = math_ops.reciprocal(
math_ops.cast(old_counts + count_updates, inp.dtype))
learning_rate = array_ops.reshape(learning_rate, broadcast_shape)
# scale by 1 / (n + k), see comment above.
cluster_center_updates *= learning_rate
# Apply the updates.
update_counts = state_ops.scatter_add(
total_counts,
unique_ids,
count_updates)
update_cluster_centers = state_ops.scatter_add(
cluster_centers,
unique_ids,
cluster_center_updates)
update_ops.extend([update_counts, update_cluster_centers])
return control_flow_ops.group(*update_ops)
def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
"""Creates an op for training for full batch case.
Args:
inputs: list of input Tensors.
cluster_idx_list: A vector (or list of vectors). Each element in the
vector corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
cluster_centers: Tensor Ref of cluster centers.
Returns:
      An op for doing an update of full-batch k-means.
"""
cluster_sums = []
cluster_counts = []
epsilon = constant_op.constant(1e-6, dtype=inputs[0].dtype)
for inp, cluster_idx in zip(inputs, cluster_idx_list):
with ops.colocate_with(inp):
cluster_sums.append(
math_ops.unsorted_segment_sum(inp, cluster_idx, self._num_clusters))
cluster_counts.append(
math_ops.unsorted_segment_sum(
array_ops.reshape(
array_ops.ones(
array_ops.reshape(array_ops.shape(inp)[0], [-1])),
[-1, 1]), cluster_idx, self._num_clusters))
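    # Average the per-input sums by the per-cluster counts; the small epsilon keeps
    # clusters that received no points in this batch from dividing by zero.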
with ops.colocate_with(cluster_centers):
new_clusters_centers = math_ops.add_n(cluster_sums) / (math_ops.cast(
math_ops.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
if self._clusters_l2_normalized():
new_clusters_centers = nn_impl.l2_normalize(new_clusters_centers, dim=1)
return state_ops.assign(cluster_centers, new_clusters_centers)
| apache-2.0 |
xzturn/tensorflow | tensorflow/python/data/kernel_tests/optional_test.py | 4 | 19765 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Optional`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _optional_spec_test_combinations():
# pylint: disable=g-long-lambda
cases = [
("Dense", lambda: constant_op.constant(37.0),
tensor_spec.TensorSpec([], dtypes.float32)),
("Sparse", lambda: sparse_tensor.SparseTensor(
indices=[[0, 1]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[10, 10]),
sparse_tensor.SparseTensorSpec([10, 10], dtypes.int32)),
("Nest", lambda: {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
}, {
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (
tensor_spec.TensorSpec([1], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string),
)
}),
("Optional", lambda: optional_ops.Optional.from_value(37.0),
optional_ops.OptionalSpec(tensor_spec.TensorSpec([], dtypes.float32))),
]
def reduce_fn(x, y):
name, value_fn, expected_structure = y
return x + combinations.combine(
tf_value_fn=combinations.NamedObject(name, value_fn),
expected_value_structure=expected_structure)
return functools.reduce(reduce_fn, cases, [])
def _get_next_as_optional_test_combinations():
# pylint: disable=g-long-lambda
cases = [
("Dense", np.array([1, 2, 3], dtype=np.int32),
lambda: constant_op.constant([4, 5, 6], dtype=dtypes.int32), True),
("Sparse",
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]],
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=[2, 2]),
lambda: sparse_tensor.SparseTensor(
indices=[[0, 1], [1, 0]], values=[37.0, 42.0], dense_shape=[2, 2]),
False),
("Nest", {
"a":
np.array([1, 2, 3], dtype=np.int32),
"b":
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]],
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=[2, 2])
}, lambda: {
"a":
constant_op.constant([4, 5, 6], dtype=dtypes.int32),
"b":
sparse_tensor.SparseTensor(
indices=[[0, 1], [1, 0]],
values=[37.0, 42.0],
dense_shape=[2, 2])
}, False),
]
def reduce_fn(x, y):
name, value, value_fn, gpu_compatible = y
return x + combinations.combine(
np_value=value, tf_value_fn=combinations.NamedObject(name, value_fn),
gpu_compatible=gpu_compatible)
return functools.reduce(reduce_fn, cases, [])
class OptionalTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testFromValue(self):
opt = optional_ops.Optional.from_value(constant_op.constant(37.0))
self.assertTrue(self.evaluate(opt.has_value()))
self.assertEqual(37.0, self.evaluate(opt.get_value()))
@combinations.generate(test_base.default_test_combinations())
def testFromStructuredValue(self):
opt = optional_ops.Optional.from_value({
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
})
self.assertTrue(self.evaluate(opt.has_value()))
self.assertEqual({
"a": 37.0,
"b": ([b"Foo"], b"Bar")
}, self.evaluate(opt.get_value()))
@combinations.generate(test_base.default_test_combinations())
def testFromSparseTensor(self):
st_0 = sparse_tensor.SparseTensorValue(
indices=np.array([[0]]),
values=np.array([0], dtype=np.int64),
dense_shape=np.array([1]))
st_1 = sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 1]]),
values=np.array([-1., 1.], dtype=np.float32),
dense_shape=np.array([2, 2]))
opt = optional_ops.Optional.from_value((st_0, st_1))
self.assertTrue(self.evaluate(opt.has_value()))
val_0, val_1 = opt.get_value()
for expected, actual in [(st_0, val_0), (st_1, val_1)]:
self.assertAllEqual(expected.indices, self.evaluate(actual.indices))
self.assertAllEqual(expected.values, self.evaluate(actual.values))
self.assertAllEqual(expected.dense_shape,
self.evaluate(actual.dense_shape))
@combinations.generate(test_base.default_test_combinations())
def testFromNone(self):
value_structure = tensor_spec.TensorSpec([], dtypes.float32)
opt = optional_ops.Optional.none_from_structure(value_structure)
self.assertTrue(opt.value_structure.is_compatible_with(value_structure))
self.assertFalse(
opt.value_structure.is_compatible_with(
tensor_spec.TensorSpec([1], dtypes.float32)))
self.assertFalse(
opt.value_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.int32)))
self.assertFalse(self.evaluate(opt.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(opt.get_value())
@combinations.generate(test_base.default_test_combinations())
def testAddN(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
# With value
opt1 = optional_ops.Optional.from_value((1.0, 2.0))
opt2 = optional_ops.Optional.from_value((3.0, 4.0))
add_tensor = math_ops.add_n([opt1._variant_tensor,
opt2._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor, opt1.value_structure)
self.assertAllEqual(self.evaluate(add_opt.get_value()), (4.0, 6.0))
# Without value
opt_none1 = optional_ops.Optional.none_from_structure(
opt1.value_structure)
opt_none2 = optional_ops.Optional.none_from_structure(
opt2.value_structure)
add_tensor = math_ops.add_n([opt_none1._variant_tensor,
opt_none2._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor,
opt_none1.value_structure)
self.assertFalse(self.evaluate(add_opt.has_value()))
@combinations.generate(test_base.default_test_combinations())
def testNestedAddN(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
opt1 = optional_ops.Optional.from_value([1, 2.0])
opt2 = optional_ops.Optional.from_value([3, 4.0])
opt3 = optional_ops.Optional.from_value((5.0, opt1._variant_tensor))
opt4 = optional_ops.Optional.from_value((6.0, opt2._variant_tensor))
add_tensor = math_ops.add_n([opt3._variant_tensor,
opt4._variant_tensor])
add_opt = optional_ops._OptionalImpl(add_tensor, opt3.value_structure)
self.assertEqual(self.evaluate(add_opt.get_value()[0]), 11.0)
inner_add_opt = optional_ops._OptionalImpl(add_opt.get_value()[1],
opt1.value_structure)
self.assertAllEqual(inner_add_opt.get_value(), [4, 6.0])
@combinations.generate(test_base.default_test_combinations())
def testZerosLike(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
# With value
opt = optional_ops.Optional.from_value((1.0, 2.0))
zeros_tensor = array_ops.zeros_like(opt._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt.value_structure)
self.assertAllEqual(self.evaluate(zeros_opt.get_value()),
(0.0, 0.0))
# Without value
opt_none = optional_ops.Optional.none_from_structure(
opt.value_structure)
zeros_tensor = array_ops.zeros_like(opt_none._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt_none.value_structure)
self.assertFalse(self.evaluate(zeros_opt.has_value()))
@combinations.generate(test_base.default_test_combinations())
def testNestedZerosLike(self):
devices = ["/cpu:0"]
if test_util.is_gpu_available():
devices.append("/gpu:0")
for device in devices:
with ops.device(device):
opt1 = optional_ops.Optional.from_value(1.0)
opt2 = optional_ops.Optional.from_value(opt1._variant_tensor)
zeros_tensor = array_ops.zeros_like(opt2._variant_tensor)
zeros_opt = optional_ops._OptionalImpl(zeros_tensor,
opt2.value_structure)
inner_zeros_opt = optional_ops._OptionalImpl(zeros_opt.get_value(),
opt1.value_structure)
self.assertEqual(self.evaluate(inner_zeros_opt.get_value()), 0.0)
@combinations.generate(test_base.default_test_combinations())
def testCopyToGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/cpu:0"):
optional_with_value = optional_ops.Optional.from_value(
(constant_op.constant(37.0), constant_op.constant("Foo"),
constant_op.constant(42)))
optional_none = optional_ops.Optional.none_from_structure(
tensor_spec.TensorSpec([], dtypes.float32))
with ops.device("/gpu:0"):
gpu_optional_with_value = optional_ops._OptionalImpl(
array_ops.identity(optional_with_value._variant_tensor),
optional_with_value.value_structure)
gpu_optional_none = optional_ops._OptionalImpl(
array_ops.identity(optional_none._variant_tensor),
optional_none.value_structure)
gpu_optional_with_value_has_value = gpu_optional_with_value.has_value()
gpu_optional_with_value_values = gpu_optional_with_value.get_value()
gpu_optional_none_has_value = gpu_optional_none.has_value()
self.assertTrue(self.evaluate(gpu_optional_with_value_has_value))
self.assertEqual((37.0, b"Foo", 42),
self.evaluate(gpu_optional_with_value_values))
self.assertFalse(self.evaluate(gpu_optional_none_has_value))
@combinations.generate(test_base.default_test_combinations())
def testNestedCopyToGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/cpu:0"):
optional_with_value = optional_ops.Optional.from_value(
(constant_op.constant(37.0), constant_op.constant("Foo"),
constant_op.constant(42)))
optional_none = optional_ops.Optional.none_from_structure(
tensor_spec.TensorSpec([], dtypes.float32))
nested_optional = optional_ops.Optional.from_value(
(optional_with_value._variant_tensor, optional_none._variant_tensor,
1.0))
with ops.device("/gpu:0"):
gpu_nested_optional = optional_ops._OptionalImpl(
array_ops.identity(nested_optional._variant_tensor),
nested_optional.value_structure)
gpu_nested_optional_has_value = gpu_nested_optional.has_value()
gpu_nested_optional_values = gpu_nested_optional.get_value()
self.assertTrue(self.evaluate(gpu_nested_optional_has_value))
inner_with_value = optional_ops._OptionalImpl(
gpu_nested_optional_values[0], optional_with_value.value_structure)
inner_none = optional_ops._OptionalImpl(
gpu_nested_optional_values[1], optional_none.value_structure)
self.assertEqual((37.0, b"Foo", 42),
self.evaluate(inner_with_value.get_value()))
self.assertFalse(self.evaluate(inner_none.has_value()))
self.assertEqual(1.0, self.evaluate(gpu_nested_optional_values[2]))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
_optional_spec_test_combinations()))
def testOptionalSpec(self, tf_value_fn, expected_value_structure):
tf_value = tf_value_fn()
opt = optional_ops.Optional.from_value(tf_value)
self.assertTrue(
structure.are_compatible(opt.value_structure, expected_value_structure))
opt_structure = structure.type_spec_from_value(opt)
self.assertIsInstance(opt_structure, optional_ops.OptionalSpec)
self.assertTrue(structure.are_compatible(opt_structure, opt_structure))
self.assertTrue(
structure.are_compatible(opt_structure._value_structure,
expected_value_structure))
self.assertEqual([dtypes.variant],
structure.get_flat_tensor_types(opt_structure))
self.assertEqual([tensor_shape.TensorShape([])],
structure.get_flat_tensor_shapes(opt_structure))
    # An OptionalSpec is never compatible with the spec of a non-optional
    # value.
non_optional_structure = structure.type_spec_from_value(
constant_op.constant(42.0))
self.assertFalse(opt_structure.is_compatible_with(non_optional_structure))
# Assert that the optional survives a round-trip via _from_tensor_list()
# and _to_tensor_list().
round_trip_opt = opt_structure._from_tensor_list(
opt_structure._to_tensor_list(opt))
if isinstance(tf_value, optional_ops.Optional):
self.assertValuesEqual(
self.evaluate(tf_value.get_value()),
self.evaluate(round_trip_opt.get_value().get_value()))
else:
self.assertValuesEqual(
self.evaluate(tf_value),
self.evaluate(round_trip_opt.get_value()))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
_get_next_as_optional_test_combinations()))
def testIteratorGetNextAsOptional(self, np_value, tf_value_fn,
gpu_compatible):
if not gpu_compatible and test.is_gpu_available():
self.skipTest("Test case not yet supported on GPU.")
ds = dataset_ops.Dataset.from_tensors(np_value).repeat(3)
if context.executing_eagerly():
iterator = dataset_ops.make_one_shot_iterator(ds)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
for _ in range(3):
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertIsInstance(next_elem, optional_ops.Optional)
self.assertTrue(structure.are_compatible(
next_elem.value_structure,
structure.type_spec_from_value(tf_value_fn())))
self.assertTrue(next_elem.has_value())
self.assertValuesEqual(np_value, next_elem.get_value())
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertFalse(self.evaluate(next_elem.has_value()))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_elem.get_value())
else:
iterator = dataset_ops.make_initializable_iterator(ds)
next_elem = iterator_ops.get_next_as_optional(iterator)
self.assertIsInstance(next_elem, optional_ops.Optional)
self.assertTrue(structure.are_compatible(
next_elem.value_structure,
structure.type_spec_from_value(tf_value_fn())))
# Before initializing the iterator, evaluating the optional fails with
# a FailedPreconditionError. This is only relevant in graph mode.
elem_has_value_t = next_elem.has_value()
elem_value_t = next_elem.get_value()
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_has_value_t)
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(elem_value_t)
# Now we initialize the iterator.
self.evaluate(iterator.initializer)
# For each element of the dataset, assert that the optional evaluates to
# the expected value.
for _ in range(3):
elem_has_value, elem_value = self.evaluate(
[elem_has_value_t, elem_value_t])
self.assertTrue(elem_has_value)
self.assertValuesEqual(np_value, elem_value)
# After exhausting the iterator, `next_elem.has_value()` will evaluate to
# false, and attempting to get the value will fail.
for _ in range(2):
self.assertFalse(self.evaluate(elem_has_value_t))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(elem_value_t)
@combinations.generate(test_base.default_test_combinations())
def testFunctionBoundaries(self):
@def_function.function
def get_optional():
x = constant_op.constant(1.0)
opt = optional_ops.Optional.from_value(x)
# TODO(skyewm): support returning Optionals from functions?
return opt._variant_tensor
# TODO(skyewm): support Optional arguments?
@def_function.function
def consume_optional(opt_tensor):
value_structure = tensor_spec.TensorSpec([], dtypes.float32)
opt = optional_ops._OptionalImpl(opt_tensor, value_structure)
return opt.get_value()
opt_tensor = get_optional()
val = consume_optional(opt_tensor)
self.assertEqual(self.evaluate(val), 1.0)
@combinations.generate(test_base.default_test_combinations())
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(opt):
trace_count[0] += 1
return opt.get_value()
opt1 = optional_ops.Optional.from_value(constant_op.constant(37.0))
opt2 = optional_ops.Optional.from_value(constant_op.constant(42.0))
for _ in range(10):
self.assertEqual(self.evaluate(f(opt1)), 37.0)
self.assertEqual(self.evaluate(f(opt2)), 42.0)
self.assertEqual(trace_count[0], 1)
if __name__ == "__main__":
test.main()
| apache-2.0 |
40123248/w16b_test | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/constants.py | 603 | 15297 | #!/usr/bin/env python
'''Constants defined by SDL, and needed in pygame.
Note that many of the flags for SDL are not needed in pygame, and are not
included here. These constants are generally accessed from the
`pygame.locals` module. This module is automatically placed in the pygame
namespace, but you will usually want to place them directly into your module's
namespace with the following command::
from pygame.locals import *
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
#import SDL.constants
# SDL constants taken from https://wiki.libsdl.org/SDLKeycodeLookup
'''
YV12_OVERLAY = SDL.constants.SDL_YV12_OVERLAY
IYUV_OVERLAY = SDL.constants.SDL_IYUV_OVERLAY
YUY2_OVERLAY = SDL.constants.SDL_YUY2_OVERLAY
UYVY_OVERLAY = SDL.constants.SDL_UYVY_OVERLAY
YVYU_OVERLAY = SDL.constants.SDL_YVYU_OVERLAY
SWSURFACE = SDL.constants.SDL_SWSURFACE
HWSURFACE = SDL.constants.SDL_HWSURFACE
RESIZABLE = SDL.constants.SDL_RESIZABLE
ASYNCBLIT = SDL.constants.SDL_ASYNCBLIT
OPENGL = SDL.constants.SDL_OPENGL
OPENGLBLIT = SDL.constants.SDL_OPENGLBLIT
ANYFORMAT = SDL.constants.SDL_ANYFORMAT
HWPALETTE = SDL.constants.SDL_HWPALETTE
DOUBLEBUF = SDL.constants.SDL_DOUBLEBUF
#FULLSCREEN = SDL.constants.SDL_FULLSCREEN
'''
FULLSCREEN = 0
'''
HWACCEL = SDL.constants.SDL_HWACCEL
SRCCOLORKEY = SDL.constants.SDL_SRCCOLORKEY
'''
RLEACCELOK = 254
RLEACCEL = 255
'''
SRCALPHA = SDL.constants.SDL_SRCALPHA
PREALLOC = SDL.constants.SDL_PREALLOC
NOFRAME = SDL.constants.SDL_NOFRAME
GL_RED_SIZE = SDL.constants.SDL_GL_RED_SIZE
GL_GREEN_SIZE = SDL.constants.SDL_GL_GREEN_SIZE
GL_BLUE_SIZE = SDL.constants.SDL_GL_BLUE_SIZE
GL_ALPHA_SIZE = SDL.constants.SDL_GL_ALPHA_SIZE
GL_BUFFER_SIZE = SDL.constants.SDL_GL_BUFFER_SIZE
GL_DOUBLEBUFFER = SDL.constants.SDL_GL_DOUBLEBUFFER
GL_DEPTH_SIZE = SDL.constants.SDL_GL_DEPTH_SIZE
GL_STENCIL_SIZE = SDL.constants.SDL_GL_STENCIL_SIZE
GL_ACCUM_RED_SIZE = SDL.constants.SDL_GL_ACCUM_RED_SIZE
GL_ACCUM_GREEN_SIZE = SDL.constants.SDL_GL_ACCUM_GREEN_SIZE
GL_ACCUM_BLUE_SIZE = SDL.constants.SDL_GL_ACCUM_BLUE_SIZE
GL_ACCUM_ALPHA_SIZE = SDL.constants.SDL_GL_ACCUM_ALPHA_SIZE
GL_STEREO = SDL.constants.SDL_GL_STEREO
GL_MULTISAMPLEBUFFERS = SDL.constants.SDL_GL_MULTISAMPLEBUFFERS
GL_MULTISAMPLESAMPLES = SDL.constants.SDL_GL_MULTISAMPLESAMPLES
TIMER_RESOLUTION = SDL.constants.TIMER_RESOLUTION
AUDIO_U8 = SDL.constants.AUDIO_U8
AUDIO_S8 = SDL.constants.AUDIO_S8
AUDIO_U16LSB = SDL.constants.AUDIO_U16LSB
AUDIO_S16LSB = SDL.constants.AUDIO_S16LSB
AUDIO_U16MSB = SDL.constants.AUDIO_U16MSB
AUDIO_S16MSB = SDL.constants.AUDIO_S16MSB
AUDIO_U16 = SDL.constants.AUDIO_U16
AUDIO_S16 = SDL.constants.AUDIO_S16
AUDIO_U16SYS = SDL.constants.AUDIO_U16SYS
AUDIO_S16SYS = SDL.constants.AUDIO_S16SYS
'''
def _t(a, b, c, d):
return (ord(a) << 24) | (ord(b) << 16) | (ord(c) << 8) | ord(d)
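# e.g. _t('T', 'E', 'X', 'T') == 0x54455854, the big-endian FOURCC code for "TEXT".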
SCRAP_TEXT = _t('T', 'E', 'X', 'T')
SCRAP_BMP = _t('B', 'M', 'P', ' ')
BLEND_ADD = 0x01
BLEND_SUB = 0x02
BLEND_MULT = 0x03
BLEND_MIN = 0x04
BLEND_MAX = 0x05
"""
NOEVENT = SDL.constants.SDL_NOEVENT
ACTIVEEVENT = SDL.constants.SDL_ACTIVEEVENT
KEYDOWN = SDL.constants.SDL_KEYDOWN
KEYUP = SDL.constants.SDL_KEYUP
MOUSEMOTION = SDL.constants.SDL_MOUSEMOTION
MOUSEBUTTONDOWN = SDL.constants.SDL_MOUSEBUTTONDOWN
MOUSEBUTTONUP = SDL.constants.SDL_MOUSEBUTTONUP
JOYAXISMOTION = SDL.constants.SDL_JOYAXISMOTION
JOYBALLMOTION = SDL.constants.SDL_JOYBALLMOTION
JOYHATMOTION = SDL.constants.SDL_JOYHATMOTION
JOYBUTTONDOWN = SDL.constants.SDL_JOYBUTTONDOWN
JOYBUTTONUP = SDL.constants.SDL_JOYBUTTONUP
VIDEORESIZE = SDL.constants.SDL_VIDEORESIZE
VIDEOEXPOSE = SDL.constants.SDL_VIDEOEXPOSE
QUIT = SDL.constants.SDL_QUIT
SYSWMEVENT = SDL.constants.SDL_SYSWMEVENT
USEREVENT = SDL.constants.SDL_USEREVENT
NUMEVENTS = SDL.constants.SDL_NUMEVENTS
HAT_CENTERED = SDL.constants.SDL_HAT_CENTERED
HAT_UP = SDL.constants.SDL_HAT_UP
HAT_RIGHTUP = SDL.constants.SDL_HAT_RIGHTUP
HAT_RIGHT = SDL.constants.SDL_HAT_RIGHT
HAT_RIGHTDOWN = SDL.constants.SDL_HAT_RIGHTDOWN
HAT_DOWN = SDL.constants.SDL_HAT_DOWN
HAT_LEFTDOWN = SDL.constants.SDL_HAT_LEFTDOWN
HAT_LEFT = SDL.constants.SDL_HAT_LEFT
HAT_LEFTUP = SDL.constants.SDL_HAT_LEFTUP
"""
#BEGIN GENERATED CONSTANTS; see support/make_pygame_keyconstants.py
K_0 = 48
K_1 = 49
K_2 = 50
K_3 = 51
K_4 = 52
K_5 = 53
K_6 = 54
K_7 = 55
K_8 = 56
K_9 = 57
K_AMPERSAND = 38
K_ASTERISK = 42
K_AT = 64
K_BACKQUOTE = 96
K_BACKSLASH = 92
K_BACKSPACE = 8
#K_BREAK = SDL.constants.SDLK_BREAK
K_CAPSLOCK = 1073741881
K_CARET = 94
K_CLEAR = 1073742040
K_COLON = 58
K_COMMA = 44
#K_COMPOSE = SDL.constants.SDLK_COMPOSE
K_DELETE = 127
K_DOLLAR = 36
K_DOWN = 1073741905
K_END = 1073741901
K_EQUALS = 1073741927
K_ESCAPE = 27
#K_EURO = SDL.constants.SDLK_EURO
K_EXCLAIM = 33
K_F1 = 1073741882
K_F10 = 1073741891
K_F11 = 1073741892
K_F12 = 1073741893
K_F13 = 1073741928
K_F14 = 1073741929
K_F15 = 1073741930
K_F2 = 1073741883
K_F3 = 1073741884
K_F4 = 1073741885
K_F5 = 1073741886
K_F6 = 1073741887
K_F7 = 1073741888
K_F8 = 1073741889
K_F9 = 1073741890
#K_FIRST = SDL.constants.SDLK_FIRST
K_GREATER = 1073742022
K_HASH = 1073742028
K_HELP = 1073741941
K_HOME = 1073741898
K_INSERT = 1073741897
K_KP0 = 1073741922
K_KP1 = 1073741913
K_KP2 = 1073741914
K_KP3 = 1073741915
K_KP4 = 1073741916
K_KP5 = 1073741917
K_KP6 = 1073741918
K_KP7 = 1073741919
K_KP8 = 1073741920
K_KP9 = 1073741921
K_KP_DIVIDE = 1073741908
K_KP_ENTER = 1073741912
K_KP_EQUALS = 1073741927
K_KP_MINUS = 1073741910
K_KP_MULTIPLY = 1073741909
K_KP_PERIOD = 1073741923
K_KP_PLUS = 1073741911
K_LALT = 1073742050
#K_LAST = SDL.constants.SDLK_LAST
K_LCTRL = 1073742048
K_LEFT = 1073741904
#K_LEFTBRACKET = SDL.constants.SDLK_LEFTBRACKET
K_LEFTPAREN = 1073742006
#K_LESS = SDL.constants.SDLK_LESS
#K_LMETA = SDL.constants.SDLK_LMETA
K_LSHIFT = 1073742049
#K_LSUPER = SDL.constants.SDLK_LSUPER
K_MENU = 1073741942
K_MINUS = 45
K_MODE = 1073742081
#K_NUMLOCK = SDL.constants.SDLK_NUMLOCK
K_PAGEDOWN = 1073741902
K_PAGEUP = 1073741899
K_PAUSE = 1073741896
#K_PERIOD = SDL.constants.SDLK_PERIOD
K_PLUS = 43
#K_POWER = SDL.constants.SDLK_POWER
#K_PRINT = SDL.constants.SDLK_PRINT
K_QUESTION = 63
K_QUOTE = 39
K_QUOTEDBL = 34
K_RALT = 1073742054
K_RCTRL = 1073742052
K_RETURN = 13
K_RIGHT = 1073741903
#K_RIGHTBRACKET = SDL.constants.SDLK_RIGHTBRACKET
K_RIGHTPAREN = 41
#K_RMETA = SDL.constants.SDLK_RMETA
K_RSHIFT = 1073742053
#K_RSUPER = SDL.constants.SDLK_RSUPER
K_SCROLLOCK = 1073741895
K_SEMICOLON = 59
K_SLASH = 47
K_SPACE = 1073742029
K_SYSREQ = 1073741978
K_TAB = 9
K_UNDERSCORE = 95
K_UNDO = 1073741946
K_UNKNOWN = 0
K_UP = 1073741906
"""
K_WORLD_0 = SDL.constants.SDLK_WORLD_0
K_WORLD_1 = SDL.constants.SDLK_WORLD_1
K_WORLD_10 = SDL.constants.SDLK_WORLD_10
K_WORLD_11 = SDL.constants.SDLK_WORLD_11
K_WORLD_12 = SDL.constants.SDLK_WORLD_12
K_WORLD_13 = SDL.constants.SDLK_WORLD_13
K_WORLD_14 = SDL.constants.SDLK_WORLD_14
K_WORLD_15 = SDL.constants.SDLK_WORLD_15
K_WORLD_16 = SDL.constants.SDLK_WORLD_16
K_WORLD_17 = SDL.constants.SDLK_WORLD_17
K_WORLD_18 = SDL.constants.SDLK_WORLD_18
K_WORLD_19 = SDL.constants.SDLK_WORLD_19
K_WORLD_2 = SDL.constants.SDLK_WORLD_2
K_WORLD_20 = SDL.constants.SDLK_WORLD_20
K_WORLD_21 = SDL.constants.SDLK_WORLD_21
K_WORLD_22 = SDL.constants.SDLK_WORLD_22
K_WORLD_23 = SDL.constants.SDLK_WORLD_23
K_WORLD_24 = SDL.constants.SDLK_WORLD_24
K_WORLD_25 = SDL.constants.SDLK_WORLD_25
K_WORLD_26 = SDL.constants.SDLK_WORLD_26
K_WORLD_27 = SDL.constants.SDLK_WORLD_27
K_WORLD_28 = SDL.constants.SDLK_WORLD_28
K_WORLD_29 = SDL.constants.SDLK_WORLD_29
K_WORLD_3 = SDL.constants.SDLK_WORLD_3
K_WORLD_30 = SDL.constants.SDLK_WORLD_30
K_WORLD_31 = SDL.constants.SDLK_WORLD_31
K_WORLD_32 = SDL.constants.SDLK_WORLD_32
K_WORLD_33 = SDL.constants.SDLK_WORLD_33
K_WORLD_34 = SDL.constants.SDLK_WORLD_34
K_WORLD_35 = SDL.constants.SDLK_WORLD_35
K_WORLD_36 = SDL.constants.SDLK_WORLD_36
K_WORLD_37 = SDL.constants.SDLK_WORLD_37
K_WORLD_38 = SDL.constants.SDLK_WORLD_38
K_WORLD_39 = SDL.constants.SDLK_WORLD_39
K_WORLD_4 = SDL.constants.SDLK_WORLD_4
K_WORLD_40 = SDL.constants.SDLK_WORLD_40
K_WORLD_41 = SDL.constants.SDLK_WORLD_41
K_WORLD_42 = SDL.constants.SDLK_WORLD_42
K_WORLD_43 = SDL.constants.SDLK_WORLD_43
K_WORLD_44 = SDL.constants.SDLK_WORLD_44
K_WORLD_45 = SDL.constants.SDLK_WORLD_45
K_WORLD_46 = SDL.constants.SDLK_WORLD_46
K_WORLD_47 = SDL.constants.SDLK_WORLD_47
K_WORLD_48 = SDL.constants.SDLK_WORLD_48
K_WORLD_49 = SDL.constants.SDLK_WORLD_49
K_WORLD_5 = SDL.constants.SDLK_WORLD_5
K_WORLD_50 = SDL.constants.SDLK_WORLD_50
K_WORLD_51 = SDL.constants.SDLK_WORLD_51
K_WORLD_52 = SDL.constants.SDLK_WORLD_52
K_WORLD_53 = SDL.constants.SDLK_WORLD_53
K_WORLD_54 = SDL.constants.SDLK_WORLD_54
K_WORLD_55 = SDL.constants.SDLK_WORLD_55
K_WORLD_56 = SDL.constants.SDLK_WORLD_56
K_WORLD_57 = SDL.constants.SDLK_WORLD_57
K_WORLD_58 = SDL.constants.SDLK_WORLD_58
K_WORLD_59 = SDL.constants.SDLK_WORLD_59
K_WORLD_6 = SDL.constants.SDLK_WORLD_6
K_WORLD_60 = SDL.constants.SDLK_WORLD_60
K_WORLD_61 = SDL.constants.SDLK_WORLD_61
K_WORLD_62 = SDL.constants.SDLK_WORLD_62
K_WORLD_63 = SDL.constants.SDLK_WORLD_63
K_WORLD_64 = SDL.constants.SDLK_WORLD_64
K_WORLD_65 = SDL.constants.SDLK_WORLD_65
K_WORLD_66 = SDL.constants.SDLK_WORLD_66
K_WORLD_67 = SDL.constants.SDLK_WORLD_67
K_WORLD_68 = SDL.constants.SDLK_WORLD_68
K_WORLD_69 = SDL.constants.SDLK_WORLD_69
K_WORLD_7 = SDL.constants.SDLK_WORLD_7
K_WORLD_70 = SDL.constants.SDLK_WORLD_70
K_WORLD_71 = SDL.constants.SDLK_WORLD_71
K_WORLD_72 = SDL.constants.SDLK_WORLD_72
K_WORLD_73 = SDL.constants.SDLK_WORLD_73
K_WORLD_74 = SDL.constants.SDLK_WORLD_74
K_WORLD_75 = SDL.constants.SDLK_WORLD_75
K_WORLD_76 = SDL.constants.SDLK_WORLD_76
K_WORLD_77 = SDL.constants.SDLK_WORLD_77
K_WORLD_78 = SDL.constants.SDLK_WORLD_78
K_WORLD_79 = SDL.constants.SDLK_WORLD_79
K_WORLD_8 = SDL.constants.SDLK_WORLD_8
K_WORLD_80 = SDL.constants.SDLK_WORLD_80
K_WORLD_81 = SDL.constants.SDLK_WORLD_81
K_WORLD_82 = SDL.constants.SDLK_WORLD_82
K_WORLD_83 = SDL.constants.SDLK_WORLD_83
K_WORLD_84 = SDL.constants.SDLK_WORLD_84
K_WORLD_85 = SDL.constants.SDLK_WORLD_85
K_WORLD_86 = SDL.constants.SDLK_WORLD_86
K_WORLD_87 = SDL.constants.SDLK_WORLD_87
K_WORLD_88 = SDL.constants.SDLK_WORLD_88
K_WORLD_89 = SDL.constants.SDLK_WORLD_89
K_WORLD_9 = SDL.constants.SDLK_WORLD_9
K_WORLD_90 = SDL.constants.SDLK_WORLD_90
K_WORLD_91 = SDL.constants.SDLK_WORLD_91
K_WORLD_92 = SDL.constants.SDLK_WORLD_92
K_WORLD_93 = SDL.constants.SDLK_WORLD_93
K_WORLD_94 = SDL.constants.SDLK_WORLD_94
K_WORLD_95 = SDL.constants.SDLK_WORLD_95
"""
K_a = 97
K_b = 98
K_c = 99
K_d = 100
K_e = 101
K_f = 102
K_g = 103
K_h = 104
K_i = 105
K_j = 106
K_k = 107
K_l = 108
K_m = 109
K_n = 110
K_o = 111
K_p = 112
K_q = 113
K_r = 114
K_s = 115
K_t = 116
K_u = 117
K_v = 118
K_w = 119
K_x = 120
K_y = 121
K_z = 122
#END GENERATED CONSTANTS
| gpl-3.0 |
alfonsokim/nupic | examples/opf/experiments/params/EnsembleOnline.py | 10 | 15348 | import random
import multiprocessing
import numpy as np
from nupic.frameworks.opf import helpers
from nupic.frameworks.opf.client import Client
from random import shuffle
from random import randrange, uniform
import copy
windowSize=36
r=30
predictedField='pounds'
inertia=0.25
socRate=1.0
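# Module-level ensemble hyper-parameters (descriptions inferred from their use below):
# windowSize - number of recent records used when scoring each model's AAE
# r - refresh period, in records, for pruning models and spawning PSO variants
# predictedField - name of the field the ensemble predicts
# inertia - PSO velocity damping factor used in getPSOVariants()
# socRate - PSO "social" rate scaling the pull toward the best-scoring model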
class Worker(multiprocessing.Process):
def __init__(self, work_queue, result_queue, stableSize, windowSize, predictedField, modeldata, iden):
multiprocessing.Process.__init__(self)
# job management
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
#Model State
self.stableSize=stableSize
self.windowSize=windowSize
self.stableUpdateStepSize=1
self.iden=iden
self.truth=[]
self.predictedField=predictedField
self.modeldata=modeldata
self.numModels=len(modeldata)
self.M={}
self.Scores={}
self.predictionStreams={}
self.median=True
self.index=-1
        self.modelCapacity=len(modeldata)
def run(self):
        self.initM(self.modeldata)
while not self.kill_received:
jobaux = self.work_queue.get()
command=jobaux[0]
if command=='predict':
self.index=self.index+1
self.updateModelStats()
self.result_queue.put([(self.Scores[m], self.predictionStreams[m][-1], self.truth[self.index], m) for m in self.M.keys()])
if command=='getPredictionStreams':
self.result_queue.put(dict([(m, self.predictionStreams[m][:-windowSize]) for m in self.predictionStreams.keys()]))
if command=='delete':
delList=jobaux[1]
for d in delList:
if(d in self.M):
del self.M[d]
del self.Scores[d]
del self.predictionStreams[d]
print 'deleted Model'+str(d)+" in process "+str(self.iden)
print "number of models remaining in "+str(self.iden)+": "+str(len(self.M))
self.result_queue.put(self.iden)
if command=='getAAEs':
self.result_queue.put([(m, computeAAE(self.truth, self.predictionStreams[m],r ), self.getModelState(self.M[m]), self.M[m]['modelDescription']) for m in self.M.keys()])
if command=='addPSOVariants':
for t in jobaux[1]:
if(t[0]==self.iden):
name=t[2]
modelDescription=t[1][0]
x=t[1][1]
v=t[1][2]
self.M[name]={}
self.M[name]['modelDescription']=modelDescription
self.M[name]['client']=Client(**modelDescription)
self.M[name]['alive']=True
self.M[name]['start']=0
self.M[name]['end']=None
self.M[name]['x']=x
self.M[name]['v']=v
self.Scores[name]=10000
self.predictionStreams[name]=[0,]
print "added new model "+str(name)+" to process"+str(self.iden)
# store the result
def getModelState(self, d):
return d['x'], d['v']
def initM(self, modelDatList):
for modelData in modelDatList:
name=modelData[0]
self.M[name]={}
self.M[name]['modelDescription']=modelData[1]
self.M[name]['client']=Client(**modelData[1])
alpha=modelData[1]['modelConfig']['modelParams']['clParams']['alpha']
n=0
for encoder in modelData[1]['modelConfig']['modelParams']['sensorParams']['encoders']:
if encoder['name']==predictedField:
n=encoder['n']
synPermInactiveDec=modelData[1]['modelConfig']['modelParams']['spParams']['synPermInactiveDec']
activationThreshold=modelData[1]['modelConfig']['modelParams']['tmParams']['activationThreshold']
pamLength=modelData[1]['modelConfig']['modelParams']['tmParams']['pamLength']
self.M[name]['x']=np.array([alpha, n,synPermInactiveDec,activationThreshold, pamLength ])
vAlpha=uniform(0.01, 0.15)
vN=randrange(30, 200, 5)
vSynPermInactiveDec=uniform(0.01, 0.15)
vActivationThreshold=randrange(12, 17, 1)
vPamLength=randrange(1, 6, 1)
self.M[name]['v']=np.array([vAlpha, vN,vSynPermInactiveDec,vActivationThreshold,vPamLength])
self.M[name]['alive']=True
self.M[name]['start']=0
self.M[name]['end']=None
self.Scores[name]=10000
self.predictionStreams[name]=[0,]
def updateModelStats(self):
updatedTruth=False
for m in self.M.keys():
truth, prediction=self.M[m]['client'].nextTruthPrediction(self.predictedField)
if(not updatedTruth):
self.truth.append(truth)
updatedTruth=True
self.predictionStreams[m].append(prediction)
self.Scores[m]=computeAAE(self.truth, self.predictionStreams[m],windowSize)
def getStableVote(scores, stableSize, votes, currModel):
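    # Voting scheme (as implemented below): keep the stableSize models with the lowest
    # windowed AAE; in median mode each of them receives a vote and the returned
    # prediction is the median of their individual predictions.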
scores = sorted(scores, key=lambda t: t[0])[:stableSize]
median=True
if not median:
for s in scores:
if s[3]==currModel:
print [(score[0], score[3]) for score in scores]
return s[1], currModel
print [(s[0], s[3]) for s in scores], "switching voting Model!"
return scores[0][1], scores[0][3]
else:
print [(s[0], s[3]) for s in scores]
voters = sorted(scores, key=lambda t: t[1])
for voter in voters:
votes[voter[3]]=votes[voter[3]]+1
vote=voters[int(stableSize/2)][1]
return vote, currModel
def getFieldPermutations(config, predictedField):
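    # Builds the candidate encoder subsets (inferred from the loops below): one list with
    # just the predicted field's encoder, plus one list per remaining field pairing that
    # field's encoder with the predicted field's encoder.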
encoders=config['modelParams']['sensorParams']['encoders']
encoderList=[]
for encoder in encoders:
if encoder==None:
continue
if encoder['name']==predictedField:
encoderList.append([encoder])
for e in encoders:
if e==None:
continue
if e['name'] != predictedField:
encoderList.append([encoder, e])
return encoderList
def getModelDescriptionLists(numProcesses, experiment):
config, control = helpers.loadExperiment(experiment)
encodersList=getFieldPermutations(config, 'pounds')
ns=range(50, 140, 120)
clAlphas=np.arange(0.01, 0.16, 0.104)
synPermInactives=np.arange(0.01, 0.16, 0.105)
tpPamLengths=range(5, 8, 2)
tpSegmentActivations=range(13, 17, 12)
if control['environment'] == 'opfExperiment':
experimentTasks = control['tasks']
task = experimentTasks[0]
datasetURI = task['dataset']['streams'][0]['source']
elif control['environment'] == 'nupic':
datasetURI = control['dataset']['streams'][0]['source']
metricSpecs = control['metrics']
datasetPath = datasetURI[len("file://"):]
ModelSetUpData=[]
name=0
for n in ns:
for clAlpha in clAlphas:
for synPermInactive in synPermInactives:
for tpPamLength in tpPamLengths:
for tpSegmentActivation in tpSegmentActivations:
for encoders in encodersList:
encodersmod=copy.deepcopy(encoders)
configmod=copy.deepcopy(config)
configmod['modelParams']['sensorParams']['encoders']=encodersmod
configmod['modelParams']['clParams']['alpha']=clAlpha
configmod['modelParams']['spParams']['synPermInactiveDec']=synPermInactive
configmod['modelParams']['tmParams']['pamLength']=tpPamLength
configmod['modelParams']['tmParams']['activationThreshold']=tpSegmentActivation
for encoder in encodersmod:
if encoder['name']==predictedField:
encoder['n']=n
ModelSetUpData.append((name,{'modelConfig':configmod, 'inferenceArgs':control['inferenceArgs'], 'metricSpecs':metricSpecs, 'sourceSpec':datasetPath,'sinkSpec':None,}))
name=name+1
#print modelInfo['modelConfig']['modelParams']['tmParams']
#print modelInfo['modelConfig']['modelParams']['sensorParams']['encoders'][4]['n']
print "num Models"+str( len(ModelSetUpData))
shuffle(ModelSetUpData)
#print [ (m[1]['modelConfig']['modelParams']['tmParams']['pamLength'], m[1]['modelConfig']['modelParams']['sensorParams']['encoders']) for m in ModelSetUpData]
return list(chunk(ModelSetUpData,numProcesses))
def chunk(l, n):
""" Yield n successive chunks from l.
"""
newn = int(1.0 * len(l) / n + 0.5)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
def command(command, work_queues, aux):
for queue in work_queues:
queue.put((command, aux))
def getDuplicateList(streams, delta):
delList=[]
keys=streams.keys()
for key1 in keys:
if key1 in streams:
for key2 in streams.keys():
if(key1 !=key2):
print 'comparing model'+str(key1)+" to "+str(key2)
dist=sum([(a-b)**2 for a, b in zip(streams[key1], streams[key2])])
print dist
if(dist<delta):
delList.append(key2)
del streams[key2]
return delList
def slice_sampler(px, N = 1, x = None):
"""
Provides samples from a user-defined distribution.
slice_sampler(px, N = 1, x = None)
Inputs:
px = A discrete probability distribution.
N = Number of samples to return, default is 1
x = Optional list/array of observation values to return, where prob(x) = px.
Outputs:
If x=None (default) or if len(x) != len(px), it will return an array of integers
between 0 and len(px)-1. If x is supplied, it will return the
samples from x according to the distribution px.
"""
values = np.zeros(N, dtype=np.int)
samples = np.arange(len(px))
px = np.array(px) / (1.*sum(px))
u = uniform(0, max(px))
for n in xrange(N):
included = px>=u
choice = random.sample(range(np.sum(included)), 1)[0]
values[n] = samples[included][choice]
u = uniform(0, px[included][choice])
if x:
if len(x) == len(px):
x=np.array(x)
values = x[values]
else:
print "px and x are different lengths. Returning index locations for px."
return values
def getPSOVariants(modelInfos, votes, n):
# get x, px lists for sampling
norm=sum(votes.values())
xpx =[(m, float(votes[m])/norm) for m in votes.keys()]
x,px = [[z[i] for z in xpx] for i in (0,1)]
#sample form set of models
variantIDs=slice_sampler(px, n, x)
print "variant IDS"
print variantIDs
#best X
x_best=modelInfos[0][2][0]
# create PSO variates of models
modelDescriptions=[]
for variantID in variantIDs:
t=modelInfos[[i for i, v in enumerate(modelInfos) if v[0] == variantID][0]]
x=t[2][0]
v=t[2][1]
print "old x"
print x
modelDescriptionMod=copy.deepcopy(t[3])
configmod=modelDescriptionMod['modelConfig']
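        # Standard particle-swarm update (note there is no cognitive/personal-best term):
        # damp the previous velocity by `inertia` and add a random per-dimension pull of
        # strength `socRate` toward x_best, the state of the best-scoring model.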
v=inertia*v+socRate*np.random.random_sample(len(v))*(x_best-x)
x=x+v
print "new x"
print x
configmod['modelParams']['clParams']['alpha']=max(0.01, x[0])
configmod['modelParams']['spParams']['synPermInactiveDec']=max(0.01, x[2])
configmod['modelParams']['tmParams']['pamLength']=int(round(max(1, x[4])))
configmod['modelParams']['tmParams']['activationThreshold']=int(round(max(1, x[3])))
for encoder in configmod['modelParams']['sensorParams']['encoders']:
if encoder['name']==predictedField:
encoder['n']=int(round(max(encoder['w']+1, x[1]) ))
modelDescriptions.append((modelDescriptionMod, x, v))
return modelDescriptions
def computeAAE(truth, predictions, windowSize):
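    # Windowed average absolute error between the truth stream and the (one-step offset)
    # prediction stream. e.g. computeAAE([1, 2, 3], [0, 1, 2, 3], 3) pairs
    # (1, 0), (2, 1), (3, 2) and returns 1.0.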
windowSize=min(windowSize, len(truth))
zipped=zip(truth[-windowSize:], predictions[-windowSize-1:])
AAE=sum([abs(a - b) for a, b in zipped])/windowSize
return AAE
if __name__ == "__main__":
cutPercentage=0.1
currModel=0
stableSize=3
delta=1
predictedField='pounds'
truth=[]
ensemblePredictions=[0,]
divisor=4
ModelSetUpData=getModelDescriptionLists(divisor, './')
num_processes=len(ModelSetUpData)
print num_processes
work_queues=[]
votes={}
votingParameterStats={"tpSegmentActivationThreshold":[], "tpPamLength":[], "synPermInactiveDec":[], "clAlpha":[], "numBuckets":[]}
# create a queue to pass to workers to store the results
result_queue = multiprocessing.Queue(len(ModelSetUpData))
# spawn workers
workerName=0
modelNameCount=0
for modelData in ModelSetUpData:
print len(modelData)
modelNameCount+=len(modelData)
work_queue= multiprocessing.Queue()
work_queues.append(work_queue)
worker = Worker(work_queue, result_queue, stableSize, windowSize, predictedField, modelData, workerName)
worker.start()
workerName=workerName+1
#init votes dict
for dataList in ModelSetUpData:
for data in dataList:
votes[data[0]]=0
for i in range(2120):
command('predict', work_queues, i)
scores=[]
for j in range(num_processes):
subscore=result_queue.get()
scores.extend(subscore)
print ""
print i
ensemblePrediction, currModel=getStableVote(scores, stableSize, votes, currModel)
ensemblePredictions.append(ensemblePrediction)
truth.append(scores[0][2])
print computeAAE(truth,ensemblePredictions, windowSize), int(currModel)
assert(result_queue.empty())
if i%r==0 and i!=0: #refresh ensemble
assert(result_queue.empty())
#get AAES of models over last i records
command('getAAEs', work_queues, None)
AAEs=[]
for j in range(num_processes):
subAAEs=result_queue.get()
AAEs.extend(subAAEs)
AAEs=sorted(AAEs, key=lambda t: t[1])
numToDelete=int(round(cutPercentage*len(AAEs)))
print "Single Model AAES"
print [(aae[0], aae[1]) for aae in AAEs]
print "Ensemble AAE"
print computeAAE(truth, ensemblePredictions, r)
#add bottom models to delList
print "Vote counts"
print votes
delList=[t[0] for t in AAEs[-numToDelete:]]
print "delList"
print delList
#find duplicate models(now unnecessary)
#command('getPredictionStreams', work_queues, None)
#streams={}
#for j in range(num_processes):
# subList=result_queue.get()
# streams.update(subList)
#delList.extend(getDuplicateList(streams, delta))
#print delList
command('delete', work_queues, delList)
for iden in delList:
del votes[iden]
print votes
#wait for deletion to finish and collect processIndices for addition
processIndices=[]
for j in range(num_processes):
processIndices.append( result_queue.get())
# pick new set of models for PSO variants
newModelDescriptions=getPSOVariants(AAEs, votes, len(delList))
assert(result_queue.empty())
#send new model dscriptions to queue and have processess pick them up
aux=[]
for i in range(len(newModelDescriptions)):
votes[modelNameCount]=0
aux.append((processIndices[i],newModelDescriptions[i],modelNameCount) )
modelNameCount=modelNameCount+1
command('addPSOVariants', work_queues, aux)
#set votes to 0
for key in votes.keys():
votes[key]=0
print "AAE over full stream"
print computeAAE(truth, ensemblePredictions, len(truth))
print "AAE1000"
print computeAAE(truth, ensemblePredictions, 1000)
| agpl-3.0 |
Iconik/eve-suite | src/model/static/map/constellation.py | 1 | 1590 | from model.flyweight import Flyweight
from model.static.database import database
class Constellation(Flyweight):
def __init__(self, constellation_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
self.constellation_id = constellation_id
cursor = database.get_cursor(
"select * from mapConstellations where constellationID={};".format(
self.constellation_id))
row = cursor.fetchone()
self.region_id = row["regionID"]
self.constellation_name = row["constellationName"]
self.x_pos = row["x"]
self.y_pos = row["y"]
self.z_pos = row["z"]
self.x_min = row["xMin"]
self.x_max = row["xMax"]
self.y_min = row["yMin"]
self.y_max = row["yMax"]
self.z_min = row["zMin"]
self.z_max = row["zMax"]
self.faction_id = row["factionID"]
self.radius = row["radius"]
cursor.close()
self._region = None
self._faction = None
def get_region(self):
"""Populates and returns the _region"""
if self._region is None:
from model.static.map.region import Region
self._region = Region(self.region_id)
return self._region
def get_faction(self):
"""Populates and returns the _faction"""
if self._faction is None:
from model.static.chr.faction import Faction
self._faction = Faction(self.faction_id)
return self._faction
| gpl-3.0 |
blois/AndroidSDKCloneMin | ndk/prebuilt/linux-x86_64/lib/python2.7/mailbox.py | 64 | 80751 | #! /usr/bin/env python
"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
#
# Remember to fsync() changes to disk before closing a modified file
# or returning from a flush() method. See functions _sync_flush() and
# _sync_close().
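#
# Typical use of this module (illustrative sketch only):
#
#     import mailbox
#     mb = mailbox.mbox('~/mail/inbox')   # '~' is expanded (see Mailbox.__init__)
#     mb.lock()
#     try:
#         mb.add('From: user@example.com\n\nhello\n')
#         mb.flush()                      # write pending changes to disk
#     finally:
#         mb.unlock()
#         mb.close()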
import sys
import os
import time
import calendar
import socket
import errno
import copy
import email
import email.message
import email.generator
import StringIO
try:
if sys.platform == 'os2emx':
# OS/2 EMX fcntl() not adequate
raise ImportError
import fcntl
except ImportError:
fcntl = None
import warnings
with warnings.catch_warnings():
if sys.py3kwarning:
warnings.filterwarnings("ignore", ".*rfc822 has been removed",
DeprecationWarning)
import rfc822
__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
'BabylMessage', 'MMDFMessage', 'UnixMailbox',
'PortableUnixMailbox', 'MmdfMailbox', 'MHMailbox', 'BabylMailbox' ]
class Mailbox:
"""A group of messages in a particular place."""
def __init__(self, path, factory=None, create=True):
"""Initialize a Mailbox instance."""
self._path = os.path.abspath(os.path.expanduser(path))
self._factory = factory
def add(self, message):
"""Add message and return assigned key."""
raise NotImplementedError('Method must be implemented by subclass')
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def __delitem__(self, key):
self.remove(key)
def discard(self, key):
"""If the keyed message exists, remove it."""
try:
self.remove(key)
except KeyError:
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def get(self, key, default=None):
"""Return the keyed message, or default if it doesn't exist."""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
return self._factory(self.get_file(key))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def iterkeys(self):
"""Return an iterator over keys."""
raise NotImplementedError('Method must be implemented by subclass')
def keys(self):
"""Return a list of keys."""
return list(self.iterkeys())
def itervalues(self):
"""Return an iterator over all messages."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield value
def __iter__(self):
return self.itervalues()
def values(self):
"""Return a list of messages. Memory intensive."""
return list(self.itervalues())
def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield (key, value)
def items(self):
"""Return a list of (key, message) tuples. Memory intensive."""
return list(self.iteritems())
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
raise NotImplementedError('Method must be implemented by subclass')
def __contains__(self, key):
return self.has_key(key)
def __len__(self):
"""Return a count of messages in the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def clear(self):
"""Delete all messages."""
for key in self.iterkeys():
self.discard(key)
def pop(self, key, default=None):
"""Delete the keyed message and return it, or default."""
try:
result = self[key]
except KeyError:
return default
self.discard(key)
return result
def popitem(self):
"""Delete an arbitrary (key, message) pair and return it."""
for key in self.iterkeys():
return (key, self.pop(key)) # This is only run once.
else:
raise KeyError('No messages in mailbox')
def update(self, arg=None):
"""Change the messages that correspond to certain keys."""
if hasattr(arg, 'iteritems'):
source = arg.iteritems()
elif hasattr(arg, 'items'):
source = arg.items()
else:
source = arg
bad_key = False
for key, message in source:
try:
self[key] = message
except KeyError:
bad_key = True
if bad_key:
raise KeyError('No message with key(s)')
def flush(self):
"""Write any pending changes to the disk."""
raise NotImplementedError('Method must be implemented by subclass')
def lock(self):
"""Lock the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def unlock(self):
"""Unlock the mailbox if it is locked."""
raise NotImplementedError('Method must be implemented by subclass')
def close(self):
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
# Whether each message must end in a newline
_append_newline = False
def _dump_message(self, message, target, mangle_from_=False):
# Most files are opened in binary mode to allow predictable seeking.
# To get native line endings on disk, the user-friendly \n line endings
# used in strings and by email.Message are translated here.
"""Dump message contents to target file."""
if isinstance(message, email.message.Message):
buffer = StringIO.StringIO()
gen = email.generator.Generator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
data = buffer.read().replace('\n', os.linesep)
target.write(data)
if self._append_newline and not data.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
elif isinstance(message, str):
if mangle_from_:
message = message.replace('\nFrom ', '\n>From ')
message = message.replace('\n', os.linesep)
target.write(message)
if self._append_newline and not message.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
elif hasattr(message, 'read'):
lastline = None
while True:
line = message.readline()
if line == '':
break
if mangle_from_ and line.startswith('From '):
line = '>From ' + line[5:]
line = line.replace('\n', os.linesep)
target.write(line)
lastline = line
if self._append_newline and lastline and not lastline.endswith(os.linesep):
# Make sure the message ends with a newline
target.write(os.linesep)
else:
raise TypeError('Invalid message type: %s' % type(message))
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
colon = ':'
def __init__(self, dirname, factory=rfc822.Message, create=True):
"""Initialize a Maildir instance."""
Mailbox.__init__(self, dirname, factory, create)
self._paths = {
'tmp': os.path.join(self._path, 'tmp'),
'new': os.path.join(self._path, 'new'),
'cur': os.path.join(self._path, 'cur'),
}
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
for path in self._paths.values():
os.mkdir(path, 0o700)
else:
raise NoSuchMailboxError(self._path)
self._toc = {}
self._toc_mtimes = {'cur': 0, 'new': 0}
self._last_read = 0 # Records last time we read cur/new
self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing
def add(self, message):
"""Add message and return assigned key."""
tmp_file = self._create_tmp()
try:
self._dump_message(message, tmp_file)
except BaseException:
tmp_file.close()
os.remove(tmp_file.name)
raise
_sync_close(tmp_file)
if isinstance(message, MaildirMessage):
subdir = message.get_subdir()
suffix = self.colon + message.get_info()
if suffix == self.colon:
suffix = ''
else:
subdir = 'new'
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
os.remove(tmp_file.name)
else:
os.rename(tmp_file.name, dest)
except OSError, e:
os.remove(tmp_file.name)
if e.errno == errno.EEXIST:
raise ExternalClashError('Name clash with existing message: %s'
% dest)
else:
raise
if isinstance(message, MaildirMessage):
os.utime(dest, (os.path.getatime(dest), message.get_date()))
return uniq
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key)))
def discard(self, key):
"""If the keyed message exists, remove it."""
# This overrides an inapplicable implementation in the superclass.
try:
self.remove(key)
except KeyError:
pass
except OSError, e:
if e.errno != errno.ENOENT:
raise
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
old_subpath = self._lookup(key)
temp_key = self.add(message)
temp_subpath = self._lookup(temp_key)
if isinstance(message, MaildirMessage):
# temp's subdir and suffix were specified by message.
dominant_subpath = temp_subpath
else:
# temp's subdir and suffix were defaults from add().
dominant_subpath = old_subpath
subdir = os.path.dirname(dominant_subpath)
if self.colon in dominant_subpath:
suffix = self.colon + dominant_subpath.split(self.colon)[-1]
else:
suffix = ''
self.discard(key)
new_path = os.path.join(self._path, subdir, key + suffix)
os.rename(os.path.join(self._path, temp_subpath), new_path)
if isinstance(message, MaildirMessage):
os.utime(new_path, (os.path.getatime(new_path),
message.get_date()))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
subpath = self._lookup(key)
f = open(os.path.join(self._path, subpath), 'r')
try:
if self._factory:
msg = self._factory(f)
else:
msg = MaildirMessage(f)
finally:
f.close()
subdir, name = os.path.split(subpath)
msg.set_subdir(subdir)
if self.colon in name:
msg.set_info(name.split(self.colon)[-1])
msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'r')
try:
return f.read()
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'rb')
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
self._refresh()
for key in self._toc:
try:
self._lookup(key)
except KeyError:
continue
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._refresh()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._refresh()
return len(self._toc)
def flush(self):
"""Write any pending changes to disk."""
# Maildir changes are always written immediately, so there's nothing
# to do.
pass
def lock(self):
"""Lock the mailbox."""
return
def unlock(self):
"""Unlock the mailbox if it is locked."""
return
def close(self):
"""Flush and close the mailbox."""
return
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if len(entry) > 1 and entry[0] == '.' and \
os.path.isdir(os.path.join(self._path, entry)):
result.append(entry[1:])
return result
def get_folder(self, folder):
"""Return a Maildir instance for the named folder."""
return Maildir(os.path.join(self._path, '.' + folder),
factory=self._factory,
create=False)
def add_folder(self, folder):
"""Create a folder and return a Maildir instance representing it."""
path = os.path.join(self._path, '.' + folder)
result = Maildir(path, factory=self._factory)
maildirfolder_path = os.path.join(path, 'maildirfolder')
if not os.path.exists(maildirfolder_path):
os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY,
0666))
return result
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, '.' + folder)
for entry in os.listdir(os.path.join(path, 'new')) + \
os.listdir(os.path.join(path, 'cur')):
if len(entry) < 1 or entry[0] != '.':
raise NotEmptyError('Folder contains message(s): %s' % folder)
for entry in os.listdir(path):
if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
os.path.isdir(os.path.join(path, entry)):
raise NotEmptyError("Folder contains subdirectory '%s': %s" %
(folder, entry))
for root, dirs, files in os.walk(path, topdown=False):
for entry in files:
os.remove(os.path.join(root, entry))
for entry in dirs:
os.rmdir(os.path.join(root, entry))
os.rmdir(path)
def clean(self):
"""Delete old files in "tmp"."""
now = time.time()
for entry in os.listdir(os.path.join(self._path, 'tmp')):
path = os.path.join(self._path, 'tmp', entry)
if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
os.remove(path)
_count = 1 # This is used to generate unique file names.
def _create_tmp(self):
"""Create a file in the tmp subdirectory and open and return it."""
now = time.time()
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
Maildir._count, hostname)
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except OSError, e:
if e.errno == errno.ENOENT:
Maildir._count += 1
try:
return _create_carefully(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
else:
raise
# Fall through to here if stat succeeded or open raised EEXIST.
raise ExternalClashError('Name clash prevented file creation: %s' %
path)
def _refresh(self):
"""Update table of contents mapping."""
# If it has been less than two seconds since the last _refresh() call,
# we have to unconditionally re-read the mailbox just in case it has
# been modified, because os.path.getmtime() has a 2 sec resolution in the
# most common worst case (FAT) and a 1 sec resolution typically. This
# results in a few unnecessary re-reads when _refresh() is called
# multiple times in that interval, but once the clock ticks over, we
# will only re-read as needed. Because the filesystem might be being
# served by an independent system with its own clock, we record and
# compare with the mtimes from the filesystem. Because the other
# system's clock might be skewing relative to our clock, we add an
# extra delta to our wait. The default is one tenth second, but is an
# instance variable and so can be adjusted if dealing with a
# particularly skewed or irregular system.
if time.time() - self._last_read > 2 + self._skewfactor:
refresh = False
for subdir in self._toc_mtimes:
mtime = os.path.getmtime(self._paths[subdir])
if mtime > self._toc_mtimes[subdir]:
refresh = True
self._toc_mtimes[subdir] = mtime
if not refresh:
return
# Refresh toc
self._toc = {}
for subdir in self._toc_mtimes:
path = self._paths[subdir]
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
self._last_read = time.time()
def _lookup(self, key):
"""Use TOC to return subpath for given key, or raise a KeyError."""
try:
if os.path.exists(os.path.join(self._path, self._toc[key])):
return self._toc[key]
except KeyError:
pass
self._refresh()
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
# This method is for backward compatibility only.
def next(self):
"""Return the next message in a one-time iteration."""
if not hasattr(self, '_onetime_keys'):
self._onetime_keys = self.iterkeys()
while True:
try:
return self[self._onetime_keys.next()]
except StopIteration:
return None
except KeyError:
continue
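# Illustrative sketch (not part of the mailbox API): typical use of the
# Maildir class above. The path is hypothetical.
def _example_maildir_usage(path='/tmp/example_maildir'):
    """Create a Maildir, add a message, and mark it as seen."""
    box = Maildir(path)             # creates tmp/, new/ and cur/ if missing
    key = box.add('From: [email protected]\n\nHello, world!\n')
    msg = box.get_message(key)      # returns a MaildirMessage
    msg.set_subdir('cur')           # the message has been read
    msg.add_flag('S')               # 'S' means "seen"
    box[key] = msg                  # write the changed info back
    return key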
class _singlefileMailbox(Mailbox):
"""A single-file mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize a single-file mailbox."""
Mailbox.__init__(self, path, factory, create)
try:
f = open(self._path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
if create:
f = open(self._path, 'wb+')
else:
raise NoSuchMailboxError(self._path)
elif e.errno in (errno.EACCES, errno.EROFS):
f = open(self._path, 'rb')
else:
raise
self._file = f
self._toc = None
self._next_key = 0
self._pending = False # No changes require rewriting the file.
self._pending_sync = False # No need to sync the file
self._locked = False
self._file_length = None # Used to record mailbox size
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
# _append_message appends the message to the mailbox file. We
# don't need a full rewrite + rename, sync is enough.
self._pending_sync = True
return self._next_key - 1
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
self._toc[key] = self._append_message(message)
self._pending = True
def iterkeys(self):
"""Return an iterator over keys."""
self._lookup()
for key in self._toc.keys():
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._lookup()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._lookup()
return len(self._toc)
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
self._locked = False
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
if self._pending_sync:
# Messages have only been added, so syncing the file
# is enough.
_sync_flush(self._file)
self._pending_sync = False
return
# In order to be writing anything out at all, self._toc must
# already have been generated (and presumably has been modified
# by adding or deleting an item).
assert self._toc is not None
# Check length of self._file; if it's changed, some other process
# has modified the mailbox since we scanned it.
self._file.seek(0, 2)
cur_len = self._file.tell()
if cur_len != self._file_length:
raise ExternalClashError('Size of mailbox file changed '
'(expected %i, found %i)' %
(self._file_length, cur_len))
new_file = _create_temporary(self._path)
try:
new_toc = {}
self._pre_mailbox_hook(new_file)
for key in sorted(self._toc.keys()):
start, stop = self._toc[key]
self._file.seek(start)
self._pre_message_hook(new_file)
new_start = new_file.tell()
while True:
buffer = self._file.read(min(4096,
stop - self._file.tell()))
if buffer == '':
break
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
self._file_length = new_file.tell()
except:
new_file.close()
os.remove(new_file.name)
raise
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
# Make sure the new file's mode is the same as the old file's
mode = os.stat(self._path).st_mode
os.chmod(new_file.name, mode)
try:
os.rename(new_file.name, self._path)
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(self._path)
os.rename(new_file.name, self._path)
else:
raise
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
self._pending_sync = False
if self._locked:
_lock_file(self._file, dotlock=False)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
return
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
return
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
return
def close(self):
"""Flush and close the mailbox."""
self.flush()
if self._locked:
self.unlock()
self._file.close() # Sync has been done by self.flush() above.
def _lookup(self, key=None):
"""Return (start, stop) or raise KeyError."""
if self._toc is None:
self._generate_toc()
if key is not None:
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
def _append_message(self, message):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
before = self._file.tell()
if len(self._toc) == 0 and not self._pending:
# This is the first message, and the _pre_mailbox_hook
# hasn't yet been called. If self._pending is True,
# messages have been removed, so _pre_mailbox_hook must
# have been called already.
self._pre_mailbox_hook(self._file)
try:
self._pre_message_hook(self._file)
offsets = self._install_message(message)
self._post_message_hook(self._file)
except BaseException:
self._file.truncate(before)
raise
self._file.flush()
self._file_length = self._file.tell() # Record current length of mailbox
return offsets
class _mboxMMDF(_singlefileMailbox):
"""An mbox or MMDF mailbox."""
_mangle_from_ = True
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(os.linesep, '')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(os.linesep, '\n'))
msg.set_from(from_line[5:])
return msg
def get_string(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
string = self._file.read(stop - self._file.tell())
return string.replace(os.linesep, '\n')
def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop)
def _install_message(self, message):
"""Format a message and blindly write to self._file."""
from_line = None
if isinstance(message, str) and message.startswith('From '):
newline = message.find('\n')
if newline != -1:
from_line = message[:newline]
message = message[newline + 1:]
else:
from_line = message
message = ''
elif isinstance(message, _mboxMMDFMessage):
from_line = 'From ' + message.get_from()
elif isinstance(message, email.message.Message):
from_line = message.get_unixfrom() # May be None.
if from_line is None:
from_line = 'From MAILER-DAEMON %s' % time.asctime(time.gmtime())
start = self._file.tell()
self._file.write(from_line + os.linesep)
self._dump_message(message, self._file, self._mangle_from_)
stop = self._file.tell()
return (start, stop)
class mbox(_mboxMMDF):
"""A classic mbox mailbox."""
_mangle_from_ = True
# All messages must end in a newline character, and
# _post_message_hook outputs an empty line between messages.
_append_newline = True
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
last_was_empty = False
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith('From '):
if len(stops) < len(starts):
if last_was_empty:
stops.append(line_pos - len(os.linesep))
else:
# The last line before the "From " line wasn't
# blank, but we consider it a start of a
# message anyway.
stops.append(line_pos)
starts.append(line_pos)
last_was_empty = False
elif not line:
if last_was_empty:
stops.append(line_pos - len(os.linesep))
else:
stops.append(line_pos)
break
elif line == os.linesep:
last_was_empty = True
else:
last_was_empty = False
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file_length = self._file.tell()
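# Illustrative sketch (not part of the mailbox API): appending to the mbox
# class above while holding its lock. The path is hypothetical.
def _example_mbox_usage(path='/tmp/example.mbox'):
    """Append a message to an mbox file under lockf/dot locking."""
    box = mbox(path)
    box.lock()                      # lockf plus dot locking on the file
    try:
        key = box.add('From: [email protected]\n\nAppended safely.\n')
        box.flush()                 # push pending changes to disk
    finally:
        box.unlock()
        box.close()
    return key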
class MMDF(_mboxMMDF):
"""An MMDF mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MMDF mailbox."""
self._message_factory = MMDFMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\001\001\001\001' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\001\001\001\001' + os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line.startswith('\001\001\001\001' + os.linesep):
starts.append(next_pos)
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\001\001\001\001' + os.linesep:
stops.append(line_pos - len(os.linesep))
break
elif line == '':
stops.append(line_pos)
break
elif line == '':
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
class MH(Mailbox):
"""An MH mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MH instance."""
Mailbox.__init__(self, path, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
os.close(os.open(os.path.join(self._path, '.mh_sequences'),
os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0600))
else:
raise NoSuchMailboxError(self._path)
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
keys = self.keys()
if len(keys) == 0:
new_key = 1
else:
new_key = max(keys) + 1
new_path = os.path.join(self._path, str(new_key))
f = _create_carefully(new_path)
closed = False
try:
if self._locked:
_lock_file(f)
try:
try:
self._dump_message(message, f)
except BaseException:
# Unlock and close so it can be deleted on Windows
if self._locked:
_unlock_file(f)
_sync_close(f)
closed = True
os.remove(new_path)
raise
if isinstance(message, MHMessage):
self._dump_sequences(message, new_key)
finally:
if self._locked:
_unlock_file(f)
finally:
if not closed:
_sync_close(f)
return new_key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
else:
f.close()
os.remove(path)
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
for name, key_list in self.get_sequences().iteritems():
if key in key_list:
msg.add_sequence(name)
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
return f.read()
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
try:
f = open(os.path.join(self._path, str(key)), 'rb')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
return iter(sorted(int(entry) for entry in os.listdir(self._path)
if entry.isdigit()))
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
return os.path.exists(os.path.join(self._path, str(key)))
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
def lock(self):
"""Lock the mailbox."""
if not self._locked:
self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
_sync_close(self._file)
del self._file
self._locked = False
def flush(self):
"""Write any pending changes to the disk."""
return
def close(self):
"""Flush and close the mailbox."""
if self._locked:
self.unlock()
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if os.path.isdir(os.path.join(self._path, entry)):
result.append(entry)
return result
def get_folder(self, folder):
"""Return an MH instance for the named folder."""
return MH(os.path.join(self._path, folder),
factory=self._factory, create=False)
def add_folder(self, folder):
"""Create a folder and return an MH instance representing it."""
return MH(os.path.join(self._path, folder),
factory=self._factory)
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, folder)
entries = os.listdir(path)
if entries == ['.mh_sequences']:
os.remove(os.path.join(path, '.mh_sequences'))
elif entries == []:
pass
else:
raise NotEmptyError('Folder not empty: %s' % self._path)
os.rmdir(path)
def get_sequences(self):
"""Return a name-to-key-list dictionary to define each sequence."""
results = {}
f = open(os.path.join(self._path, '.mh_sequences'), 'r')
try:
all_keys = set(self.keys())
for line in f:
try:
name, contents = line.split(':')
keys = set()
for spec in contents.split():
if spec.isdigit():
keys.add(int(spec))
else:
start, stop = (int(x) for x in spec.split('-'))
keys.update(range(start, stop + 1))
results[name] = [key for key in sorted(keys) \
if key in all_keys]
if len(results[name]) == 0:
del results[name]
except ValueError:
raise FormatError('Invalid sequence specification: %s' %
line.rstrip())
finally:
f.close()
return results
def set_sequences(self, sequences):
"""Set sequences using the given name-to-key-list dictionary."""
f = open(os.path.join(self._path, '.mh_sequences'), 'r+')
try:
os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
for name, keys in sequences.iteritems():
if len(keys) == 0:
continue
f.write('%s:' % name)
prev = None
completing = False
for key in sorted(set(keys)):
if key - 1 == prev:
if not completing:
completing = True
f.write('-')
elif completing:
completing = False
f.write('%s %s' % (prev, key))
else:
f.write(' %s' % key)
prev = key
if completing:
f.write(str(prev) + '\n')
else:
f.write('\n')
finally:
_sync_close(f)
def pack(self):
"""Re-name messages to eliminate numbering gaps. Invalidates keys."""
sequences = self.get_sequences()
prev = 0
changes = []
for key in self.iterkeys():
if key - 1 != prev:
changes.append((key, prev + 1))
if hasattr(os, 'link'):
os.link(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
os.unlink(os.path.join(self._path, str(key)))
else:
os.rename(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
prev += 1
self._next_key = prev + 1
if len(changes) == 0:
return
for name, key_list in sequences.items():
for old, new in changes:
if old in key_list:
key_list[key_list.index(old)] = new
self.set_sequences(sequences)
def _dump_sequences(self, message, key):
"""Inspect a new MHMessage and update sequences appropriately."""
pending_sequences = message.get_sequences()
all_sequences = self.get_sequences()
for name, key_list in all_sequences.iteritems():
if name in pending_sequences:
key_list.append(key)
elif key in key_list:
del key_list[key_list.index(key)]
for sequence in pending_sequences:
if sequence not in all_sequences:
all_sequences[sequence] = [key]
self.set_sequences(all_sequences)
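# Illustrative sketch (not part of the mailbox API): working with the MH
# sequence mapping handled by the class above. The path is hypothetical.
def _example_mh_sequences(path='/tmp/example_mh'):
    """Add a message to an MH mailbox and file it under a sequence."""
    box = MH(path)
    key = box.add('From: [email protected]\n\nFiled under "todo".\n')
    sequences = box.get_sequences()             # name -> list of keys
    sequences.setdefault('todo', []).append(key)
    box.set_sequences(sequences)                # rewrites .mh_sequences
    return box.get_sequences()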
class Babyl(_singlefileMailbox):
"""An Rmail-style Babyl mailbox."""
_special_labels = frozenset(('unseen', 'deleted', 'filed', 'answered',
'forwarded', 'edited', 'resent'))
def __init__(self, path, factory=None, create=True):
"""Initialize a Babyl mailbox."""
_singlefileMailbox.__init__(self, path, factory, create)
self._labels = {}
def add(self, message):
"""Add message and return assigned key."""
key = _singlefileMailbox.add(self, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
return key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.remove(self, key)
if key in self._labels:
del self._labels[key]
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.__setitem__(self, key, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
visible_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
visible_headers.write(line.replace(os.linesep, '\n'))
body = self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
msg = BabylMessage(original_headers.getvalue() + body)
msg.set_visible(visible_headers.getvalue())
if key in self._labels:
msg.set_labels(self._labels[key])
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
return original_headers.getvalue() + \
self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return StringIO.StringIO(self.get_string(key).replace('\n',
os.linesep))
def get_labels(self):
"""Return a list of user-defined labels in the mailbox."""
self._lookup()
labels = set()
for label_list in self._labels.values():
labels.update(label_list)
labels.difference_update(self._special_labels)
return list(labels)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
label_lists = []
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\037\014' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
starts.append(next_pos)
labels = [label.strip() for label
in self._file.readline()[1:].split(',')
if label.strip() != '']
label_lists.append(labels)
elif line == '\037' or line == '\037' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
elif line == '':
stops.append(line_pos - len(os.linesep))
break
self._toc = dict(enumerate(zip(starts, stops)))
self._labels = dict(enumerate(label_lists))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
f.write('BABYL OPTIONS:%sVersion: 5%sLabels:%s%s\037' %
(os.linesep, os.linesep, ','.join(self.get_labels()),
os.linesep))
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\014' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\037')
def _install_message(self, message):
"""Write message contents and return (start, stop)."""
start = self._file.tell()
if isinstance(message, BabylMessage):
special_labels = []
labels = []
for label in message.get_labels():
if label in self._special_labels:
special_labels.append(label)
else:
labels.append(label)
self._file.write('1')
for label in special_labels:
self._file.write(', ' + label)
self._file.write(',,')
for label in labels:
self._file.write(' ' + label + ',')
self._file.write(os.linesep)
else:
self._file.write('1,,' + os.linesep)
if isinstance(message, email.message.Message):
orig_buffer = StringIO.StringIO()
orig_generator = email.generator.Generator(orig_buffer, False, 0)
orig_generator.flatten(message)
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
self._file.write('*** EOOH ***' + os.linesep)
if isinstance(message, BabylMessage):
vis_buffer = StringIO.StringIO()
vis_generator = email.generator.Generator(vis_buffer, False, 0)
vis_generator.flatten(message.get_visible())
while True:
line = vis_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
else:
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
while True:
buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
elif isinstance(message, str):
body_start = message.find('\n\n') + 2
if body_start - 2 != -1:
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write('*** EOOH ***' + os.linesep)
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write(message[body_start:].replace('\n',
os.linesep))
else:
self._file.write('*** EOOH ***' + os.linesep + os.linesep)
self._file.write(message.replace('\n', os.linesep))
elif hasattr(message, 'readline'):
original_pos = message.tell()
first_pass = True
while True:
line = message.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
if first_pass:
first_pass = False
self._file.write('*** EOOH ***' + os.linesep)
message.seek(original_pos)
else:
break
while True:
buffer = message.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
else:
raise TypeError('Invalid message type: %s' % type(message))
stop = self._file.tell()
return (start, stop)
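# Illustrative sketch (not part of the mailbox API): label handling with
# the Babyl class above. The path is hypothetical.
def _example_babyl_labels(path='/tmp/example.babyl'):
    """Add a labelled message to a Babyl mailbox and list its labels."""
    box = Babyl(path)
    msg = BabylMessage('From: [email protected]\n\nRmail-style storage.\n')
    msg.add_label('important')
    box.add(msg)
    box.flush()
    return box.get_labels()         # user-defined labels in the mailbox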
class Message(email.message.Message):
"""Message with mailbox-format-specific properties."""
def __init__(self, message=None):
"""Initialize a Message instance."""
if isinstance(message, email.message.Message):
self._become_message(copy.deepcopy(message))
if isinstance(message, Message):
message._explain_to(self)
elif isinstance(message, str):
self._become_message(email.message_from_string(message))
elif hasattr(message, "read"):
self._become_message(email.message_from_file(message))
elif message is None:
email.message.Message.__init__(self)
else:
raise TypeError('Invalid message type: %s' % type(message))
def _become_message(self, message):
"""Assume the non-format-specific state of message."""
for name in ('_headers', '_unixfrom', '_payload', '_charset',
'preamble', 'epilogue', 'defects', '_default_type'):
self.__dict__[name] = message.__dict__[name]
def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type')
class MaildirMessage(Message):
"""Message with Maildir-specific properties."""
def __init__(self, message=None):
"""Initialize a MaildirMessage instance."""
self._subdir = 'new'
self._info = ''
self._date = time.time()
Message.__init__(self, message)
def get_subdir(self):
"""Return 'new' or 'cur'."""
return self._subdir
def set_subdir(self, subdir):
"""Set subdir to 'new' or 'cur'."""
if subdir == 'new' or subdir == 'cur':
self._subdir = subdir
else:
raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
def get_flags(self):
"""Return as a string the flags that are set."""
if self._info.startswith('2,'):
return self._info[2:]
else:
return ''
def set_flags(self, flags):
"""Set the given flags and unset all others."""
self._info = '2,' + ''.join(sorted(flags))
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if self.get_flags() != '':
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
def set_date(self, date):
"""Set delivery date of message, in seconds since the epoch."""
try:
self._date = float(date)
except ValueError:
raise TypeError("can't convert to float: %s" % date)
def get_info(self):
"""Get the message's "info" as a string."""
return self._info
def set_info(self, info):
"""Set the message's "info" string."""
if isinstance(info, str):
self._info = info
else:
raise TypeError('info must be a string: %s' % type(info))
def _explain_to(self, message):
"""Copy Maildir-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
message.set_flags(self.get_flags())
message.set_subdir(self.get_subdir())
message.set_date(self.get_date())
elif isinstance(message, _mboxMMDFMessage):
flags = set(self.get_flags())
if 'S' in flags:
message.add_flag('R')
if self.get_subdir() == 'cur':
message.add_flag('O')
if 'T' in flags:
message.add_flag('D')
if 'F' in flags:
message.add_flag('F')
if 'R' in flags:
message.add_flag('A')
message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_sequence('unseen')
if 'R' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_label('unseen')
if 'T' in flags:
message.add_label('deleted')
if 'R' in flags:
message.add_label('answered')
if 'P' in flags:
message.add_label('forwarded')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
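# Illustrative sketch (not part of the mailbox API): how the _explain_to()
# machinery above translates Maildir flags into mbox Status/X-Status flags.
def _example_flag_translation():
    """Convert a seen Maildir message and return its mbox flags ('RO')."""
    maildir_msg = MaildirMessage('From: [email protected]\n\nBody.\n')
    maildir_msg.set_subdir('cur')
    maildir_msg.add_flag('S')               # seen
    mbox_msg = mboxMessage(maildir_msg)     # _explain_to() runs during init
    return mbox_msg.get_flags()             # 'R' (read) + 'O' (old)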
class _mboxMMDFMessage(Message):
"""Message with mbox- or MMDF-specific properties."""
def __init__(self, message=None):
"""Initialize an mboxMMDFMessage instance."""
self.set_from('MAILER-DAEMON', True)
if isinstance(message, email.message.Message):
unixfrom = message.get_unixfrom()
if unixfrom is not None and unixfrom.startswith('From '):
self.set_from(unixfrom[5:])
Message.__init__(self, message)
def get_from(self):
"""Return contents of "From " line."""
return self._from
def set_from(self, from_, time_=None):
"""Set "From " line, formatting and appending time_ if specified."""
if time_ is not None:
if time_ is True:
time_ = time.gmtime()
from_ += ' ' + time.asctime(time_)
self._from = from_
def get_flags(self):
"""Return as a string the flags that are set."""
return self.get('Status', '') + self.get('X-Status', '')
def set_flags(self, flags):
"""Set the given flags and unset all others."""
flags = set(flags)
status_flags, xstatus_flags = '', ''
for flag in ('R', 'O'):
if flag in flags:
status_flags += flag
flags.remove(flag)
for flag in ('D', 'F', 'A'):
if flag in flags:
xstatus_flags += flag
flags.remove(flag)
xstatus_flags += ''.join(sorted(flags))
try:
self.replace_header('Status', status_flags)
except KeyError:
self.add_header('Status', status_flags)
try:
self.replace_header('X-Status', xstatus_flags)
except KeyError:
self.add_header('X-Status', xstatus_flags)
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def _explain_to(self, message):
"""Copy mbox- or MMDF-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
flags = set(self.get_flags())
if 'O' in flags:
message.set_subdir('cur')
if 'F' in flags:
message.add_flag('F')
if 'A' in flags:
message.add_flag('R')
if 'R' in flags:
message.add_flag('S')
if 'D' in flags:
message.add_flag('T')
del message['status']
del message['x-status']
maybe_date = ' '.join(self.get_from().split()[-5:])
try:
message.set_date(calendar.timegm(time.strptime(maybe_date,
'%a %b %d %H:%M:%S %Y')))
except (ValueError, OverflowError):
pass
elif isinstance(message, _mboxMMDFMessage):
message.set_flags(self.get_flags())
message.set_from(self.get_from())
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_sequence('unseen')
if 'A' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
del message['status']
del message['x-status']
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_label('unseen')
if 'D' in flags:
message.add_label('deleted')
if 'A' in flags:
message.add_label('answered')
del message['status']
del message['x-status']
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class mboxMessage(_mboxMMDFMessage):
"""Message with mbox-specific properties."""
class MHMessage(Message):
"""Message with MH-specific properties."""
def __init__(self, message=None):
"""Initialize an MHMessage instance."""
self._sequences = []
Message.__init__(self, message)
def get_sequences(self):
"""Return a list of sequences that include the message."""
return self._sequences[:]
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
def add_sequence(self, sequence):
"""Add sequence to list of sequences including the message."""
if isinstance(sequence, str):
if not sequence in self._sequences:
self._sequences.append(sequence)
else:
raise TypeError('sequence must be a string: %s' % type(sequence))
def remove_sequence(self, sequence):
"""Remove sequence from the list of sequences including the message."""
try:
self._sequences.remove(sequence)
except ValueError:
pass
def _explain_to(self, message):
"""Copy MH-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('R')
elif isinstance(message, _mboxMMDFMessage):
sequences = set(self.get_sequences())
if 'unseen' not in sequences:
message.add_flag('RO')
else:
message.add_flag('O')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('A')
elif isinstance(message, MHMessage):
for sequence in self.get_sequences():
message.add_sequence(sequence)
elif isinstance(message, BabylMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.add_label('unseen')
if 'replied' in sequences:
message.add_label('answered')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class BabylMessage(Message):
"""Message with Babyl-specific properties."""
def __init__(self, message=None):
"""Initialize an BabylMessage instance."""
self._labels = []
self._visible = Message()
Message.__init__(self, message)
def get_labels(self):
"""Return a list of labels on the message."""
return self._labels[:]
def set_labels(self, labels):
"""Set the list of labels on the message."""
self._labels = list(labels)
def add_label(self, label):
"""Add label to list of labels on the message."""
if isinstance(label, str):
if label not in self._labels:
self._labels.append(label)
else:
raise TypeError('label must be a string: %s' % type(label))
def remove_label(self, label):
"""Remove label from the list of labels on the message."""
try:
self._labels.remove(label)
except ValueError:
pass
def get_visible(self):
"""Return a Message representation of visible headers."""
return Message(self._visible)
def set_visible(self, visible):
"""Set the Message representation of visible headers."""
self._visible = Message(visible)
def update_visible(self):
"""Update and/or sensibly generate a set of visible headers."""
for header in self._visible.keys():
if header in self:
self._visible.replace_header(header, self[header])
else:
del self._visible[header]
for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
if header in self and header not in self._visible:
self._visible[header] = self[header]
def _explain_to(self, message):
"""Copy Babyl-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'forwarded' in labels or 'resent' in labels:
message.add_flag('P')
if 'answered' in labels:
message.add_flag('R')
if 'deleted' in labels:
message.add_flag('T')
elif isinstance(message, _mboxMMDFMessage):
labels = set(self.get_labels())
if 'unseen' not in labels:
message.add_flag('RO')
else:
message.add_flag('O')
if 'deleted' in labels:
message.add_flag('D')
if 'answered' in labels:
message.add_flag('A')
elif isinstance(message, MHMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.add_sequence('unseen')
if 'answered' in labels:
message.add_sequence('replied')
elif isinstance(message, BabylMessage):
message.set_visible(self.get_visible())
for label in self.get_labels():
message.add_label(label)
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class MMDFMessage(_mboxMMDFMessage):
"""Message with MMDF-specific properties."""
class _ProxyFile:
"""A read-only wrapper of a file."""
def __init__(self, f, pos=None):
"""Initialize a _ProxyFile."""
self._file = f
if pos is None:
self._pos = f.tell()
else:
self._pos = pos
def read(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read)
def readline(self, size=None):
"""Read a line."""
return self._read(size, self._file.readline)
def readlines(self, sizehint=None):
"""Read multiple lines."""
result = []
for line in self:
result.append(line)
if sizehint is not None:
sizehint -= len(line)
if sizehint <= 0:
break
return result
def __iter__(self):
"""Iterate over lines."""
return iter(self.readline, "")
def tell(self):
"""Return the position."""
return self._pos
def seek(self, offset, whence=0):
"""Change position."""
if whence == 1:
self._file.seek(self._pos)
self._file.seek(offset, whence)
self._pos = self._file.tell()
def close(self):
"""Close the file."""
if hasattr(self, '_file'):
if hasattr(self._file, 'close'):
self._file.close()
del self._file
def _read(self, size, read_method):
"""Read size bytes using read_method."""
if size is None:
size = -1
self._file.seek(self._pos)
result = read_method(size)
self._pos = self._file.tell()
return result
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
def __init__(self, f, start=None, stop=None):
"""Initialize a _PartialFile."""
_ProxyFile.__init__(self, f, start)
self._start = start
self._stop = stop
def tell(self):
"""Return the position with respect to start."""
return _ProxyFile.tell(self) - self._start
def seek(self, offset, whence=0):
"""Change position, possibly with respect to start or stop."""
if whence == 0:
self._pos = self._start
whence = 1
elif whence == 2:
self._pos = self._stop
whence = 1
_ProxyFile.seek(self, offset, whence)
def _read(self, size, read_method):
"""Read size bytes using read_method, honoring start and stop."""
remaining = self._stop - self._pos
if remaining <= 0:
return ''
if size is None or size < 0 or size > remaining:
size = remaining
return _ProxyFile._read(self, size, read_method)
def close(self):
# do *not* close the underlying file object for partial files,
# since it's global to the mailbox object
if hasattr(self, '_file'):
del self._file
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError, e:
if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except IOError, e:
if e.errno in (errno.EACCES, errno.EROFS):
return # Without write access, just skip dotlocking.
else:
raise
try:
if hasattr(os, 'link'):
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
os.unlink(pre_lock.name)
else:
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
else:
raise
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
def _unlock_file(f):
"""Unlock file f using lockf and dot locking."""
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if os.path.exists(f.name + '.lock'):
os.remove(f.name + '.lock')
def _create_carefully(path):
"""Create a file if it doesn't exist and open for reading and writing."""
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0666)
try:
return open(path, 'rb+')
finally:
os.close(fd)
def _create_temporary(path):
"""Create a temp file based on path and open for reading and writing."""
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
socket.gethostname(),
os.getpid()))
def _sync_flush(f):
"""Ensure changes to file f are physically on disk."""
f.flush()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
def _sync_close(f):
"""Close file f, ensuring all changes are physically on disk."""
_sync_flush(f)
f.close()
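# Illustrative sketch (not part of the mailbox API) tying the classes above
# together: copy every message from an mbox file into a Maildir. Both paths
# are supplied by the caller.
def _example_convert_mbox_to_maildir(mbox_path, maildir_path):
    """Copy all messages from an existing mbox file into a Maildir."""
    source = mbox(mbox_path, create=False)
    dest = Maildir(maildir_path)
    source.lock()
    try:
        for key in source.iterkeys():
            # Building a MaildirMessage from an mboxMessage carries the
            # flag state across via _explain_to().
            dest.add(MaildirMessage(source.get_message(key)))
    finally:
        source.unlock()
        source.close()
    return len(dest)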
## Start: classes from the original module (for backward compatibility).
# Note that the Maildir class, whose name is unchanged, itself offers a next()
# method for backward compatibility.
class _Mailbox:
def __init__(self, fp, factory=rfc822.Message):
self.fp = fp
self.seekp = 0
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
while 1:
self.fp.seek(self.seekp)
try:
self._search_start()
except EOFError:
self.seekp = self.fp.tell()
return None
start = self.fp.tell()
self._search_end()
self.seekp = stop = self.fp.tell()
if start != stop:
break
return self.factory(_PartialFile(self.fp, start, stop))
# Recommended to use PortableUnixMailbox instead!
class UnixMailbox(_Mailbox):
def _search_start(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
def _search_end(self):
self.fp.readline() # Throw away header line
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
# An overridable mechanism to test for From-line-ness. You can either
# specify a different regular expression or define a whole new
# _isrealfromline() method. Note that this only gets called for lines
# starting with the 5 characters "From ".
#
# BAW: According to
#http://home.netscape.com/eng/mozilla/2.0/relnotes/demo/content-length.html
# the only portable, reliable way to find message delimiters in a BSD (i.e
# Unix mailbox) style folder is to search for "\n\nFrom .*\n", or at the
# beginning of the file, "^From .*\n". While _fromlinepattern below seems
# like a good idea, in practice, there are too many variations for more
# strict parsing of the line to be completely accurate.
#
# _strict_isrealfromline() is the old version which tries to do stricter
# parsing of the From_ line. _portable_isrealfromline() simply returns
# true, since it's never called if the line doesn't already start with
# "From ".
#
# This algorithm, and the way it interacts with _search_start() and
# _search_end() may not be completely correct, because it doesn't check
# that the two characters preceding "From " are \n\n or the beginning of
# the file. Fixing this would require a more extensive rewrite than is
# necessary. For convenience, we've added a PortableUnixMailbox class
# which does no checking of the format of the 'From' line.
_fromlinepattern = (r"From \s*[^\s]+\s+\w\w\w\s+\w\w\w\s+\d?\d\s+"
r"\d?\d:\d\d(:\d\d)?(\s+[^\s]+)?\s+\d\d\d\d\s*"
r"[^\s]*\s*"
"$")
_regexp = None
def _strict_isrealfromline(self, line):
if not self._regexp:
import re
self._regexp = re.compile(self._fromlinepattern)
return self._regexp.match(line)
def _portable_isrealfromline(self, line):
return True
_isrealfromline = _strict_isrealfromline
class PortableUnixMailbox(UnixMailbox):
_isrealfromline = UnixMailbox._portable_isrealfromline
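# Illustrative sketch (not part of the original API): iterating an mbox file
# with the backward-compatible reader above. The path is hypothetical.
def _example_old_unixmailbox(path='/tmp/example.mbox'):
    """Print the Subject header of each message using the old-style API."""
    fp = open(path, 'r')
    try:
        for msg in PortableUnixMailbox(fp):
            print msg.getheader('subject')  # msg is an rfc822.Message
    finally:
        fp.close()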
class MmdfMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == '\001\001\001\001\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\001\001\001\001\n':
self.fp.seek(pos)
return
class MHMailbox:
def __init__(self, dirname, factory=rfc822.Message):
import re
pat = re.compile('^[1-9][0-9]*$')
self.dirname = dirname
# the three following lines could be combined into:
# list = map(long, filter(pat.match, os.listdir(self.dirname)))
list = os.listdir(self.dirname)
list = filter(pat.match, list)
list = map(long, list)
list.sort()
# This only works in Python 1.6 or later;
# before that str() added 'L':
self.boxes = map(str, list)
self.boxes.reverse()
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
if not self.boxes:
return None
fn = self.boxes.pop()
fp = open(os.path.join(self.dirname, fn))
msg = self.factory(fp)
try:
msg._mh_msgno = fn
except (AttributeError, TypeError):
pass
return msg
class BabylMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line == '*** EOOH ***\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\037\014\n' or line == '\037':
self.fp.seek(pos)
return
## End: classes from the original module (for backward compatibility).
class Error(Exception):
"""Raised for module-specific errors."""
class NoSuchMailboxError(Error):
"""The specified mailbox does not exist and won't be created."""
class NotEmptyError(Error):
"""The specified mailbox is not empty and deletion was requested."""
class ExternalClashError(Error):
"""Another process caused an action to fail."""
class FormatError(Error):
"""A file appears to have an invalid format."""
| apache-2.0 |
ElectroweakGroup/Database_Extraction_Tool | Main.py | 1 | 1572 | import IsotopeDataExporting as ided
import os
import glob
import time
import sys
import renormalize as renorm
def function(option):
#Exports data requested by the user into text files (necessary to generate plots)
userInput = ided.datExp(option,True,True)
#Prints the user input so the user can check what they entered
#against the plot they are viewing
#The sleep is a pause so the timestamps used work correctly
renorm.renormalize(userInput[0],userInput[1],userInput[2],userInput[3])
time.sleep(0.01)
#Makes plot (.012 s)
ided.pltFileExp(option,userInput[6],userInput[4],userInput[0],userInput[1],userInput[2],userInput[7],userInput[3],True)
#This code creates the .gif file which is the actual plot
os.chdir("Output/gnuPlot")
directory = os.getcwd()
try:
newest = max(glob.iglob(directory+"/*.plt"),key=os.path.getctime)
newest = newest.replace(os.getcwd()+"/","")
os.system("gnuplot "+newest)
except:
print('No new plot')
#This code restarts the program so it can be used again
os.chdir("..")
os.chdir("..")
os.system("python3 Main.py "+option)
newest = "Output/gnuPlot/"+newest.replace(".plt",".gif")
if os.path.isfile(newest):
os.system("rm "+newest)
try:
os.system("mv Output/gnuPlot/*.dat Output/gnuPlot/OutputData")
os.system("mv Output/gnuPlot/*.plt Output/gnuPlot/OutputData")
except:
pass
option = sys.argv[-1]
function(option)
| mit |
javierder/dogestart.me | django/db/models/fields/files.py | 105 | 15978 | import datetime
import os
from django import forms
from django.db.models.fields import Field
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.db.models import signals
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
# By overriding the == operator, it can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> instance.file = File(...)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
# This is slightly complicated, so worth an explanation.
# `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
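# Illustrative sketch of the assignment forms the descriptor above accepts.
# ``Document`` is a hypothetical model with a FileField named ``attachment``
# and is not part of this module:
#
#     from django.core.files.base import ContentFile
#
#     doc = Document.objects.get(pk=1)
#     doc.attachment = 'uploads/report.txt'            # plain path string
#     doc.attachment = ContentFile('data', 'new.txt')  # File object, committed on save()
#     doc.save()
#     doc.attachment.url                               # delegated to the storage backend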
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
for arg in ('primary_key', 'unique'):
if arg in kwargs:
raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))
self.storage = storage or default_storage
self.upload_to = upload_to
if callable(upload_to):
self.generate_filename = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name):
super(FileField, self).contribute_to_class(cls, name)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
return os.path.join(self.get_directory_name(), self.get_filename(filename))
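    # For example (hypothetical values, depending on the storage backend's
    # get_valid_name()): with upload_to='uploads/%Y/%m' and an incoming
    # filename of 'photo.jpg', generate_filename() would return something
    # like 'uploads/2013/05/photo.jpg'.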
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
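    # A hedged illustration of the convention above (object and values are
    # hypothetical):
    #     field.save_form_data(obj, None)      # no change to the attribute
    #     field.save_form_data(obj, False)     # cleared, stored as ''
    #     field.save_form_data(obj, uploaded)  # new file assigned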
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def contribute_to_class(self, cls, name):
super(ImageField, self).contribute_to_class(cls, name)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
        # Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
        # database. In the second case, we do want to update the dimension
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
            # No file, so clear dimension fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
| mit |
GenericStudent/home-assistant | homeassistant/components/cast/home_assistant_cast.py | 9 | 2387 | """Home Assistant Cast integration for Cast."""
from typing import Optional
from pychromecast.controllers.homeassistant import HomeAssistantController
import voluptuous as vol
from homeassistant import auth, config_entries, core
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.helpers import config_validation as cv, dispatcher
from homeassistant.helpers.network import get_url
from .const import DOMAIN, SIGNAL_HASS_CAST_SHOW_VIEW
SERVICE_SHOW_VIEW = "show_lovelace_view"
ATTR_VIEW_PATH = "view_path"
ATTR_URL_PATH = "dashboard_path"
async def async_setup_ha_cast(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up Home Assistant Cast."""
user_id: Optional[str] = entry.data.get("user_id")
user: Optional[auth.models.User] = None
if user_id is not None:
user = await hass.auth.async_get_user(user_id)
if user is None:
user = await hass.auth.async_create_system_user(
"Home Assistant Cast", [auth.GROUP_ID_ADMIN]
)
hass.config_entries.async_update_entry(
entry, data={**entry.data, "user_id": user.id}
)
if user.refresh_tokens:
refresh_token: auth.models.RefreshToken = list(user.refresh_tokens.values())[0]
else:
refresh_token = await hass.auth.async_create_refresh_token(user)
async def handle_show_view(call: core.ServiceCall):
"""Handle a Show View service call."""
hass_url = get_url(hass, require_ssl=True, prefer_external=True)
controller = HomeAssistantController(
# If you are developing Home Assistant Cast, uncomment and set to your dev app id.
# app_id="5FE44367",
hass_url=hass_url,
client_id=None,
refresh_token=refresh_token.token,
)
dispatcher.async_dispatcher_send(
hass,
SIGNAL_HASS_CAST_SHOW_VIEW,
controller,
call.data[ATTR_ENTITY_ID],
call.data[ATTR_VIEW_PATH],
call.data.get(ATTR_URL_PATH),
)
hass.helpers.service.async_register_admin_service(
DOMAIN,
SERVICE_SHOW_VIEW,
handle_show_view,
vol.Schema(
{
ATTR_ENTITY_ID: cv.entity_id,
ATTR_VIEW_PATH: str,
vol.Optional(ATTR_URL_PATH): str,
}
),
)
| apache-2.0 |
Jgarcia-IAS/localizacion | openerp/addons/base/tests/test_osv.py | 446 | 4722 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.osv.query import Query
class QueryTestCase(unittest.TestCase):
def test_basic_query(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_product", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product" LEFT JOIN "res_user" as "product_product__user_id" ON ("product_product"."user_id" = "product_product__user_id"."id"),"product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_query_chained_explicit_joins(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # CHAINED outer join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id")""".strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id""".strip())
def test_mixed_query_chained_explicit_implicit_joins(self):
query = Query()
query.tables.extend(['"product_product"', '"product_template"'])
query.where_clause.append("product_product.template_id = product_template.id")
query.add_join(("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False) # add normal join
query.add_join(("product_template__categ_id", "res_user", "user_id", "id", "user_id"), implicit=False, outer=True) # CHAINED outer join
query.tables.append('"account.account"')
query.where_clause.append("product_category.expense_account_id = account_account.id") # additional implicit join
self.assertEquals(query.get_sql()[0].strip(),
""""product_product","product_template" JOIN "product_category" as "product_template__categ_id" ON ("product_template"."categ_id" = "product_template__categ_id"."id") LEFT JOIN "res_user" as "product_template__categ_id__user_id" ON ("product_template__categ_id"."user_id" = "product_template__categ_id__user_id"."id"),"account.account" """.strip())
self.assertEquals(query.get_sql()[1].strip(), """product_product.template_id = product_template.id AND product_category.expense_account_id = account_account.id""".strip())
def test_raise_missing_lhs(self):
query = Query()
query.tables.append('"product_product"')
self.assertRaises(AssertionError, query.add_join, ("product_template", "product_category", "categ_id", "id", "categ_id"), implicit=False, outer=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
apache/airflow | airflow/sentry.py | 2 | 6613 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Sentry Integration"""
import logging
from functools import wraps
from airflow.configuration import conf
from airflow.utils.session import find_session_idx, provide_session
from airflow.utils.state import State
log = logging.getLogger(__name__)
class DummySentry:
"""Blank class for Sentry."""
@classmethod
def add_tagging(cls, task_instance):
"""Blank function for tagging."""
@classmethod
def add_breadcrumbs(cls, task_instance, session=None):
"""Blank function for breadcrumbs."""
@classmethod
def enrich_errors(cls, run):
"""Blank function for formatting a TaskInstance._run_raw_task."""
return run
def flush(self):
"""Blank function for flushing errors."""
Sentry: DummySentry = DummySentry()
if conf.getboolean("sentry", 'sentry_on', fallback=False):
import sentry_sdk
# Verify blinker installation
from blinker import signal # noqa: F401
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.logging import ignore_logger
class ConfiguredSentry(DummySentry):
"""Configure Sentry SDK."""
SCOPE_TAGS = frozenset(("task_id", "dag_id", "execution_date", "operator", "try_number"))
SCOPE_CRUMBS = frozenset(("task_id", "state", "operator", "duration"))
UNSUPPORTED_SENTRY_OPTIONS = frozenset(
(
"integrations",
"in_app_include",
"in_app_exclude",
"ignore_errors",
"before_breadcrumb",
"before_send",
"transport",
)
)
def __init__(self):
"""Initialize the Sentry SDK."""
ignore_logger("airflow.task")
ignore_logger("airflow.jobs.backfill_job.BackfillJob")
executor_name = conf.get("core", "EXECUTOR")
sentry_flask = FlaskIntegration()
# LoggingIntegration is set by default.
integrations = [sentry_flask]
if executor_name == "CeleryExecutor":
from sentry_sdk.integrations.celery import CeleryIntegration
sentry_celery = CeleryIntegration()
integrations.append(sentry_celery)
dsn = None
sentry_config_opts = conf.getsection("sentry") or {}
if sentry_config_opts:
sentry_config_opts.pop("sentry_on")
old_way_dsn = sentry_config_opts.pop("sentry_dsn", None)
new_way_dsn = sentry_config_opts.pop("dsn", None)
# supported backward compatibility with old way dsn option
dsn = old_way_dsn or new_way_dsn
unsupported_options = self.UNSUPPORTED_SENTRY_OPTIONS.intersection(sentry_config_opts.keys())
if unsupported_options:
log.warning(
"There are unsupported options in [sentry] section: %s",
", ".join(unsupported_options),
)
if dsn:
sentry_sdk.init(dsn=dsn, integrations=integrations, **sentry_config_opts)
else:
# Setting up Sentry using environment variables.
log.debug("Defaulting to SENTRY_DSN in environment.")
sentry_sdk.init(integrations=integrations, **sentry_config_opts)
def add_tagging(self, task_instance):
"""Function to add tagging for a task_instance."""
task = task_instance.task
with sentry_sdk.configure_scope() as scope:
for tag_name in self.SCOPE_TAGS:
attribute = getattr(task_instance, tag_name)
if tag_name == "operator":
attribute = task.__class__.__name__
scope.set_tag(tag_name, attribute)
@provide_session
def add_breadcrumbs(self, task_instance, session=None):
"""Function to add breadcrumbs inside of a task_instance."""
if session is None:
return
execution_date = task_instance.execution_date
task = task_instance.task
dag = task.dag
task_instances = dag.get_task_instances(
state={State.SUCCESS, State.FAILED},
end_date=execution_date,
start_date=execution_date,
session=session,
)
for ti in task_instances:
data = {}
for crumb_tag in self.SCOPE_CRUMBS:
data[crumb_tag] = getattr(ti, crumb_tag)
sentry_sdk.add_breadcrumb(category="completed_tasks", data=data, level="info")
def enrich_errors(self, func):
"""Wrap TaskInstance._run_raw_task to support task specific tags and breadcrumbs."""
session_args_idx = find_session_idx(func)
@wraps(func)
def wrapper(task_instance, *args, **kwargs):
# Wrapping the _run_raw_task function with push_scope to contain
# tags and breadcrumbs to a specific Task Instance
try:
session = kwargs.get('session', args[session_args_idx])
except IndexError:
session = None
with sentry_sdk.push_scope():
try:
return func(task_instance, *args, **kwargs)
except Exception as e:
self.add_tagging(task_instance)
self.add_breadcrumbs(task_instance, session=session)
sentry_sdk.capture_exception(e)
raise
return wrapper
def flush(self):
sentry_sdk.flush()
Sentry = ConfiguredSentry()
| apache-2.0 |
teltek/edx-platform | openedx/core/djangoapps/user_api/management/tests/test_bulk_rehash_retired_usernames.py | 6 | 6666 | """
Test the bulk_rehash_retired_usernames management command
"""
from mock import call, patch
import pytest
from django.conf import settings
from django.core.management import call_command
from user_util.user_util import get_retired_username
from lms.lib import comment_client
from openedx.core.djangoapps.user_api.accounts.tests.retirement_helpers import (
setup_retirement_states, fake_completed_retirement
)
from openedx.core.djangoapps.user_api.models import UserRetirementStatus
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def _setup_users():
"""
Creates and returns test users in the different states of needing rehash:
- Skipped: The retired username does not require updating, some of these are fake retired
- Needing rehash: Has been fake-retired and name changed so it triggers a hash update
"""
# When we loop through creating users, take additional action on these
user_indexes_to_be_fake_retired = (2, 4, 6, 8, 10)
user_indexes_to_be_rehashed = (4, 6)
users_skipped = []
users_needing_rehash = []
retirements = {}
# Create some test users with retirements
for i in range(1, 11):
user = UserFactory()
retirement = UserRetirementStatus.create_retirement(user)
retirements[user.id] = retirement
if i in user_indexes_to_be_fake_retired:
fake_completed_retirement(user)
if i in user_indexes_to_be_rehashed:
# In order to need a rehash user.username the new hash must be
# different, we force that here.
retirement.retired_username = retirement.retired_username.upper()
user.username = retirement.retired_username
retirement.save()
user.save()
users_needing_rehash.append(user)
else:
users_skipped.append(user)
else:
users_skipped.append(user)
return users_skipped, users_needing_rehash, retirements
@skip_unless_lms
@pytest.mark.usefixtures("setup_retirement_states")
@patch('lms.lib.comment_client.User.retire')
def test_successful_rehash(retire_user_forums, capsys):
"""
Run the command with users of all different hash statuses, expect success
"""
users_skipped, users_needing_rehash, retirements = _setup_users()
call_command('bulk_rehash_retired_usernames')
output = capsys.readouterr().out
# Make sure forums was called the correct number of times
assert retire_user_forums.call_count == 2
for user in users_skipped:
assert "User ID {} because the hash would not change.".format(user.id) in output
expected_username_calls = []
for user in users_needing_rehash:
retirement = retirements[user.id]
user.refresh_from_db()
retirement.refresh_from_db()
new_retired_username = get_retired_username(
retirement.original_username,
settings.RETIRED_USER_SALTS,
settings.RETIRED_USERNAME_FMT
)
expected_username_calls.append(call(new_retired_username))
assert "User ID {} to rehash their retired username".format(user.id) in output
assert new_retired_username == user.username
assert new_retired_username == retirement.retired_username
retire_user_forums.assert_has_calls(expected_username_calls)
@skip_unless_lms
@pytest.mark.usefixtures("setup_retirement_states")
@patch('lms.lib.comment_client.User.retire')
def test_forums_failed(retire_user_forums, capsys):
"""
    Run the command with users of all different hash statuses; expect the rehash to fail when the forums retirement call raises an error
"""
users_skipped, users_needing_rehash, retirements = _setup_users()
retire_user_forums.side_effect = Exception('something bad happened with forums')
call_command('bulk_rehash_retired_usernames')
output = capsys.readouterr().out
# Make sure forums was called the correct number of times
assert retire_user_forums.call_count == 2
for user in users_skipped:
assert "User ID {} because the hash would not change.".format(user.id) in output
expected_username_calls = []
for user in users_needing_rehash:
retirement = retirements[user.id]
user.refresh_from_db()
retirement.refresh_from_db()
new_retired_username = get_retired_username(
retirement.original_username,
settings.RETIRED_USER_SALTS,
settings.RETIRED_USERNAME_FMT
)
expected_username_calls.append(call(new_retired_username))
assert "User ID {} to rehash their retired username".format(user.id) in output
# Confirm that the usernames are *not* updated, due to the forums error
assert new_retired_username != user.username
assert new_retired_username != retirement.retired_username
assert "FAILED! 2 retirements failed to rehash. Retirement IDs:" in output
retire_user_forums.assert_has_calls(expected_username_calls)
@skip_unless_lms
@pytest.mark.usefixtures("setup_retirement_states")
@patch('lms.lib.comment_client.User.retire')
def test_forums_404(retire_user_forums, capsys):
"""
    Run the command with users of all different hash statuses; expect success even when the forums retirement call returns a 404
"""
users_skipped, users_needing_rehash, retirements = _setup_users()
retire_user_forums.side_effect = comment_client.utils.CommentClientRequestError('not found', status_codes=404)
call_command('bulk_rehash_retired_usernames')
output = capsys.readouterr().out
# Make sure forums was called the correct number of times
assert retire_user_forums.call_count == 2
for user in users_skipped:
assert "User ID {} because the hash would not change.".format(user.id) in output
expected_username_calls = []
for user in users_needing_rehash:
retirement = retirements[user.id]
user.refresh_from_db()
retirement.refresh_from_db()
new_retired_username = get_retired_username(
retirement.original_username,
settings.RETIRED_USER_SALTS,
settings.RETIRED_USERNAME_FMT
)
expected_username_calls.append(call(new_retired_username))
assert "User ID {} to rehash their retired username".format(user.id) in output
# Confirm that the usernames *are* updated, since this is a non-blocking forums error
assert new_retired_username == user.username
assert new_retired_username == retirement.retired_username
assert "Success!" in output
retire_user_forums.assert_has_calls(expected_username_calls)
| agpl-3.0 |
teddym6/qualitybots | src/appengine/handlers/config_handler.py | 26 | 2465 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for setting configuration options for the system."""
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from handlers import base
from models import aws_account_details
SET_AWS_ACCOUNT_URL = '/config/set_aws_account'
CONFIG_URL = '/config/config'
class SetAwsAccount(base.BaseHandler):
"""Handler to allow an admin to update the AWS credentials."""
# Disable the "Invalid method name" warnings.
# pylint: disable-msg=C6409
def post(self):
"""Allows an admin user to set the AWS credentials used by the system.
Url Params:
aws_account_number: Amazon EC2 account number.
aws_access_key_id: AWS access Key ID.
aws_secret_access_key: AWS secret access key.
"""
aws_account_number = self.GetRequiredParameter('aws_account_number')
aws_access_key_id = self.GetRequiredParameter('aws_access_key_id')
aws_secret_access_key = self.GetRequiredParameter('aws_secret_access_key')
account_details = aws_account_details.AwsAccountDetails.get()
if not account_details:
account_details = aws_account_details.AwsAccountDetails()
account_details.aws_account_number = aws_account_number
account_details.aws_access_key_id = aws_access_key_id
account_details.aws_secret_access_key = aws_secret_access_key
account_details.put()
class ConfigPage(base.BaseHandler):
"""Handler for the configuration page."""
# Disable the "Invalid method name" warnings.
# pylint: disable-msg=C6409
def get(self):
"""Displays the Add Url landing page."""
self.RenderTemplate('config_settings.html', {})
application = webapp.WSGIApplication(
[(SET_AWS_ACCOUNT_URL, SetAwsAccount),
(CONFIG_URL, ConfigPage)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == '__main__':
main()
| apache-2.0 |
posterior/loom | loom/test/test_posterior_enum.py | 2 | 26903 | # Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
# Copyright (c) 2015, Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from itertools import imap, product
from nose import SkipTest
from nose.tools import assert_true, assert_false, assert_equal
import numpy
import numpy.random
from distributions.tests.util import seed_all
from distributions.util import scores_to_probs
from distributions.io.stream import protobuf_stream_load, protobuf_stream_dump
from distributions.lp.clustering import PitmanYor
from goftests import multinomial_goodness_of_fit
from loom.util import tempdir
import loom.schema_pb2
import loom.schema
import loom.format
import loom.runner
import loom.util
import loom.test.util
import parsable
parsable = parsable.Parsable()
TRUNCATE_COUNT = 32
MIN_GOODNESS_OF_FIT = 5e-4
SCORE_TOL = 1e-1 # FIXME why does this need to be so large?
SEED = 123
FEATURE_TYPES = loom.schema.MODELS.copy()
DENSITIES = [
1.0,
0.5,
0.0,
]
# Cross Cat Latent Space Sizes up to 10000000, generated by:
# python test_posterior_enum.py datasets 10000000
LATENT_SIZES = [
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975, 678570, 4213597],
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975, 678570, 4213597],
[1, 2, 6, 22, 94, 454, 2430, 14214, 89918, 610182, 4412798],
[1, 5, 30, 205, 1555, 12880, 115155, 1101705],
[1, 15, 240, 4065, 72465, 1353390],
[1, 52, 2756, 148772, 8174244],
[1, 203, 41412, 8489257],
[1, 877, 770006],
[1, 4140],
[1, 21147],
[1, 115975],
[1, 678570],
[1, 4213597],
]
CAT_MAX_SIZE = 100000
KIND_MAX_SIZE = 205
GRID_SIZE = 2
PITMAN_YOR_GRID = [
{'alpha': 2.0, 'd': 0.1},
{'alpha': 10., 'd': 0.1},
]
HYPER_PRIOR = {
'topology': PITMAN_YOR_GRID,
'clustering': PITMAN_YOR_GRID,
'bb': {
'alpha': [0.5, 2.0],
'beta': [0.5, 2.0],
},
'dd': {
'alpha': [.5, 1.5],
},
'dpd': {
'alpha': [.5, 1.5],
'gamma': [.5, 1.5],
},
'gp': {
'alpha': [.5, 1.5],
'inv_beta': [.5, 1.5],
},
'nich': {
'kappa': [.5, 1.5],
'mu': [-1., 1.],
'nu': [.5, 1.5],
'sigmasq': [.5, 1.5],
}
}
CLUSTERING = PitmanYor.from_dict({'alpha': 2.0, 'd': 0.1})
if __name__ == '__main__' and sys.stdout.isatty():
colorize = {
'Info': '\x1b[34mInfo\x1b[0m',
'Warn': '\x1b[33mWarn\x1b[0m',
'Fail': '\x1b[31mFail\x1b[0m',
'Pass': '\x1b[32mPass\x1b[0m',
}
else:
colorize = {}
def LOG(prefix, casename, comment=''):
prefix = colorize.get(prefix, prefix)
message = '{: <4} {: <18} {}'.format(prefix, casename, comment)
sys.stdout.write(message)
sys.stdout.write('\n')
sys.stdout.flush()
return message
@parsable.command
def infer_cats(max_size=CAT_MAX_SIZE, debug=False):
'''
Test category inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 1 and feature_count > 0 and size <= max_size
]
datasets = product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[False],
[debug],
[None])
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
@parsable.command
def infer_kinds(max_size=KIND_MAX_SIZE, debug=False):
'''
Test kind inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 0 and feature_count > 0 and size <= max_size
if object_count + feature_count > 2
]
datasets = product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[True],
[debug],
[None])
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
@parsable.command
def infer_feature_hypers(max_size=CAT_MAX_SIZE, debug=False):
'''
Test feature hyperparameter inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 1 and feature_count == 1 and size <= max_size
]
hyper_prior = [
(hp_name, (param_name, param_grid))
for hp_name, param_grids in HYPER_PRIOR.iteritems()
if hp_name not in ['topology', 'clustering']
for param_name, param_grid in param_grids.iteritems()
]
datasets = filter(
lambda x: x[1] == x[5][0],
product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[False],
[debug],
hyper_prior))
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
@parsable.command
def infer_topology_hypers(max_size=KIND_MAX_SIZE, debug=False):
'''
Test topology hyperparameter inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 1 and feature_count > 1 and size <= max_size
]
hyper_prior = [('topology', HYPER_PRIOR['topology'])]
datasets = product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[True],
[debug],
hyper_prior)
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
@parsable.command
def infer_clustering_hypers(max_size=CAT_MAX_SIZE, debug=False):
'''
    Test clustering hyperparameter inference.
'''
dimensions = [
(object_count, feature_count)
for object_count, sizes in enumerate(LATENT_SIZES)
for feature_count, size in enumerate(sizes)
if object_count > 1 and feature_count == 1 and size <= max_size
]
# FIXME(jglidden) this uses too much tuple trickery
hyper_prior = [('clustering', HYPER_PRIOR['clustering'])]
datasets = product(
dimensions,
FEATURE_TYPES,
DENSITIES,
[False],
[debug],
hyper_prior)
datasets = list(datasets)
parallel_map = map if debug else loom.util.parallel_map
errors = sum(parallel_map(_test_dataset, datasets), [])
message = '\n'.join(['Failed {} Cases:'.format(len(errors))] + errors)
assert_false(errors, message)
# Run tiny examples through nose and expensive examples by hand.
def test_cat_inference():
infer_cats(100)
def test_kind_inference():
infer_kinds(50)
def test_feature_hyper_inference():
infer_feature_hypers(100)
def test_topology_hyper_inference():
infer_topology_hypers(50)
def test_clustering_hyper_inference():
infer_clustering_hypers(100)
def _test_dataset(args):
dim, feature_type, density, infer_kinds, debug, hyper_prior = args
object_count, feature_count = dim
with tempdir(cleanup_on_error=(not debug)):
seed_all(SEED)
config_name = os.path.abspath('config.pb')
model_base_name = 'model.pb'
model_name = os.path.abspath(model_base_name)
rows_name = os.path.abspath('rows.pbs')
models = generate_model(feature_count, feature_type, hyper_prior)
model, fixed_hyper_models = models
dump_model(model, model_name)
fixed_model_names = []
for i, fm in enumerate(fixed_hyper_models):
fixed_model_base = 'fixed-{}-{}'.format(i, model_base_name)
fixed_model_name = os.path.abspath(fixed_model_base)
fixed_model_names.append(fixed_model_name)
dump_model(fm, fixed_model_name)
if hyper_prior is None:
assert len(fixed_model_names) == 0
rows = generate_rows(
object_count,
feature_count,
feature_type,
density)
dump_rows(rows, rows_name)
infer_cats = (object_count > 1)
infer_hypers = (hyper_prior is not None)
if infer_kinds:
sample_count = 10 * LATENT_SIZES[object_count][feature_count]
iterations = 32
else:
sample_count = 10 * LATENT_SIZES[object_count][1]
iterations = 0
config = {
'posterior_enum': {
'sample_count': sample_count,
'sample_skip': 10,
},
'kernels': {
'hyper': {
'run': infer_hypers,
'parallel': False,
},
'kind': {
'iterations': iterations,
'row_queue_capacity': 0,
'score_parallel': False,
},
},
}
loom.config.config_dump(config, config_name)
casename = '{}-{}-{}-{}-{}{}{}'.format(
object_count,
feature_count,
feature_type,
density,
('C' if infer_cats else ''),
('K' if infer_kinds else ''),
('H' if infer_hypers else ''))
# LOG('Run', casename)
error = _test_dataset_config(
casename,
object_count,
feature_count,
config_name,
model_name,
fixed_model_names,
rows_name,
config,
debug)
return [] if error is None else [error]
def add_sample(sample, score, counts_dict, scores_dict):
if sample in counts_dict:
counts_dict[sample] += 1
        expected = scores_dict[sample]
assert abs(score - expected) < SCORE_TOL, \
'inconsistent score: {} vs {}'.format(score, expected)
else:
counts_dict[sample] = 1
scores_dict[sample] = score
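# Example of the bookkeeping above (numbers are hypothetical): seeing the same
# latent twice with scores -3.10 and -3.12 leaves counts_dict[latent] == 2 and
# passes the assert, since the difference is below SCORE_TOL.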
def process_fixed_samples(fixed_hyper_samples, unfixed_latents):
fixed_scores = []
fixed_counts = []
for f_samples in fixed_hyper_samples:
fixed_scores_dict = {}
fixed_counts_dict = {}
for sample, score in f_samples:
add_sample(sample, score, fixed_counts_dict, fixed_scores_dict)
fixed_scores.append(fixed_scores_dict)
fixed_counts.append(fixed_counts_dict)
all_fixed_latents = [set([lat for lat in fd]) for fd in fixed_scores]
fixed_latents = set.intersection(*all_fixed_latents)
latents = [lat for lat in unfixed_latents if lat in fixed_latents]
scores_dict = {}
for latent in latents:
latent_scores = [fd[latent] for fd in fixed_scores]
scores_dict[latent] = numpy.logaddexp.reduce(latent_scores)
return latents, scores_dict
def _test_dataset_config(
casename,
object_count,
feature_count,
config_name,
model_name,
fixed_model_names,
rows_name,
config,
debug):
dataset = {'model': model_name, 'rows': rows_name, 'config': config_name}
samples = generate_samples(casename, dataset, debug)
fixed_hyper_samples = []
for fixed_model_name in fixed_model_names:
fixed_dataset = dataset.copy()
fixed_dataset['model'] = fixed_model_name
fs = generate_samples(None, fixed_dataset, debug)
fixed_hyper_samples.append(fs)
sample_count = config['posterior_enum']['sample_count']
counts_dict = {}
scores_dict = {}
actual_count = 0
for sample, score in samples:
actual_count += 1
add_sample(sample, score, counts_dict, scores_dict)
assert_equal(actual_count, sample_count)
if fixed_hyper_samples:
latents, scores_dict = process_fixed_samples(
fixed_hyper_samples,
scores_dict.keys())
useable_count = sum([counts_dict[lat] for lat in latents])
if useable_count < sample_count:
LOG('Warn', casename, 'scores found for {} / {} samples'.format(
useable_count,
sample_count))
sample_count = useable_count
else:
latents = scores_dict.keys()
actual_latent_count = len(latents)
infer_kinds = (config['kernels']['kind']['iterations'] > 0)
if infer_kinds:
expected_latent_count = count_crosscats(object_count, feature_count)
else:
expected_latent_count = BELL_NUMBERS[object_count]
assert actual_latent_count <= expected_latent_count, 'programmer error'
if actual_latent_count < expected_latent_count:
LOG('Warn', casename, 'found only {} / {} latents'.format(
actual_latent_count,
expected_latent_count))
counts = numpy.array([counts_dict[key] for key in latents])
scores = numpy.array([scores_dict[key] for key in latents])
probs = scores_to_probs(scores)
highest_by_prob = numpy.argsort(probs)[::-1][:TRUNCATE_COUNT]
is_accurate = lambda p: sample_count * p * (1 - p) >= 1
highest_by_prob = [i for i in highest_by_prob if is_accurate(probs[i])]
highest_by_count = numpy.argsort(counts)[::-1][:TRUNCATE_COUNT]
highest = list(set(highest_by_prob) | set(highest_by_count))
truncated = len(highest_by_prob) < len(probs)
if len(highest_by_prob) < 1:
LOG('Warn', casename, 'test is inaccurate; use more samples')
return None
goodness_of_fit = multinomial_goodness_of_fit(
probs[highest_by_prob],
counts[highest_by_prob],
total_count=sample_count,
truncated=truncated)
comment = 'goodness of fit = {:0.3g}'.format(goodness_of_fit)
if goodness_of_fit > MIN_GOODNESS_OF_FIT:
LOG('Pass', casename, comment)
return None
else:
print 'EXPECT\tACTUAL\tCHI\tVALUE'
lines = [(probs[i], counts[i], latents[i]) for i in highest]
for prob, count, latent in sorted(lines, reverse=True):
expect = prob * sample_count
chi = (count - expect) * expect ** -0.5
pretty = pretty_latent(latent)
print '{:0.1f}\t{}\t{:+0.1f}\t{}'.format(
expect,
count,
chi,
pretty)
return LOG('Fail', casename, comment)
def generate_model(feature_count, feature_type, hyper_prior=None):
module = FEATURE_TYPES[feature_type]
shared = module.Shared.from_dict(module.EXAMPLES[0]['shared'])
shared.realize()
cross_cat = loom.schema_pb2.CrossCat()
kind = cross_cat.kinds.add()
CLUSTERING.protobuf_dump(kind.product_model.clustering)
features = getattr(kind.product_model, feature_type)
for featureid in xrange(feature_count):
shared.protobuf_dump(features.add())
kind.featureids.append(featureid)
CLUSTERING.protobuf_dump(cross_cat.topology)
# FIXME(jglidden) this belongs in a separate function
fixed_models = []
if hyper_prior is not None:
hp_name, grid_in = hyper_prior
if hp_name == 'topology':
get_grid_out = lambda model: model.hyper_prior.topology
extend = lambda grid_out, point: PitmanYor.to_protobuf(
point,
grid_out.add())
elif hp_name == 'clustering':
get_grid_out = lambda model: model.hyper_prior.clustering
extend = lambda grid_out, point: PitmanYor.to_protobuf(
point,
grid_out.add())
else:
param_name, grid_in = grid_in
get_grid_out = lambda model: getattr(
getattr(model.hyper_prior, hp_name),
param_name)
extend = lambda grid_out, point: grid_out.extend([point])
cross_cat_base = loom.schema_pb2.CrossCat()
cross_cat_base.MergeFrom(cross_cat)
for point in grid_in:
extend(get_grid_out(cross_cat), point)
if hp_name == 'dd':
pass
else:
fixed_model = loom.schema_pb2.CrossCat()
fixed_model.MergeFrom(cross_cat_base)
extend(get_grid_out(fixed_model), point)
fixed_models.append(fixed_model)
if hp_name == 'dd':
assert feature_count == 1
dim = len(shared.dump()['alphas'])
if dim > 4:
raise SkipTest('FIXME test runs out of memory')
for grid in product(*[grid_in] * dim):
fixed_model = loom.schema_pb2.CrossCat()
fixed_model.MergeFrom(cross_cat_base)
alphas = fixed_model.kinds[0].product_model.dd[0].alphas
assert len(alphas) == len(grid)
for i, alpha in enumerate(grid):
alphas[i] = alpha
fixed_models.append(fixed_model)
return cross_cat, fixed_models
def test_generate_model():
for feature_type in FEATURE_TYPES:
generate_model(10, feature_type)
def dump_model(model, model_name):
with open(model_name, 'wb') as f:
f.write(model.SerializeToString())
def generate_rows(object_count, feature_count, feature_type, density):
assert object_count > 0, object_count
assert feature_count > 0, feature_count
assert 0 <= density and density <= 1, density
# generate structure
feature_assignments = CLUSTERING.sample_assignments(feature_count)
kind_count = len(set(feature_assignments))
object_assignments = [
CLUSTERING.sample_assignments(object_count)
for _ in xrange(kind_count)
]
group_counts = [
len(set(assignments))
for assignments in object_assignments
]
# generate data
module = FEATURE_TYPES[feature_type]
shared = module.Shared.from_dict(module.EXAMPLES[0]['shared'])
def sampler_create():
group = module.Group()
group.init(shared)
sampler = module.Sampler()
sampler.init(shared, group)
return sampler
table = [[None] * feature_count for _ in xrange(object_count)]
for f, k in enumerate(feature_assignments):
samplers = [sampler_create() for _ in xrange(group_counts[k])]
for i, g in enumerate(object_assignments[k]):
if numpy.random.uniform() < density:
table[i][f] = samplers[g].eval(shared)
return table
def test_generate_rows():
for feature_type in FEATURE_TYPES:
table = generate_rows(100, 100, feature_type, 1.0)
assert_true(all(cell is not None for row in table for cell in row))
table = generate_rows(100, 100, feature_type, 0.0)
assert_true(all(cell is None for row in table for cell in row))
table = generate_rows(100, 100, feature_type, 0.5)
assert_true(any(cell is None for row in table for cell in row))
assert_true(any(cell is not None for row in table for cell in row))
def serialize_rows(table):
NONE = loom.schema_pb2.ProductValue.Observed.NONE
DENSE = loom.schema_pb2.ProductValue.Observed.DENSE
message = loom.schema_pb2.Row()
for i, values in enumerate(table):
message.Clear()
message.id = i
message.diff.neg.observed.sparsity = NONE
data = message.diff.pos
data.observed.sparsity = DENSE
for value in values:
data.observed.dense.append(value is not None)
if value is None:
pass
elif isinstance(value, bool):
data.booleans.append(value)
elif isinstance(value, int):
data.counts.append(value)
elif isinstance(value, float):
data.reals.append(value)
else:
raise ValueError('unknown value type: {}'.format(value))
yield message.SerializeToString()
def dump_rows(table, rows_name):
protobuf_stream_dump(serialize_rows(table), rows_name)
def test_dump_rows():
for feature_type in FEATURE_TYPES:
table = generate_rows(10, 10, feature_type, 0.5)
with tempdir():
rows_name = os.path.abspath('rows.pbs')
dump_rows(table, rows_name)
message = loom.schema_pb2.Row()
for string in protobuf_stream_load(rows_name):
message.ParseFromString(string)
# print message
def run_posterior_enum(casename, dataset, results, debug, sparsify=True):
if not sparsify:
loom.runner.posterior_enum(
config_in=dataset['config'],
rows_in=dataset['rows'],
model_in=dataset['model'],
samples_out=results['samples'],
debug=debug)
else:
loom.format.make_schema(
model_in=dataset['model'],
schema_out=results['schema'])
loom.format.make_schema_row(
schema_in=results['schema'],
schema_row_out=results['schema_row'])
loom.runner.tare(
schema_row_in=results['schema_row'],
rows_in=dataset['rows'],
tares_out=results['tares'],
debug=debug)
tare_count = sum(1 for _ in protobuf_stream_load(results['tares']))
if casename is not None and tare_count:
LOG('Info', casename, 'found {} tare rows'.format(tare_count))
loom.runner.sparsify(
schema_row_in=results['schema_row'],
tares_in=results['tares'],
rows_in=dataset['rows'],
rows_out=results['diffs'],
debug=debug)
loom.runner.posterior_enum(
config_in=dataset['config'],
rows_in=results['diffs'],
tares_in=results['tares'],
model_in=dataset['model'],
samples_out=results['samples'],
debug=debug)
def load_samples(filename):
message = loom.schema_pb2.PosteriorEnum.Sample()
for string in protobuf_stream_load(filename):
message.ParseFromString(string)
sample = parse_sample(message)
score = float(message.score)
yield sample, score
def generate_samples(casename, dataset, debug):
root = os.getcwd()
with tempdir(cleanup_on_error=(not debug)):
results = {
'schema': os.path.abspath('schema.json'),
'schema_row': os.path.abspath('schema_row.pb'),
'tares': os.path.abspath('tares.pbs'),
'diffs': os.path.abspath('diffs.pbs'),
'samples': os.path.abspath('samples.pbs.gz'),
}
os.chdir(root)
run_posterior_enum(casename, dataset, results, debug)
for sample in load_samples(results['samples']):
yield sample
def parse_sample(message):
return frozenset(
(
frozenset(kind.featureids),
frozenset(frozenset(group.rowids) for group in kind.groups)
)
for kind in message.kinds
)
def pretty_kind(kind):
featureids, groups = kind
return '{} |{}|'.format(
' '.join(imap(str, sorted(featureids))),
'|'.join(sorted(
' '.join(imap(str, sorted(group)))
for group in groups
))
)
def pretty_latent(latent):
return ' - '.join(sorted(pretty_kind(kind) for kind in latent))
# ----------------------------------------------------------------------------
# dataset suggestions
def enum_partitions(count):
if count == 0:
yield ()
elif count == 1:
yield ((1,),)
else:
for p in enum_partitions(count - 1):
yield p + ((count,),)
for i, part in enumerate(p):
yield p[:i] + (part + (count,),) + p[1 + i:]
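# A quick sanity check of enum_partitions (hypothetical interactive session;
# the generator yields the 5 set partitions of {1, 2, 3} in this order):
#     >>> list(enum_partitions(3))
#     [((1,), (2,), (3,)), ((1, 3), (2,)), ((1,), (2, 3)),
#      ((1, 2), (3,)), ((1, 2, 3),)]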
BELL_NUMBERS = [
1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147, 115975, 678570, 4213597,
27644437, 190899322, 1382958545, 10480142147, 82864869804, 682076806159,
]
def test_enum_partitions():
for i, bell_number in enumerate(BELL_NUMBERS):
if bell_number < 1e6:
count = sum(1 for _ in enum_partitions(i))
assert_equal(count, bell_number)
def count_crosscats(rows, cols):
return sum(
BELL_NUMBERS[rows] ** len(kinds)
for kinds in enum_partitions(cols))
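# For example, count_crosscats(2, 2) sums over the two partitions of the two
# feature columns: BELL_NUMBERS[2] ** 2 + BELL_NUMBERS[2] ** 1 = 4 + 2 = 6,
# matching LATENT_SIZES[2][2] above.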
@parsable.command
def datasets(max_count=1000000):
'''
Suggest datasets based on bounded latent space size.
'''
enum_partitions
max_rows = 16
max_cols = 12
    print '# Cross Cat Latent Space Sizes up to {}, generated by:'.format(max_count)
print '# python {} datasets {}'.format(
os.path.basename(__file__),
max_count)
print 'LATENT_SIZES = ['
for rows in range(1 + max_rows):
counts = []
for cols in range(1 + max_cols):
count = count_crosscats(rows, cols)
if count > max_count:
break
counts.append(count)
if len(counts) > 1:
print ' [{}],'.format(', '.join(map(str, counts)))
print ']'
def test_datasets():
datasets(1000)
if __name__ == '__main__':
parsable.dispatch()
| bsd-3-clause |
TeamHG-Memex/frontera | examples/scripts/09_frontier_backends.py | 8 | 1052 | """
Test different frontier backends
"""
from frontera import FrontierManager, Settings, FrontierTester, graphs
def test_logic(backend):
# Graph
graph = graphs.Manager('sqlite:///data/graph.db')
# Frontier
settings = Settings()
settings.BACKEND = backend
settings.LOGGING_MANAGER_ENABLED = True
settings.LOGGING_BACKEND_ENABLED = True
settings.LOGGING_DEBUGGING_ENABLED = False
settings.TEST_MODE = True
frontier = FrontierManager.from_settings(settings)
# Tester
tester = FrontierTester(frontier, graph)
tester.run(add_all_pages=True)
# Show crawling sequence
print '-'*80
print frontier.backend.name
print '-'*80
for page in tester.sequence:
print page.url
if __name__ == '__main__':
test_logic('frontera.contrib.backends.memory.FIFO')
test_logic('frontera.contrib.backends.memory.LIFO')
test_logic('frontera.contrib.backends.memory.BFS')
test_logic('frontera.contrib.backends.memory.DFS')
test_logic('frontera.contrib.backends.memory.RANDOM')
| bsd-3-clause |
bheesham/servo | tests/wpt/web-platform-tests/tools/html5lib/utils/entities.py | 438 | 2734 | import json
import html5lib
def parse(path="html5ents.xml"):
return html5lib.parse(open(path), treebuilder="lxml")
def entity_table(tree):
return dict((entity_name("".join(tr[0].xpath(".//text()"))),
entity_characters(tr[1].text))
for tr in tree.xpath("//h:tbody/h:tr",
namespaces={"h":"http://www.w3.org/1999/xhtml"}))
def entity_name(inp):
return inp.strip()
def entity_characters(inp):
return "".join(codepoint_to_character(item)
for item in inp.split()
if item)
def codepoint_to_character(inp):
return ("\U000"+inp[2:]).decode("unicode-escape")
def make_tests_json(entities):
test_list = make_test_list(entities)
tests_json = {"tests":
[make_test(*item) for item in test_list]
}
return tests_json
def make_test(name, characters, good):
return {
"description":test_description(name, good),
"input":"&%s"%name,
"output":test_expected(name, characters, good)
}
def test_description(name, good):
with_semicolon = name.endswith(";")
semicolon_text = {True:"with a semi-colon",
False:"without a semi-colon"}[with_semicolon]
if good:
text = "Named entity: %s %s"%(name, semicolon_text)
else:
text = "Bad named entity: %s %s"%(name, semicolon_text)
return text
def test_expected(name, characters, good):
rv = []
if not good or not name.endswith(";"):
rv.append("ParseError")
rv.append(["Character", characters])
return rv
def make_test_list(entities):
tests = []
for entity_name, characters in entities.items():
if entity_name.endswith(";") and not subentity_exists(entity_name, entities):
tests.append((entity_name[:-1], "&" + entity_name[:-1], False))
tests.append((entity_name, characters, True))
return sorted(tests)
def subentity_exists(entity_name, entities):
for i in range(1, len(entity_name)):
if entity_name[:-i] in entities:
return True
return False
def make_entities_code(entities):
entities_text = "\n".join(" \"%s\": u\"%s\","%(
name, entities[name].encode(
"unicode-escape").replace("\"", "\\\""))
for name in sorted(entities.keys()))
return """entities = {
%s
}"""%entities_text
def main():
entities = entity_table(parse())
tests_json = make_tests_json(entities)
json.dump(tests_json, open("namedEntities.test", "w"), indent=4)
code = make_entities_code(entities)
open("entities_constants.py", "w").write(code)
if __name__ == "__main__":
main()
| mpl-2.0 |
yipenggao/moose | python/MooseDocs/extensions/admonition.py | 4 | 3946 | ##pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import re
from markdown.blockprocessors import BlockProcessor
from MooseMarkdownExtension import MooseMarkdownExtension
from MooseMarkdownCommon import MooseMarkdownCommon
class AdmonitionExtension(MooseMarkdownExtension):
"""
Extension for creating admontion (e.g, warning, errors, info, etc.).
"""
@staticmethod
def defaultConfig():
"""
Default configuration options for SQAExtension
"""
config = MooseMarkdownExtension.defaultConfig()
return config
def extendMarkdown(self, md, md_globals):
"""
Adds components to AdmonitionExtension.
"""
md.registerExtension(self)
config = self.getConfigs()
md.parser.blockprocessors.add('moose_admonition',
AdmonitionBlock(markdown_instance=md, **config),
'_begin')
def makeExtension(*args, **kwargs): #pylint: disable=invalid-name
"""
Create SQAExtension
"""
return AdmonitionExtension(*args, **kwargs)
class AdmonitionBlock(MooseMarkdownCommon, BlockProcessor):
"""
Adds an admonition functionality using syntax similar to other MOOSE syntax.
"""
RE = re.compile(r'!admonition\s+'
r'(?P<command>info|note|important|warning|danger|error)\s*' # commands
r'(?P<title>[^\n]*?)' # optional title (any non newline)
r'(?P<settings>\w+=.*?)?' # optional settings
r'\n(?P<message>.*?)(?:\Z|\n{2,})', # message
flags=re.DOTALL|re.MULTILINE)
@staticmethod
def defaultSettings():
"""Settings for AdmonitionBlock"""
settings = MooseMarkdownCommon.defaultSettings()
return settings
def __init__(self, markdown_instance=None, **kwargs):
MooseMarkdownCommon.__init__(self, **kwargs)
BlockProcessor.__init__(self, markdown_instance.parser)
self.markdown = markdown_instance
def test(self, parent, block):
"""
Check that block contains the defined RE.
"""
return self.RE.search(block)
def run(self, parent, blocks):
"""
Create the collapsible region with the listed requirements.
"""
block = blocks.pop(0)
match = self.RE.search(block)
command = match.group('command')
title = match.group('title').strip()
message = match.group('message').strip()
self.createAdmonition(command, message, title=title, parent=parent)
| lgpl-2.1 |
chentao/thrift | lib/py/src/transport/THttpClient.py | 51 | 4464 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from io import BytesIO
import os
import socket
import sys
import warnings
from six.moves import urllib
from six.moves import http_client
from .TTransport import *
import six
class THttpClient(TTransportBase):
"""Http implementation of TTransport base."""
def __init__(self, uri_or_host, port=None, path=None):
"""THttpClient supports two different types constructor parameters.
THttpClient(host, port, path) - deprecated
THttpClient(uri)
Only the second supports https.
"""
if port is not None:
warnings.warn(
"Please use the THttpClient('http://host:port/path') syntax",
DeprecationWarning,
stacklevel=2)
self.host = uri_or_host
self.port = port
assert path
self.path = path
self.scheme = 'http'
else:
parsed = urllib.parse.urlparse(uri_or_host)
self.scheme = parsed.scheme
assert self.scheme in ('http', 'https')
if self.scheme == 'http':
self.port = parsed.port or http_client.HTTP_PORT
elif self.scheme == 'https':
self.port = parsed.port or http_client.HTTPS_PORT
self.host = parsed.hostname
self.path = parsed.path
if parsed.query:
self.path += '?%s' % parsed.query
self.__wbuf = BytesIO()
self.__http = None
self.__http_response = None
self.__timeout = None
self.__custom_headers = None
def open(self):
if self.scheme == 'http':
self.__http = http_client.HTTPConnection(self.host, self.port)
else:
self.__http = http_client.HTTPSConnection(self.host, self.port)
def close(self):
self.__http.close()
self.__http = None
self.__http_response = None
def isOpen(self):
return self.__http is not None
def setTimeout(self, ms):
if not hasattr(socket, 'getdefaulttimeout'):
raise NotImplementedError
if ms is None:
self.__timeout = None
else:
self.__timeout = ms / 1000.0
def setCustomHeaders(self, headers):
self.__custom_headers = headers
def read(self, sz):
return self.__http_response.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def __withTimeout(f):
def _f(*args, **kwargs):
orig_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(args[0].__timeout)
try:
result = f(*args, **kwargs)
finally:
socket.setdefaulttimeout(orig_timeout)
return result
return _f
def flush(self):
if self.isOpen():
self.close()
self.open()
# Pull data out of buffer
data = self.__wbuf.getvalue()
self.__wbuf = BytesIO()
# HTTP request
self.__http.putrequest('POST', self.path)
# Write headers
self.__http.putheader('Content-Type', 'application/x-thrift')
self.__http.putheader('Content-Length', str(len(data)))
if not self.__custom_headers or 'User-Agent' not in self.__custom_headers:
user_agent = 'Python/THttpClient'
script = os.path.basename(sys.argv[0])
if script:
user_agent = '%s (%s)' % (user_agent, urllib.parse.quote(script))
self.__http.putheader('User-Agent', user_agent)
if self.__custom_headers:
for key, val in six.iteritems(self.__custom_headers):
self.__http.putheader(key, val)
self.__http.endheaders()
# Write payload
self.__http.send(data)
# Get reply to flush the request
self.__http_response = self.__http.getresponse()
self.code = self.__http_response.status
self.message = self.__http_response.reason
self.headers = self.__http_response.msg
# Decorate if we know how to timeout
if hasattr(socket, 'getdefaulttimeout'):
flush = __withTimeout(flush)
| apache-2.0 |
anurag03/integration_tests | cfme/storage/object_store_object.py | 1 | 6474 | # -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToSibling, NavigateToAttribute
from widgetastic.widget import View, Text, NoSuchElementException
from widgetastic_patternfly import BreadCrumb, Button, Dropdown
from cfme.base.ui import BaseLoggedInPage
from cfme.common import TagPageView, Taggable
from cfme.exceptions import ItemNotFound
from cfme.modeling.base import BaseCollection, BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep, navigator, navigate_to
from cfme.utils.providers import get_crud_by_name
from widgetastic_manageiq import (
Accordion, BaseEntitiesView, ItemsToolBarViewSelector, ManageIQTree, SummaryTable, Search)
class ObjectStoreObjectToolbar(View):
"""The toolbar on the Object Store Object page"""
configuration = Dropdown('Configuration')
policy = Dropdown('Policy')
download = Dropdown('Download')
view_selector = View.nested(ItemsToolBarViewSelector)
class ObjectStoreObjectDetailsToolbar(View):
"""The toolbar on the Object Store Object detail page"""
policy = Dropdown('Policy')
download = Button(title='Download summary in PDF format')
class ObjectStoreObjectDetailsEntities(View):
"""The entities on the Object Store Object detail page"""
breadcrumb = BreadCrumb()
properties = SummaryTable('Properties')
relationships = SummaryTable('Relationships')
smart_management = SummaryTable('Smart Management')
class ObjectStoreObjectDetailsSidebar(View):
"""The sidebar on the Object Store Object details page"""
@View.nested
class properties(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class relationships(Accordion): # noqa
tree = ManageIQTree()
class ObjectStoreObjectView(BaseLoggedInPage):
"""A base view for all the Object Store Object pages"""
title = Text('.//div[@id="center_div" or @id="main-content"]//h1')
@property
def in_object(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Storage', 'Object Storage',
'Object Store Objects'])
class ObjectStoreObjectAllView(ObjectStoreObjectView):
"""The all Object Store Object page"""
toolbar = View.nested(ObjectStoreObjectToolbar)
search = View.nested(Search)
including_entities = View.include(BaseEntitiesView, use_parent=True)
@property
def is_displayed(self):
return (
self.in_object and
self.title.text == 'Cloud Object Store Objects')
class ObjectStoreObjectDetailsView(ObjectStoreObjectView):
"""The detail Object Store Object page"""
@property
def is_displayed(self):
expected_title = '{} (Summary)'.format(self.context['object'].key)
return (
self.title.text == expected_title and
self.entities.breadcrumb.active_location == expected_title)
toolbar = View.nested(ObjectStoreObjectDetailsToolbar)
sidebar = View.nested(ObjectStoreObjectDetailsSidebar)
entities = View.nested(ObjectStoreObjectDetailsEntities)
@attr.s
class ObjectStoreObject(BaseEntity, Taggable):
""" Model of an Storage Object Store Object in cfme
Args:
key: key of the object.
provider: provider
"""
key = attr.ib()
provider = attr.ib()
@attr.s
class ObjectStoreObjectCollection(BaseCollection):
"""Collection object for the :py:class:'cfme.storage.object_store_object.ObjStoreObject' """
ENTITY = ObjectStoreObject
def all(self):
"""returning all Object Store Objects"""
view = navigate_to(self, 'All')
view.entities.paginator.set_items_per_page(500)
objects = []
try:
if 'provider'in self.filters:
for item in view.entities.elements.read():
if self.filters['provider'].name in item['Cloud Provider']:
objects.append(self.instantiate(key=item['Key'],
provider=self.filters['provider']))
else:
for item in view.entities.elements.read():
provider_name = item['Cloud Provider'].split()[0]
provider = get_crud_by_name(provider_name)
objects.append(self.instantiate(key=item['Key'], provider=provider))
return objects
except NoSuchElementException:
return None
def delete(self, *objects):
# TODO: capture flash message after BZ 1497113 resolve.
view = navigate_to(self, 'All')
for obj in objects:
try:
row = view.entities.paginator.find_row_on_pages(
view.entities.elements, key=obj.key)
row[0].check()
except NoSuchElementException:
raise ItemNotFound('Could not locate object {}'.format(obj.key))
view.toolbar.configuration.item_select('Remove Object Storage Objects',
handle_alert=True)
@navigator.register(ObjectStoreObjectCollection, 'All')
class ObjectStoreObjectAll(CFMENavigateStep):
VIEW = ObjectStoreObjectAllView
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select(
'Storage', 'Object Storage', 'Object Store Objects')
def resetter(self):
self.view.toolbar.view_selector.select("List View")
@navigator.register(ObjectStoreObject, 'Details')
class ObjectStoreObjectDetails(CFMENavigateStep):
VIEW = ObjectStoreObjectDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
try:
# ToDo: use get_entity method as JS API issue (#2898) resolve.
row = self.prerequisite_view.entities.paginator.find_row_on_pages(
self.prerequisite_view.entities.elements, key=self.obj.key)
row[1].click()
except NoSuchElementException:
raise ItemNotFound('Could not locate object {}'.format(self.obj.key))
@navigator.register(ObjectStoreObject, 'EditTagsFromDetails')
class ObjectStoreObjectDetailEditTag(CFMENavigateStep):
VIEW = TagPageView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
| gpl-2.0 |
eroicaleo/LearningPython | interview/leet/394_Decode_String.py | 1 | 2187 | #!/usr/bin/env python3
import re
# s = "3[a]2[bc]", return "aaabcbc".
# s = "3[a2[c]]", return "accaccacc".
# s = "2[abc]3[cd]ef", return "abcabccdcdcdef".
class Solution:
def decodeString(self, s):
stack, n, t = [], 0, ''
for c in s:
if c.isdigit():
n = 10*n + int(c)
if t:
stack, t = stack + [t], ''
elif c == '[':
stack.append(n)
n, t = 0, ''
elif c.isalpha():
t += c
elif c == ']':
t = stack.pop() * t
if stack and isinstance(stack[-1], str):
t = stack.pop() + t
print(f'c = {c}, t = {t}, stack = {stack}')
return t
def decodeString_stefan(self, s):
while '[' in s:
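            # Expand the innermost "k[chars]" groups in place; repeat until no '[' remains.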
s = re.sub(r'(\d+)\[([a-z]+)\]', lambda m: int(m.group(1)) * m.group(2), s)
print(s)
return s
def decodeString_recursive(self, s):
self.i, l = 0, len(s)
def helper():
n, t = 0, ''
while self.i < l:
c, self.i = s[self.i], self.i+1
if c.isdigit():
n = 10*n+int(c)
elif c.isalpha():
t += c
elif c == '[':
t += n*helper()
n = 0
elif c == ']':
break
print(f'I am returning {t}')
return t
return helper()
def decodeString_iter2(self, s):
stack = []
n, t = 0, ''
for c in s:
if c.isdigit():
n = 10*n+int(c)
elif c.isalpha():
t += c
elif c == '[':
stack += [n, t]
n, t = 0, ''
elif c == ']':
t, n = stack.pop()+stack.pop()*t, 0
return t
s = "2[abc]3[cd]ef"
s = "3[3[a]3[b]]"
s = "3[a]2[bc]"
s = "3[a2[c]]"
sol = Solution()
print(sol.decodeString(s))
print('Solution 2')
print(sol.decodeString_stefan(s))
print('Solution 3')
print(sol.decodeString_recursive(s))
print('Solution 4')
print(sol.decodeString_iter2(s))
| mit |
mastizada/kuma | vendor/packages/nose/nose/plugins/testid.py | 29 | 9641 | """
This plugin adds a test id (like #1) to each test name output. After
you've run once to generate test ids, you can re-run individual
tests by activating the plugin and passing the ids (with or
without the # prefix) instead of test names.
For example, if your normal test run looks like::
% nosetests -v
tests.test_a ... ok
tests.test_b ... ok
tests.test_c ... ok
When adding ``--with-id`` you'll see::
% nosetests -v --with-id
#1 tests.test_a ... ok
#2 tests.test_b ... ok
  #3 tests.test_c ... ok
Then you can re-run individual tests by supplying just an id number::
% nosetests -v --with-id 2
#2 tests.test_b ... ok
You can also pass multiple id numbers::
% nosetests -v --with-id 2 3
#2 tests.test_b ... ok
#3 tests.test_c ... ok
Since most shells consider '#' a special character, you can leave it out when
specifying a test id.
Note that when run without the -v switch, no special output is displayed, but
the ids file is still written.
Looping over failed tests
-------------------------
This plugin also adds a mode that will direct the test runner to record
failed tests. Subsequent test runs will then run only the tests that failed
last time. Activate this mode with the ``--failed`` switch::
% nosetests -v --failed
#1 test.test_a ... ok
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
#4 test.test_d ... ok
On the second run, only tests #2 and #3 will run::
% nosetests -v --failed
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
As you correct errors and tests pass, they'll drop out of subsequent runs.
First::
% nosetests -v --failed
#2 test.test_b ... ok
#3 test.test_c ... FAILED
Second::
% nosetests -v --failed
#3 test.test_c ... FAILED
When all tests pass, the full set will run on the next invocation.
First::
% nosetests -v --failed
#3 test.test_c ... ok
Second::
% nosetests -v --failed
#1 test.test_a ... ok
#2 test.test_b ... ok
#3 test.test_c ... ok
#4 test.test_d ... ok
.. note ::
    If you expect to use ``--failed`` regularly, it's a good idea to always
    run using the ``--with-id`` option. This will ensure that an id file is
always created, allowing you to add ``--failed`` to the command line as soon
as you have failing tests. Otherwise, your first run using ``--failed`` will
(perhaps surprisingly) run *all* tests, because there won't be an id file
containing the record of failed tests from your previous run.
"""
__test__ = False
import logging
import os
from nose.plugins import Plugin
from nose.util import src, set
try:
from cPickle import dump, load
except ImportError:
from pickle import dump, load
log = logging.getLogger(__name__)
class TestId(Plugin):
"""
Activate to add a test id (like #1) to each test name output. Activate
with --failed to rerun failing tests only.
"""
name = 'id'
idfile = None
collecting = True
loopOnFailed = False
def options(self, parser, env):
"""Register commandline options.
"""
Plugin.options(self, parser, env)
parser.add_option('--id-file', action='store', dest='testIdFile',
default='.noseids', metavar="FILE",
help="Store test ids found in test runs in this "
"file. Default is the file .noseids in the "
"working directory.")
parser.add_option('--failed', action='store_true',
dest='failed', default=False,
help="Run the tests that failed in the last "
"test run.")
def configure(self, options, conf):
"""Configure plugin.
"""
Plugin.configure(self, options, conf)
if options.failed:
self.enabled = True
self.loopOnFailed = True
log.debug("Looping on failed tests")
self.idfile = os.path.expanduser(options.testIdFile)
if not os.path.isabs(self.idfile):
self.idfile = os.path.join(conf.workingDir, self.idfile)
self.id = 1
# Ids and tests are mirror images: ids are {id: test address} and
# tests are {test address: id}
self.ids = {}
self.tests = {}
self.failed = []
self.source_names = []
# used to track ids seen when tests is filled from
# loaded ids file
self._seen = {}
self._write_hashes = conf.verbosity >= 2
def finalize(self, result):
"""Save new ids file, if needed.
"""
if result.wasSuccessful():
self.failed = []
if self.collecting:
ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
else:
ids = self.ids
fh = open(self.idfile, 'wb')
dump({'ids': ids,
'failed': self.failed,
'source_names': self.source_names}, fh)
fh.close()
log.debug('Saved test ids: %s, failed %s to %s',
ids, self.failed, self.idfile)
def loadTestsFromNames(self, names, module=None):
"""Translate ids in the list of requested names into their
test addresses, if they are found in my dict of tests.
"""
log.debug('ltfn %s %s', names, module)
try:
fh = open(self.idfile, 'rb')
data = load(fh)
if 'ids' in data:
self.ids = data['ids']
self.failed = data['failed']
self.source_names = data['source_names']
else:
# old ids field
self.ids = data
self.failed = []
self.source_names = names
if self.ids:
self.id = max(self.ids) + 1
self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
else:
self.id = 1
log.debug(
'Loaded test ids %s tests %s failed %s sources %s from %s',
self.ids, self.tests, self.failed, self.source_names,
self.idfile)
fh.close()
except IOError:
log.debug('IO error reading %s', self.idfile)
if self.loopOnFailed and self.failed:
self.collecting = False
names = self.failed
self.failed = []
# I don't load any tests myself, only translate names like '#2'
# into the associated test addresses
translated = []
new_source = []
really_new = []
for name in names:
trans = self.tr(name)
if trans != name:
translated.append(trans)
else:
new_source.append(name)
# names that are not ids and that are not in the current
# list of source names go into the list for next time
if new_source:
new_set = set(new_source)
old_set = set(self.source_names)
log.debug("old: %s new: %s", old_set, new_set)
really_new = [s for s in new_source
if not s in old_set]
if really_new:
# remember new sources
self.source_names.extend(really_new)
if not translated:
# new set of source names, no translations
# means "run the requested tests"
names = new_source
else:
# no new names to translate and add to id set
self.collecting = False
log.debug("translated: %s new sources %s names %s",
translated, really_new, names)
return (None, translated + really_new or names)
def makeName(self, addr):
log.debug("Make name %s", addr)
filename, module, call = addr
if filename is not None:
head = src(filename)
else:
head = module
if call is not None:
return "%s:%s" % (head, call)
return head
def setOutputStream(self, stream):
"""Get handle on output stream so the plugin can print id #s
"""
self.stream = stream
def startTest(self, test):
"""Maybe output an id # before the test name.
Example output::
#1 test.test ... ok
#2 test.test_two ... ok
"""
adr = test.address()
log.debug('start test %s (%s)', adr, adr in self.tests)
if adr in self.tests:
if adr in self._seen:
self.write(' ')
else:
self.write('#%s ' % self.tests[adr])
self._seen[adr] = 1
return
self.tests[adr] = self.id
self.write('#%s ' % self.id)
self.id += 1
def afterTest(self, test):
# None means test never ran, False means failed/err
if test.passed is False:
try:
key = str(self.tests[test.address()])
except KeyError:
# never saw this test -- startTest didn't run
pass
else:
if key not in self.failed:
self.failed.append(key)
def tr(self, name):
log.debug("tr '%s'", name)
try:
key = int(name.replace('#', ''))
except ValueError:
return name
log.debug("Got key %s", key)
# I'm running tests mapped from the ids file,
# not collecting new ones
if key in self.ids:
return self.makeName(self.ids[key])
return name
def write(self, output):
if self._write_hashes:
self.stream.write(output)
| mpl-2.0 |
rabipanda/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/cauchy_test.py | 33 | 16857 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cauchy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import cauchy as cauchy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
stats = try_import("scipy.stats")
class CauchyTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.test_session():
param_shapes = cauchy_lib.Cauchy.param_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, loc_shape.eval())
self.assertAllEqual(expected, scale_shape.eval())
loc = array_ops.zeros(loc_shape)
scale = array_ops.ones(scale_shape)
self.assertAllEqual(expected,
array_ops.shape(
cauchy_lib.Cauchy(loc, scale).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = cauchy_lib.Cauchy.param_static_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, loc_shape)
self.assertEqual(expected, scale_shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testCauchyLogPDF(self):
with self.test_session():
batch_size = 6
loc = constant_op.constant([3.0] * batch_size)
scale = constant_op.constant([np.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.eval().shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testCauchyLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant(
[[np.sqrt(10.0), np.sqrt(15.0)]] * batch_size)
x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.shape, (6, 2))
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf_values.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf_values)
self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testCauchyCDF(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).cdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0)
def testCauchySurvivalFunction(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).sf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0)
def testCauchyLogCDF(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
cdf = cauchy.log_cdf(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, cdf.shape)
self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
if not stats:
return
expected_cdf = stats.cauchy(loc, scale).logcdf(x)
self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
def testFiniteGradientAtDifficultPoints(self):
for dtype in [np.float32, np.float64]:
g = ops.Graph()
with g.as_default():
loc = variables.Variable(dtype(0.0))
scale = variables.Variable(dtype(1.0))
dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
for func in [
dist.cdf, dist.log_cdf, dist.survival_function,
dist.log_survival_function, dist.log_prob, dist.prob
]:
value = func(x)
grads = gradients_impl.gradients(value, [loc, scale])
with self.test_session(graph=g):
variables.global_variables_initializer().run()
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
def testCauchyLogSurvivalFunction(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
sf = cauchy.log_survival_function(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, sf.shape)
self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
if not stats:
return
expected_sf = stats.cauchy(loc, scale).logsf(x)
self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
def testCauchyEntropy(self):
with self.test_session():
loc = np.array([1.0, 1.0, 1.0])
scale = np.array([[1.0, 2.0, 3.0]])
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
entropy = cauchy.entropy()
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), entropy.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
entropy.eval().shape)
self.assertAllEqual(cauchy.batch_shape, entropy.shape)
self.assertAllEqual(cauchy.batch_shape, entropy.eval().shape)
if not stats:
return
expected_entropy = stats.cauchy(loc, scale[0]).entropy().reshape((1, 3))
self.assertAllClose(expected_entropy, entropy.eval())
def testCauchyMode(self):
with self.test_session():
# Mu will be broadcast to [7, 7, 7].
loc = [7.]
scale = [11., 12., 13.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mode().shape)
self.assertAllEqual([7., 7, 7], cauchy.mode().eval())
def testCauchyMean(self):
with self.test_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.mean().shape)
self.assertAllEqual([np.nan] * 3, cauchy.mean().eval())
def testCauchyNanMean(self):
with self.test_session():
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.mean().eval()
def testCauchyQuantile(self):
with self.test_session():
batch_size = 50
loc = self._rng.randn(batch_size)
scale = self._rng.rand(batch_size) + 1.0
p = np.linspace(0.000001, 0.999999, batch_size).astype(np.float64)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
x = cauchy.quantile(p)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.eval().shape)
self.assertAllEqual(cauchy.batch_shape, x.shape)
self.assertAllEqual(cauchy.batch_shape, x.eval().shape)
if not stats:
return
expected_x = stats.cauchy(loc, scale).ppf(p)
self.assertAllClose(expected_x, x.eval(), atol=0.)
def testCauchyVariance(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.variance().shape)
self.assertAllEqual([np.nan] * 3, cauchy.variance().eval())
def testCauchyNanVariance(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.variance().eval()
def testCauchyStandardDeviation(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertAllEqual((3,), cauchy.stddev().shape)
self.assertAllEqual([np.nan] * 3, cauchy.stddev().eval())
def testCauchyNanStandardDeviation(self):
with self.test_session():
# scale will be broadcast to [7, 7, 7]
loc = [1., 2., 3.]
scale = [7.]
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale, allow_nan_stats=False)
with self.assertRaises(ValueError):
cauchy.stddev().eval()
def testCauchySample(self):
with self.test_session():
loc = constant_op.constant(3.0)
scale = constant_op.constant(1.0)
loc_v = 3.0
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(sample_values.shape, (100000,))
self.assertAllClose(np.median(sample_values), loc_v, atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchySampleMultiDimensional(self):
with self.test_session():
batch_size = 2
loc = constant_op.constant([[3.0, -3.0]] * batch_size)
scale = constant_op.constant([[0.5, 1.0]] * batch_size)
loc_v = [3.0, -3.0]
n = constant_op.constant(100000)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
samples = cauchy.sample(n)
sample_values = samples.eval()
self.assertEqual(samples.shape, (100000, batch_size, 2))
self.assertAllClose(
np.median(sample_values[:, 0, 0]), loc_v[0], atol=1e-1)
self.assertAllClose(
np.median(sample_values[:, 0, 1]), loc_v[1], atol=1e-1)
expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
expected_shape = (
tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
self.assertAllEqual(expected_shape, samples.shape)
self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchyNegativeLocFails(self):
with self.test_session():
cauchy = cauchy_lib.Cauchy(loc=[1.], scale=[-5.], validate_args=True)
with self.assertRaisesOpError("Condition x > 0 did not hold"):
cauchy.mode().eval()
def testCauchyShape(self):
with self.test_session():
loc = constant_op.constant([-3.0] * 5)
scale = constant_op.constant(11.0)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
self.assertEqual(cauchy.batch_shape_tensor().eval(), [5])
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertEqual(cauchy.event_shape, tensor_shape.TensorShape([]))
def testCauchyShapeWithPlaceholders(self):
loc = array_ops.placeholder(dtype=dtypes.float32)
scale = array_ops.placeholder(dtype=dtypes.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
with self.test_session() as sess:
# get_batch_shape should return an "<unknown>" tensor.
self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape(None))
self.assertEqual(cauchy.event_shape, ())
self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
self.assertAllEqual(
sess.run(
cauchy.batch_shape_tensor(),
feed_dict={
loc: 5.0,
scale: [1.0, 2.0]
}), [2])
if __name__ == "__main__":
test.main()
| apache-2.0 |
pwhelan/djshouts | django/db/backends/postgresql_psycopg2/base.py | 239 | 8346 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import sys
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.safestring import SafeUnicode, SafeString
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeString, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeUnicode, psycopg2.extensions.QuotedString)
class CursorWrapper(object):
"""
A thin wrapper around psycopg2's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
"""
def __init__(self, cursor):
self.cursor = cursor
def execute(self, query, args=None):
try:
return self.cursor.execute(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, args):
try:
return self.cursor.executemany(query, args)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
needs_datetime_string_cast = False
can_return_id_from_insert = False
requires_rollback_on_dirty_transaction = True
has_real_datatype = True
can_defer_constraint_checks = True
class DatabaseOperations(PostgresqlDatabaseOperations):
def last_executed_query(self, cursor, sql, params):
# With psycopg2, cursor objects have a "query" attribute that is the
# exact query sent to the database. See docs here:
# http://www.initd.org/tracker/psycopg/wiki/psycopg2_documentation#postgresql-status-message-and-executed-query
return cursor.query
def return_insert_id(self):
return "RETURNING %s", ()
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
self.features.uses_autocommit = autocommit
self._set_isolation_level(int(not autocommit))
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
new_connection = False
set_tz = False
settings_dict = self.settings_dict
if self.connection is None:
new_connection = True
set_tz = settings_dict.get('TIME_ZONE')
if settings_dict['NAME'] == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify NAME in your Django settings file.")
conn_params = {
'database': settings_dict['NAME'],
}
conn_params.update(settings_dict['OPTIONS'])
if 'autocommit' in conn_params:
del conn_params['autocommit']
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = settings_dict['PASSWORD']
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
self.connection = Database.connect(**conn_params)
self.connection.set_client_encoding('UTF8')
self.connection.set_isolation_level(self.isolation_level)
connection_created.send(sender=self.__class__, connection=self)
cursor = self.connection.cursor()
cursor.tzinfo_factory = None
if new_connection:
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings_dict['TIME_ZONE']])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version[0:2] < (8, 0):
# No savepoint support for earlier version of PostgreSQL.
self.features.uses_savepoints = False
if self.features.uses_autocommit:
if self._version[0:2] < (8, 2):
# FIXME: Needs extra code to do reliable model insert
# handling, so we forbid it for now.
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You cannot use autocommit=True with PostgreSQL prior to 8.2 at the moment.")
else:
# FIXME: Eventually we're enable this by default for
# versions that support it, but, right now, that's hard to
# do without breaking other things (#10509).
self.features.can_return_id_from_insert = True
return CursorWrapper(cursor)
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(1)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(0)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in (0, 1)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
| bsd-3-clause |
legrosbuffle/or-tools | examples/python/coins3.py | 7 | 2902 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coin application in Google CP Solver.
From 'Constraint Logic Programming using ECLiPSe'
pages 99f and 234 ff.
The solution in ECLiPSe is at page 236.
'''
What is the minimum number of coins that allows one to pay _exactly_
any amount smaller than one Euro? Recall that there are six different
euro cents, of denomination 1, 2, 5, 10, 20, 50
'''
Compare with the following models:
* MiniZinc: http://hakank.org/minizinc/coins3.mzn
* Comet : http://www.hakank.org/comet/coins3.co
* Gecode : http://hakank.org/gecode/coins3.cpp
* SICStus : http://hakank.org/sicstus/coins3.pl
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver("Coins")
#
# data
#
n = 6 # number of different coins
  variables = [1, 2, 5, 10, 20, 50]  # euro cent denominations from the problem statement
# declare variables
x = [solver.IntVar(0, 99, "x%i" % i) for i in range(n)]
num_coins = solver.IntVar(0, 99, "num_coins")
#
# constraints
#
# number of used coins, to be minimized
solver.Add(num_coins == solver.Sum(x))
# Check that all changes from 1 to 99 can be made.
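  # tmp[i] is how many coins of denomination variables[i] go into paying j;
  # it may never exceed x[i], the number of such coins we actually carry.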
for j in range(1, 100):
tmp = [solver.IntVar(0, 99, "b%i" % i) for i in range(n)]
solver.Add(solver.ScalProd(tmp, variables) == j)
[solver.Add(tmp[i] <= x[i]) for i in range(n)]
# objective
objective = solver.Minimize(num_coins, 1)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
solution.Add(num_coins)
solution.AddObjective(num_coins)
db = solver.Phase(x,
solver.CHOOSE_MIN_SIZE_LOWEST_MAX,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
print("x: ", [x[i].Value() for i in range(n)])
print("num_coins:", num_coins.Value())
print()
num_solutions += 1
solver.EndSearch()
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
if __name__ == "__main__":
main()
| apache-2.0 |
xujb/odoo | addons/account_payment/account_move_line.py | 241 | 4455 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from operator import itemgetter
class account_move_line(osv.osv):
_inherit = "account.move.line"
# delegate to parent, used for local fields.function redefinition
def _amount_to_pay(self, cr, uid, ids, field_names, args, context=None):
return {
id: value['amount_residual']
for id, value in self._amount_residual(cr, uid, ids, field_names, args,
context=context).items()
}
def _to_pay_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
line_obj = self.pool.get('account.move.line')
query = line_obj._query_get(cr, uid, context={})
where = ' and '.join(map(lambda x: '''(SELECT
CASE WHEN l.amount_currency < 0
THEN - l.amount_currency
ELSE l.credit
END - coalesce(sum(pl.amount_currency), 0)
FROM payment_line pl
INNER JOIN payment_order po ON (pl.order_id = po.id)
WHERE move_line_id = l.id
AND po.state != 'cancel'
) %(operator)s %%s ''' % {'operator': x[1]}, args))
sql_args = tuple(map(itemgetter(2), args))
cr.execute(('''SELECT id
FROM account_move_line l
WHERE account_id IN (select id
FROM account_account
WHERE type=%s AND active)
AND reconcile_id IS null
AND credit > 0
AND ''' + where + ' and ' + query), ('payable',)+sql_args )
res = cr.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', map(lambda x:x[0], res))]
def line2bank(self, cr, uid, ids, payment_type=None, context=None):
"""
Try to return for each Ledger Posting line a corresponding bank
account according to the payment type. This work using one of
the bank of the partner defined on the invoice eventually
associated to the line.
Return the first suitable bank for the corresponding partner.
"""
payment_mode_obj = self.pool.get('payment.mode')
line2bank = {}
if not ids:
return {}
bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type,
context=context)
for line in self.browse(cr, uid, ids, context=context):
line2bank[line.id] = False
if line.invoice and line.invoice.partner_bank_id:
line2bank[line.id] = line.invoice.partner_bank_id.id
elif line.partner_id:
if not line.partner_id.bank_ids:
line2bank[line.id] = False
else:
for bank in line.partner_id.bank_ids:
if bank.state in bank_type:
line2bank[line.id] = bank.id
break
if not line2bank.get(line.id) and line.partner_id.bank_ids:
line2bank[line.id] = line.partner_id.bank_ids[0].id
else:
raise osv.except_osv(_('Error!'), _('There is no partner defined on the entry line.'))
return line2bank
_columns = {
'amount_to_pay': fields.function(_amount_to_pay,
type='float', string='Amount to pay', fnct_search=_to_pay_search),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rahushen/ansible | lib/ansible/utils/module_docs_fragments/k8s_state_options.py | 80 | 1411 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Options for specifying object state
class ModuleDocFragment(object):
DOCUMENTATION = '''
options:
state:
description:
- Determines if an object should be created, patched, or deleted. When set to C(present), an object will be
created, if it does not already exist. If set to C(absent), an existing object will be deleted. If set to
C(present), an existing object will be patched, if its attributes differ from those specified using
I(resource_definition) or I(src).
default: present
choices:
- present
- absent
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will be replaced.
default: false
type: bool
'''
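# Illustrative use of this fragment (assumption, not part of this file):
# a module pulls these options in through its own documentation, e.g.
#
#   DOCUMENTATION = '''
#   ...
#   extends_documentation_fragment:
#     - k8s_state_options
#   '''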
| gpl-3.0 |
soxofaan/luigi | test/simulate_test.py | 13 | 2971 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import luigi
from luigi.contrib.simulate import RunAnywayTarget
from multiprocessing import Process
import os
import tempfile
def temp_dir():
return os.path.join(tempfile.gettempdir(), 'luigi-simulate')
def is_writable():
d = temp_dir()
fn = os.path.join(d, 'luigi-simulate-write-test')
exists = True
try:
try:
os.makedirs(d)
except OSError:
pass
open(fn, 'w').close()
os.remove(fn)
except BaseException:
exists = False
return unittest.skipIf(not exists, 'Can\'t write to temporary directory')
class TaskA(luigi.Task):
i = luigi.IntParameter(default=0)
def output(self):
return RunAnywayTarget(self)
def run(self):
fn = os.path.join(temp_dir(), 'luigi-simulate-test.tmp')
try:
os.makedirs(os.path.dirname(fn))
except OSError:
pass
with open(fn, 'a') as f:
f.write('{0}={1}\n'.format(self.__class__.__name__, self.i))
self.output().done()
class TaskB(TaskA):
def requires(self):
return TaskA(i=10)
class TaskC(TaskA):
def requires(self):
return TaskA(i=5)
class TaskD(TaskA):
def requires(self):
return [TaskB(), TaskC(), TaskA(i=20)]
class TaskWrap(luigi.WrapperTask):
def requires(self):
return [TaskA(), TaskD()]
def reset():
# Force tasks to be executed again (because multiple pipelines are executed inside of the same process)
t = TaskA().output()
with t.unique.get_lock():
t.unique.value = 0
class RunAnywayTargetTest(unittest.TestCase):
@is_writable()
def test_output(self):
reset()
fn = os.path.join(temp_dir(), 'luigi-simulate-test.tmp')
luigi.build([TaskWrap()], local_scheduler=True)
with open(fn, 'r') as f:
data = f.read().strip().split('\n')
data.sort()
reference = ['TaskA=0', 'TaskA=10', 'TaskA=20', 'TaskA=5', 'TaskB=0', 'TaskC=0', 'TaskD=0']
reference.sort()
os.remove(fn)
self.assertEqual(data, reference)
@is_writable()
def test_output_again(self):
# Running the test in another process because the PID is used to determine if the target exists
p = Process(target=self.test_output)
p.start()
p.join()
| apache-2.0 |
aviaryan/pythons | TheHyliaSoundtrack/hylia_s.py | 1 | 1588 | from bs4 import BeautifulSoup
from urllib.request import urlopen
import subprocess
# if genlist = 0, then this script downloads the files, the cmd_downloader variable comes into play
# if genlist = 1, then this script generates a list.txt file containing direct links to music files in the working directory
# the list.txt can be imported in any download manager like IDM , FDM etc to download all files at once with full speed
genlist = 1
cmd_downloader = 'aria2c -x 8 -s 8 -k 3M'
# example of url : http://anime.thehylia.com/soundtracks/album/death-note-original-soundtrack
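# Usage sketch: run the script and paste an album URL at the prompt; with
# genlist = 1 a list.txt of direct .mp3 links is written to the working
# directory, otherwise each file is handed to cmd_downloader.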
def run():
url = input('url of soundtrack album \n> ')
response = urlopen(url)
data = response.read()
soup = BeautifulSoup(data, 'lxml') # HTML.parser fails, smart technique hylia
# open('list.html', 'w').write(data.decode())
getsongs( soup.body.find_all('a') )
def getsongs( tds ):
downlist = ''
cur = 1
for i in tds:
link = i['href']
if not ismp3(link):
continue
# download song
response = urlopen(link)
songdata = response.read()
songsoup = BeautifulSoup(songdata, 'lxml')
links = songsoup.body.find_all('a')
for dlink in links:
if not ismp3(dlink['href']):
continue
print('Downloading song #' + str(cur))
if genlist:
downlist += dlink['href'] + '\n'
else:
subprocess.call(cmd_downloader + ' ' + dlink['href'])
break # ehh
cur += 1
if genlist:
open('list.txt', 'w').write(downlist)
def ismp3(link):
if len(link) < 5:
return False
if link[-4:] != '.mp3':
return False
return True
if __name__ == '__main__':
run() | apache-2.0 |
chipx86/reviewboard | reviewboard/diffviewer/tests/test_forms.py | 2 | 35354 | from __future__ import unicode_literals
import base64
import json
import nose
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test.client import RequestFactory
from django.utils import six
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.filesystem import is_exe_in_path
from kgb import SpyAgency
from reviewboard.diffviewer.diffutils import (get_original_file,
get_patched_file,
patch)
from reviewboard.diffviewer.errors import (DiffParserError, DiffTooBigError,
EmptyDiffError)
from reviewboard.diffviewer.forms import (UploadCommitForm, UploadDiffForm,
ValidateCommitForm)
from reviewboard.diffviewer.models import DiffSet, DiffSetHistory
from reviewboard.scmtools.errors import FileNotFoundError
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.testing import TestCase
class UploadCommitFormTests(SpyAgency, TestCase):
"""Unit tests for UploadCommitForm."""
fixtures = ['test_scmtools']
_default_form_data = {
'base_commit_id': '1234',
'basedir': '/',
'commit_id': 'r1',
'parent_id': 'r0',
'commit_message': 'Message',
'author_name': 'Author',
'author_email': '[email protected]',
'author_date': '1970-01-01 00:00:00+0000',
'committer_name': 'Committer',
'committer_email': '[email protected]',
'committer_date': '1970-01-01 00:00:00+0000',
}
def setUp(self):
super(UploadCommitFormTests, self).setUp()
self.repository = self.create_repository(tool_name='Git')
self.spy_on(self.repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
self.diffset = DiffSet.objects.create_empty(repository=self.repository)
def test_create(self):
"""Testing UploadCommitForm.create"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=self._default_form_data.copy(),
files={
'diff': diff,
})
self.assertTrue(form.is_valid())
commit = form.create()
self.assertEqual(self.diffset.files.count(), 1)
self.assertEqual(self.diffset.commits.count(), 1)
self.assertEqual(commit.files.count(), 1)
self.assertEqual(set(self.diffset.files.all()),
set(commit.files.all()))
def test_clean_parent_diff_path(self):
"""Testing UploadCommitForm.clean() for a subsequent commit with a
parent diff
"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
parent_diff = SimpleUploadedFile('parent_diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=self._default_form_data.copy(),
files={
'diff': diff,
'parent_diff': parent_diff,
})
self.assertTrue(form.is_valid())
form.create()
form = UploadCommitForm(
diffset=self.diffset,
data=dict(
self._default_form_data,
**{
'parent_id': 'r1',
'commit_id': 'r2',
}
),
files={
'diff': diff,
'parent_diff': parent_diff,
})
self.assertTrue(form.is_valid())
self.assertNotIn('parent_diff', form.errors)
def test_clean_published_diff(self):
"""Testing UploadCommitForm.clean() for a DiffSet that has already been
published
"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=self._default_form_data,
files={
'diff': diff,
})
self.assertTrue(form.is_valid())
form.create()
self.diffset.history = DiffSetHistory.objects.create()
self.diffset.save(update_fields=('history_id',))
form = UploadCommitForm(
diffset=self.diffset,
data=dict(
self._default_form_data,
parent_id='r1',
commit_id='r0',
),
files={
'diff_path': SimpleUploadedFile(
'diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch'),
})
self.assertFalse(form.is_valid())
self.assertNotEqual(form.non_field_errors, [])
def test_clean_author_date(self):
"""Testing UploadCommitForm.clean_author_date"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=dict(self._default_form_data, **{
'author_date': 'Jan 1 1970',
}),
files={
'diff': diff,
})
self.assertFalse(form.is_valid())
self.assertIn('author_date', form.errors)
def test_clean_committer_date(self):
"""Testing UploadCommitForm.clean_committer_date"""
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=dict(self._default_form_data, **{
'committer_date': 'Jun 1 1970',
}),
files={
'diff': diff,
})
self.assertFalse(form.is_valid())
self.assertIn('committer_date', form.errors)
def test_clean_no_committer(self):
"""Testing UploadCommitForm.clean when no committer_ fields are present
"""
field_names = {
'committer_date',
'committer_email',
'committer_name',
}
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form_data = self._default_form_data.copy()
for field in field_names:
del form_data[field]
form = UploadCommitForm(
diffset=self.diffset,
data=form_data,
files={
'diff': diff,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
field: ['This field is required.']
for field in field_names
})
def test_clean_commiter_unsupported(self):
"""Testing UploadCommitForm.clean when committer_ fields are present
for a SCMTool that doesn't support them
"""
if not is_exe_in_path('hg'):
raise nose.SkipTest('Hg is not installed')
self.repository.tool = Tool.objects.get(name='Mercurial')
self.repository.save()
diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
form = UploadCommitForm(
diffset=self.diffset,
data=self._default_form_data.copy(),
files={
'diff': diff,
})
self.assertTrue(form.is_valid())
self.assertNotIn('committer_date', form.cleaned_data)
self.assertNotIn('committer_email', form.cleaned_data)
self.assertNotIn('committer_name', form.cleaned_data)
class UploadDiffFormTests(SpyAgency, TestCase):
"""Unit tests for UploadDiffForm."""
fixtures = ['test_scmtools']
def test_create(self):
"""Testing UploadDiffForm.create"""
diff_file = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
'base_commit_id': '1234',
},
files={
'path': diff_file,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
self.assertEqual(diffset.basedir, '/')
self.assertEqual(diffset.base_commit_id, '1234')
def test_create_filters_parent_diffs(self):
"""Testing UploadDiffForm.create filters parent diff files"""
saw_file_exists = {}
def get_file_exists(repository, filename, revision, *args, **kwargs):
saw_file_exists[(filename, revision)] = True
return True
parent_diff_1 = (
b'diff --git a/README b/README\n'
b'index d6613f4..5b50865 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -2 +2 @@\n'
b'-blah..\n'
b'+blah blah\n'
)
parent_diff_2 = (
b'diff --git a/UNUSED b/UNUSED\n'
b'index 1234567..5b50866 100644\n'
b'--- UNUSED\n'
b'+++ UNUSED\n'
b'@@ -1,1 +1,1 @@\n'
b'-foo\n'
b'+bar\n'
)
parent_diff = parent_diff_1 + parent_diff_2
diff_file = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
parent_diff_file = SimpleUploadedFile('parent_diff', parent_diff,
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists, call_fake=get_file_exists)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
},
files={
'path': diff_file,
'parent_diff_path': parent_diff_file,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.get()
self.assertEqual(filediff.diff, self.DEFAULT_GIT_FILEDIFF_DATA_DIFF)
self.assertEqual(filediff.parent_diff, parent_diff_1)
self.assertIn(('/README', 'd6613f4'), saw_file_exists)
self.assertNotIn(('/UNUSED', '1234567'), saw_file_exists)
self.assertEqual(len(saw_file_exists), 1)
def test_create_with_parser_get_orig_commit_id(self):
"""Testing UploadDiffForm.create uses correct base revision returned
by DiffParser.get_orig_commit_id
"""
if not is_exe_in_path('hg'):
raise nose.SkipTest('Hg is not installed')
diff = (
b'# Node ID a6fc203fee9091ff9739c9c00cd4a6694e023f48\n'
b'# Parent 7c4735ef51a7c665b5654f1a111ae430ce84ebbd\n'
b'diff --git a/doc/readme b/doc/readme\n'
b'--- a/doc/readme\n'
b'+++ b/doc/readme\n'
b'@@ -1,3 +1,3 @@\n'
b' Hello\n'
b'-\n'
b'+...\n'
b' goodbye\n'
)
parent_diff = (
b'# Node ID 7c4735ef51a7c665b5654f1a111ae430ce84ebbd\n'
b'# Parent 661e5dd3c4938ecbe8f77e2fdfa905d70485f94c\n'
b'diff --git a/doc/newfile b/doc/newfile\n'
b'new file mode 100644\n'
b'--- /dev/null\n'
b'+++ b/doc/newfile\n'
b'@@ -0,0 +1,1 @@\n'
b'+Lorem ipsum\n'
)
diff_file = SimpleUploadedFile('diff', diff,
content_type='text/x-patch')
parent_diff_file = SimpleUploadedFile('parent_diff', parent_diff,
content_type='text/x-patch')
repository = Repository.objects.create(
name='Test HG',
path='scmtools/testdata/hg_repo',
tool=Tool.objects.get(name='Mercurial'))
form = UploadDiffForm(
repository=repository,
files={
'path': diff_file,
'parent_diff_path': parent_diff_file,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.get()
self.assertEqual(filediff.source_revision,
'661e5dd3c4938ecbe8f77e2fdfa905d70485f94c')
def test_create_with_parent_filediff_with_move_and_no_change(self):
"""Testing UploadDiffForm.create with a parent diff consisting only
of a move/rename without content change
"""
revisions = [
b'93e6b3e8944c48737cb11a1e52b046fa30aea7a9',
b'4839fc480f47ca59cf05a9c39410ea744d1e17a2',
]
parent_diff = SimpleUploadedFile(
'parent_diff',
(b'diff --git a/foo b/bar\n'
             b'similarity index 100%\n'
b'rename from foo\n'
b'rename to bar\n'),
content_type='text/x-patch')
diff = SimpleUploadedFile(
'diff',
(b'diff --git a/bar b/bar\n'
b'index %s..%s 100644\n'
b'--- a/bar\n'
b'+++ b/bar\n'
b'@@ -1,2 +1,3 @@\n'
b' Foo\n'
b'+Bar\n') % (revisions[0], revisions[1]),
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
# We will only be making one call to get_file and we can fake it out.
self.spy_on(repository.get_file,
call_fake=lambda *args, **kwargs: b'Foo\n')
self.spy_on(patch)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
},
files={
'path': diff,
'parent_diff_path': parent_diff,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
f = diffset.files.get()
self.assertEqual(f.source_revision, revisions[0].decode('utf-8'))
self.assertEqual(f.dest_detail, revisions[1].decode('utf-8'))
# We shouldn't call out to patch because the parent diff is just a
# rename.
original_file = get_original_file(filediff=f,
request=None,
encoding_list=['ascii'])
self.assertEqual(original_file, b'Foo\n')
self.assertFalse(patch.spy.called)
patched_file = get_patched_file(source_data=original_file,
filediff=f)
self.assertEqual(patched_file, b'Foo\nBar\n')
self.assertTrue(patch.spy.called)
def test_create_with_parent_filediff_with_move_and_change(self):
"""Testing UploadDiffForm.create with a parent diff consisting of a
move/rename with content change
"""
revisions = [
b'5d36b88bb697a2d778f024048bafabd443d74503',
b'9b32edcd37a88c6ada91efc562afa637ccfdad36',
b'8a567d328293f85d68332bc693b0a98869b23b47',
]
parent_diff = SimpleUploadedFile(
'parent_diff',
(b'diff --git a/foo b/bar\n'
b'similarity index 55%%\n'
b'rename from foo\n'
b'rename to bar\n'
b'index %s..%s 100644\n'
b'--- a/foo\n'
b'+++ b/bar\n'
b'@@ -1,2 +1,3 @@\n'
b' Foo\n'
b'+Bar\n') % (revisions[0], revisions[1]),
content_type='text/x-patch')
diff = SimpleUploadedFile(
'diff',
(b'diff --git a/bar b/bar\n'
b'index %s..%s 100644\n'
b'--- a/bar\n'
b'+++ b/bar\n'
b'@@ -1,3 +1,4 @@\n'
b' Foo\n'
b' Bar\n'
b'+Baz\n') % (revisions[1], revisions[2]),
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
# We will only be making one call to get_file and we can fake it out.
self.spy_on(repository.get_file,
call_fake=lambda *args, **kwargs: b'Foo\n')
self.spy_on(patch)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
},
files={
'path': diff,
'parent_diff_path': parent_diff,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.get()
self.assertEqual(filediff.source_file, 'bar')
self.assertEqual(filediff.dest_file, 'bar')
self.assertEqual(filediff.source_revision, revisions[1].decode('utf-8'))
self.assertEqual(filediff.dest_detail, revisions[2].decode('utf-8'))
self.assertEqual(filediff.extra_data, {
'__parent_diff_empty': False,
'is_symlink': False,
'parent_moved': True,
'parent_source_filename': '/foo',
'parent_source_revision': revisions[0].decode('utf-8'),
'raw_delete_count': 0,
'raw_insert_count': 1,
})
original_file = get_original_file(filediff=filediff,
request=None,
encoding_list=['ascii'])
self.assertEqual(original_file, b'Foo\nBar\n')
self.assertTrue(patch.spy.called)
patched_file = get_patched_file(source_data=original_file,
filediff=filediff)
self.assertEqual(patched_file, b'Foo\nBar\nBaz\n')
self.assertEqual(len(patch.spy.calls), 2)
def test_create_missing_basedir(self):
"""Testing UploadDiffForm with a missing basedir field that is
required
"""
repository = self.create_repository(tool_name='Test')
scmtool = repository.get_scmtool()
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
revisions = [
b'93e6b3e8944c48737cb11a1e52b046fa30aea7a9',
b'4839fc480f47ca59cf05a9c39410ea744d1e17a2',
]
diff = SimpleUploadedFile(
'diff',
(b'diff --git a/bar b/bar\n'
b'index %s..%s 100644\n'
b'--- a/bar\n'
b'+++ b/bar\n'
b'@@ -1,2 +1,3 @@\n'
b' Foo\n'
b'+Bar\n') % (revisions[0], revisions[1]),
content_type='text/x-patch')
try:
orig_use_abs_paths = scmtool.diffs_use_absolute_paths
scmtool.diffs_use_absolute_paths = True
form = UploadDiffForm(
repository=repository,
files={
'path': diff,
}
)
self.assertFalse(form.is_valid())
finally:
scmtool.diffs_use_absolute_paths = orig_use_abs_paths
self.assertIn('basedir', form.errors)
self.assertIn('This field is required.', form.errors['basedir'])
def test_create_with_parent_filediff_with_new_file(self):
"""Testing UploadDiffForm.create with a parent diff consisting of a
newly-introduced file
"""
revisions = [
b'0000000000000000000000000000000000000000',
b'9b32edcd37a88c6ada91efc562afa637ccfdad36',
b'8a567d328293f85d68332bc693b0a98869b23b47',
]
parent_diff = SimpleUploadedFile(
'parent_diff',
(b'diff --git a/foo b/foo\n'
b'new file mode 100644\n'
b'index %s..%s\n'
b'--- /dev/null\n'
b'+++ b/foo\n'
b'@@ -0,0 +1,2 @@\n'
b'+Foo\n'
b'+Bar\n') % (revisions[0], revisions[1]),
content_type='text/x-patch')
diff = SimpleUploadedFile(
'diff',
(b'diff --git a/foo b/foo\n'
b'index %s..%s 100644\n'
b'--- a/foo\n'
b'+++ b/foo\n'
b'@@ -1,3 +1,4 @@\n'
b' Foo\n'
b' Bar\n'
b'+Baz\n') % (revisions[1], revisions[2]),
content_type='text/x-patch')
repository = self.create_repository(tool_name='Test')
self.spy_on(repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
# We will only be making one call to get_file and we can fake it out.
self.spy_on(repository.get_file,
call_fake=lambda *args, **kwargs: b'Foo\n')
self.spy_on(patch)
form = UploadDiffForm(
repository=repository,
data={
'basedir': '/',
},
files={
'parent_diff_path': parent_diff,
'path': diff,
})
self.assertTrue(form.is_valid())
diffset = form.create()
self.assertEqual(diffset.files.count(), 1)
filediff = diffset.files.get()
self.assertEqual(filediff.source_file, 'foo')
self.assertEqual(filediff.dest_file, 'foo')
self.assertEqual(filediff.source_revision, revisions[1].decode('utf-8'))
self.assertEqual(filediff.dest_detail, revisions[2].decode('utf-8'))
self.assertEqual(filediff.extra_data, {
'__parent_diff_empty': False,
'is_symlink': False,
'parent_source_filename': '/foo',
'parent_source_revision': 'PRE-CREATION',
'raw_delete_count': 0,
'raw_insert_count': 1,
})
# Double-check the types.
self.assertIsInstance(filediff.extra_data['parent_source_filename'],
six.text_type)
self.assertIsInstance(filediff.extra_data['parent_source_revision'],
six.text_type)
original_file = get_original_file(filediff=filediff,
request=None,
encoding_list=['ascii'])
self.assertEqual(original_file, b'Foo\nBar\n')
self.assertSpyCalled(patch)
patched_file = get_patched_file(source_data=original_file,
filediff=filediff)
self.assertEqual(patched_file, b'Foo\nBar\nBaz\n')
self.assertEqual(len(patch.calls), 2)
class ValidateCommitFormTests(SpyAgency, TestCase):
"""Unit tests for ValidateCommitForm."""
fixtures = ['test_scmtools']
_PARENT_DIFF_DATA = (
b'diff --git a/README b/README\n'
b'new file mode 100644\n'
b'index 0000000..94bdd3e\n'
b'--- /dev/null\n'
b'+++ b/README\n'
b'@@ -0,0 +2 @@\n'
b'+blah blah\n'
b'+blah blah\n'
)
@classmethod
def setUpClass(cls):
super(ValidateCommitFormTests, cls).setUpClass()
cls.request_factory = RequestFactory()
def setUp(self):
super(ValidateCommitFormTests, self).setUp()
self.repository = self.create_repository(tool_name='Git')
self.request = self.request_factory.get('/')
self.diff = SimpleUploadedFile('diff',
self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,
content_type='text/x-patch')
def test_clean_already_validated(self):
"""Testing ValidateCommitForm.clean for a commit that has already been
validated
"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r0',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'validation_info': ['This commit was already validated.'],
})
def test_clean_parent_not_validated(self):
"""Testing ValidateCommitForm.clean for a commit whose parent has not
been validated
"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r3',
'parent_id': 'r2',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'validation_info': ['The parent commit was not validated.'],
})
def test_clean_parent_diff_subsequent_commit(self):
"""Testing ValidateCommitForm.clean with a non-empty parent diff for
a subsequent commit
"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
parent_diff = SimpleUploadedFile('diff',
self._PARENT_DIFF_DATA,
content_type='text/x-patch')
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': self.diff,
'parent_diff': parent_diff,
})
self.assertTrue(form.is_valid())
def test_clean_validation_info(self):
"""Testing ValidateCommitForm.clean_validation_info"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertTrue(form.is_valid())
def test_clean_validation_info_invalid_base64(self):
"""Testing ValidateCommitForm.clean_validation_info with
non-base64-encoded data"""
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': 'This is not base64!',
},
files={
'diff': self.diff,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
'validation_info': [
'Could not parse validation info "This is not base64!": '
'Incorrect padding',
],
})
def test_clean_validation_info_invalid_json(self):
"""Testing ValidateCommitForm.clean_validation_info with base64-encoded
non-json data
"""
validation_info = base64.b64encode(b'Not valid json.')
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertFalse(form.is_valid())
# Python 2 and 3 differ in the error contents you'll get when
# attempting to load non-JSON data.
if six.PY3:
expected_error = 'Expecting value: line 1 column 1 (char 0)'
else:
expected_error = 'No JSON object could be decoded'
self.assertEqual(form.errors, {
'validation_info': [
'Could not parse validation info "%s": %s'
% (validation_info.decode('utf-8'), expected_error),
],
})
def test_validate_diff(self):
"""Testing ValidateCommitForm.validate_diff"""
self.spy_on(self.repository.get_file_exists,
call_fake=lambda *args, **kwargs: True)
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r2',
},
files={
'diff': self.diff,
})
self.assertTrue(form.is_valid())
form.validate_diff()
def test_validate_diff_subsequent_commit(self):
"""Testing ValidateCommitForm.validate_diff for a subsequent commit"""
diff_content = (
b'diff --git a/foo b/foo\n'
b'index %s..%s 100644\n'
b'--- a/foo\n'
b'+++ b/foo\n'
b'@@ -0,0 +1,2 @@\n'
b'+This is not a new file.\n'
% (b'a' * 40, b'b' * 40)
)
diff = SimpleUploadedFile('diff', diff_content,
content_type='text/x-patch')
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [{
'filename': 'foo',
'revision': 'a' * 40,
}],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': diff,
})
self.assertTrue(form.is_valid())
form.validate_diff()
def test_validate_diff_missing_files(self):
"""Testing ValidateCommitForm.validate_diff for a subsequent commit
with missing files
"""
validation_info = self._base64_json({
'r1': {
'parent_id': 'r0',
'tree': {
'added': [],
'removed': [],
'modified': [],
},
},
})
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r2',
'parent_id': 'r1',
'validation_info': validation_info,
},
files={
'diff': self.diff,
})
self.assertTrue(form.is_valid())
with self.assertRaises(FileNotFoundError):
form.validate_diff()
def test_validate_diff_empty(self):
"""Testing ValidateCommitForm.validate_diff for an empty diff"""
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r0',
},
files={
'diff': SimpleUploadedFile('diff', b' ',
content_type='text/x-patch'),
})
self.assertTrue(form.is_valid())
with self.assertRaises(EmptyDiffError):
form.validate_diff()
def test_validate_diff_too_big(self):
"""Testing ValidateCommitForm.validate_diff for a diff that is too
large
"""
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r0',
},
files={
'diff': self.diff,
})
self.assertTrue(form.is_valid())
with self.assertRaises(DiffTooBigError):
with self.siteconfig_settings({'diffviewer_max_diff_size': 1},
reload_settings=False):
form.validate_diff()
def test_validate_diff_parser_error(self):
"""Testing ValidateCommitForm.validate_diff for an invalid diff"""
form = ValidateCommitForm(
repository=self.repository,
request=self.request,
data={
'commit_id': 'r1',
'parent_id': 'r0',
},
files={
'diff': SimpleUploadedFile('diff', b'asdf',
content_type='text/x-patch'),
})
self.assertTrue(form.is_valid())
with self.assertRaises(DiffParserError):
form.validate_diff()
def _base64_json(self, data):
"""Return a Base64-encoded JSON payload.
Args:
data (object):
The data to encode to JSON.
Returns:
bytes:
The Base64-encoded JSON payload.
"""
return base64.b64encode(json.dumps(data).encode('utf-8'))
| mit |
BlueLens/bl-magi | tensorflow/slim/datasets/cifar10.py | 7 | 3237 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides data for the Cifar10 dataset.
The dataset scripts used to create the dataset can be found at:
tensorflow/models/research/slim/datasets/download_and_convert_cifar10.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from datasets import dataset_utils
slim = tf.contrib.slim
_FILE_PATTERN = 'cifar10_%s.tfrecord'
SPLITS_TO_SIZES = {'train': 50000, 'test': 10000}
_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [32 x 32 x 3] color image.',
'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading cifar10.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if not reader:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature(
[], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[32, 32, 3]),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
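# Illustrative usage sketch (not part of the original module). It assumes the
# TFRecords were produced by download_and_convert_cifar10.py and that
# tf.contrib.slim's DatasetDataProvider is available; the local variable names
# and the dataset directory below are hypothetical.
#
#   dataset = get_split('train', '/tmp/cifar10')
#   provider = slim.dataset_data_provider.DatasetDataProvider(
#       dataset, shuffle=True)
#   image, label = provider.get(['image', 'label'])
#   # `image` is a [32, 32, 3] uint8 tensor and `label` an int64 scalar,
#   # decoded with the handlers configured above.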
| apache-2.0 |
RasaHQ/rasa_nlu | rasa/core/policies/form_policy.py | 1 | 5093 | import logging
from typing import List, Optional, Dict, Text
from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.domain import PREV_PREFIX, ACTIVE_FORM_PREFIX, Domain
from rasa.core.events import FormValidation
from rasa.core.featurizers import TrackerFeaturizer
from rasa.core.policies.memoization import MemoizationPolicy
from rasa.core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
class FormPolicy(MemoizationPolicy):
"""Policy which handles prediction of Forms"""
ENABLE_FEATURE_STRING_COMPRESSION = True
def __init__(
self,
featurizer: Optional[TrackerFeaturizer] = None,
priority: int = 4,
lookup: Optional[Dict] = None,
) -> None:
# max history is set to 2 in order to capture
# previous meaningful action before action listen
super(FormPolicy, self).__init__(
featurizer=featurizer, priority=priority, max_history=2, lookup=lookup
)
@staticmethod
def _get_active_form_name(state):
found_forms = [
state_name[len(ACTIVE_FORM_PREFIX) :]
for state_name, prob in state.items()
if ACTIVE_FORM_PREFIX in state_name and prob > 0
]
# by construction there is only one active form
return found_forms[0] if found_forms else None
@staticmethod
def _prev_action_listen_in_state(state):
return any(
PREV_PREFIX + ACTION_LISTEN_NAME in state_name and prob > 0
for state_name, prob in state.items()
)
@staticmethod
def _modified_states(states):
"""Modify the states to
- capture previous meaningful action before action_listen
- ignore previous intent
"""
if states[0] is None:
action_before_listen = None
else:
action_before_listen = {
state_name: prob
for state_name, prob in states[0].items()
if PREV_PREFIX in state_name and prob > 0
}
return [action_before_listen, states[-1]]
def _add_states_to_lookup(
self, trackers_as_states, trackers_as_actions, domain, online=False
):
"""Add states to lookup dict"""
for states in trackers_as_states:
active_form = self._get_active_form_name(states[-1])
if active_form and self._prev_action_listen_in_state(states[-1]):
# modify the states
states = self._modified_states(states)
feature_key = self._create_feature_key(states)
# even if there are two identical feature keys
# their form will be the same
# because of `active_form_...` feature
self.lookup[feature_key] = active_form
def recall(
self,
states: List[Dict[Text, float]],
tracker: DialogueStateTracker,
domain: Domain,
) -> Optional[int]:
# modify the states
return self._recall_states(self._modified_states(states))
def state_is_unhappy(self, tracker, domain):
        # since it is assumed that training stories contain
        # only unhappy paths, notify the form that
        # it should not be validated if predicted by another policy
tracker_as_states = self.featurizer.prediction_states([tracker], domain)
states = tracker_as_states[0]
memorized_form = self.recall(states, tracker, domain)
state_is_unhappy = (
memorized_form is not None
and memorized_form == tracker.active_form.get("name")
)
if state_is_unhappy:
logger.debug(
"There is a memorized tracker state {}, "
"added `FormValidation(False)` event"
"".format(self._modified_states(states))
)
return state_is_unhappy
def predict_action_probabilities(
self, tracker: DialogueStateTracker, domain: Domain
) -> List[float]:
"""Predicts the corresponding form action if there is an active form"""
result = [0.0] * domain.num_actions
if tracker.active_form.get("name"):
logger.debug(
"There is an active form '{}'".format(tracker.active_form["name"])
)
if tracker.latest_action_name == ACTION_LISTEN_NAME:
# predict form action after user utterance
if tracker.active_form.get("rejected"):
if self.state_is_unhappy(tracker, domain):
tracker.update(FormValidation(False))
return result
idx = domain.index_for_action(tracker.active_form["name"])
result[idx] = 1.0
elif tracker.latest_action_name == tracker.active_form.get("name"):
# predict action_listen after form action
idx = domain.index_for_action(ACTION_LISTEN_NAME)
result[idx] = 1.0
else:
logger.debug("There is no active form")
return result
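# Illustrative sketch (not part of the original module): how the two-state
# window is normalized by _modified_states() before the memoization lookup.
# The feature names below are hypothetical.
#
#   states = [
#       {'prev_utter_ask_email': 1.0, 'intent_inform': 1.0},
#       {'prev_action_listen': 1.0, 'intent_inform': 1.0,
#        'active_form_my_form': 1.0},
#   ]
#   FormPolicy._modified_states(states)
#   # -> [{'prev_utter_ask_email': 1.0},   # only the prev_* action is kept
#   #     {'prev_action_listen': 1.0, 'intent_inform': 1.0,
#   #      'active_form_my_form': 1.0}]    # last state is kept unchanged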
| apache-2.0 |
zturchan/CMPUT410-Lab6 | v1/lib/python2.7/site-packages/django/forms/fields.py | 34 | 46782 | """
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
TextInput, NumberInput, EmailInput, URLInput, HiddenInput,
MultipleHiddenInput, ClearableFileInput, CheckboxInput, Select,
NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, FILE_INPUT_CONTRADICTION
)
from django.utils import formats
from django.utils.encoding import smart_text, force_str, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField'
)
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
# Add an 'invalid' entry to default_error_message if you want a specific
# field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=[], localize=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def _has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it w/ ''.
initial_value = initial if initial is not None else ''
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
data = self._coerce(data)
except ValidationError:
return True
data_value = data if data is not None else ''
return initial_value != data_value
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None:
# The HTML attribute is maxlength, not max_length.
attrs.update({'maxlength': str(self.max_length)})
return attrs
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
if kwargs.get('localize') and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault('widget', super(IntegerField, self).widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(FloatField, self).validate(value)
# Check for NaN (which is the only thing not equal to itself) and +/- infinity
if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
'max_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit in total.',
'Ensure that there are no more than %(max)s digits in total.',
'max'),
'max_decimal_places': ungettext_lazy(
'Ensure that there are no more than %(max)s decimal place.',
'Ensure that there are no more than %(max)s decimal places.',
'max'),
'max_whole_digits': ungettext_lazy(
'Ensure that there are no more than %(max)s digit before the decimal point.',
'Ensure that there are no more than %(max)s digits before the decimal point.',
'max'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'], code='invalid')
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
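        # Worked example (illustrative note, not part of the original code):
        #   Decimal('0.0001').as_tuple() gives sign=0, digits=(1,),
        #   exponent=-4, so decimals=4 while len(digittuple)=1; digits is
        #   bumped to 4 and whole_digits becomes 0, which is what lets
        #   max_digits=4 with decimal_places=4 validate as intended.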
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(
self.error_messages['max_digits'],
code='max_digits',
params={'max': self.max_digits},
)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(
self.error_messages['max_decimal_places'],
code='max_decimal_places',
params={'max': self.decimal_places},
)
if (self.max_digits is not None and self.decimal_places is not None
and whole_digits > (self.max_digits - self.decimal_places)):
raise ValidationError(
self.error_messages['max_whole_digits'],
code='max_whole_digits',
params={'max': (self.max_digits - self.decimal_places)},
)
return value
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal('1') / 10 ** self.decimal_places).lower()
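                # Illustrative example (not in the original code): with
                # decimal_places=7 this yields Decimal('1E-7') and therefore
                # step='1e-7' rather than '0.0000001'.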
else:
step = 'any'
attrs.setdefault('step', step)
return attrs
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'], code='invalid')
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
warnings.warn(
'Using SplitDateTimeWidget with DateTimeField is deprecated. '
'Use SplitDateTimeField instead.',
RemovedInDjango19Warning, stacklevel=2)
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if value[0] in self.empty_values and value[1] in self.empty_values:
return None
value = '%s %s' % tuple(value)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message is not None:
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def clean(self, value):
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if self.max_length is not None and len(file_name) > self.max_length:
params = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
if not file_name:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'], code='empty')
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'], code='contradiction')
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in self.empty_value; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def _has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _("Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from django.utils.image import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
# verify() must be called immediately after the constructor.
Image.open(file).verify()
except Exception:
# Pillow (or PIL) doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
default_validators = [validators.URLValidator()]
def to_python(self, value):
def split_url(url):
"""
            Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
            ``ValidationError`` exception for certain malformed URLs).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'], code='invalid')
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
value = urlunsplit(url_fields)
return value
def clean(self, value):
value = self.to_python(value).strip()
return super(URLField, self).clean(value)
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def _has_changed(self, initial, data):
# Sometimes data or initial could be None or '' which should be the
# same thing as False.
if initial == 'False':
# show_hidden_initial may have transformed False to 'False'
initial = False
return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the string 'True' and 'False', which is what a
hidden field will submit for True and False, and for '1' and '0', which
        is what a RadioField will submit. Unlike the BooleanField, we need to
        explicitly check for True because we are not using the bool() function.
"""
if value in (True, 'True', '1'):
return True
elif value in (False, 'False', '0'):
return False
else:
return None
def validate(self, value):
pass
def _has_changed(self, initial, data):
# None (unknown) and False (No) are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return smart_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
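# Illustrative note (not part of the original source): valid_value() accepts
# both flat and grouped (optgroup) choices. The choice values below are
# hypothetical.
#
#   field = ChoiceField(choices=[
#       ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
#       ('unknown', 'Unknown'),
#   ])
#   field.valid_value('cd')       # True -- found inside the 'Audio' group
#   field.valid_value('Audio')    # False -- group labels are not values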
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value
def clean(self, value):
value = super(TypedChoiceField, self).clean(value)
return self._coerce(value)
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
return [smart_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in initial)
data_set = set(force_text(value) for value in data)
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value
def clean(self, value):
value = super(TypedMultipleChoiceField, self).clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
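# Illustrative usage sketch (not part of the original source): a ComboField
# that applies both a length limit and email validation to a single value.
#
#   f = ComboField(fields=[CharField(max_length=20), EmailField()])
#   f.clean('[email protected]')                            # returns '[email protected]'
#   f.clean('longer-than-twenty-chars@example.com')  # raises ValidationError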
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
'incomplete': _('Enter a complete value.'),
}
def __init__(self, fields=(), *args, **kwargs):
self.require_all_fields = kwargs.pop('require_all_fields', True)
super(MultiValueField, self).__init__(*args, **kwargs)
for f in fields:
f.error_messages.setdefault('incomplete',
self.error_messages['incomplete'])
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super(MultiValueField, self).__deepcopy__(memo)
result.fields = tuple([x.__deepcopy__(memo) for x in self.fields])
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field._has_changed(initial, data):
return True
return False
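# Illustrative sketch (not part of Django itself): a minimal MultiValueField
# subclass showing the compress() contract described above. The sub-field
# sizes and the dash separator are arbitrary; SplitDateTimeField further down
# is the real in-tree example.
class _ExamplePhoneField(MultiValueField):
    def __init__(self, *args, **kwargs):
        fields = (CharField(max_length=3), CharField(max_length=7))
        super(_ExamplePhoneField, self).__init__(fields, *args, **kwargs)
    def compress(self, data_list):
        # data_list holds each sub-field's cleaned value, in order.
        return '-'.join(data_list) if data_list else ''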
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
class IPAddressField(CharField):
default_validators = [validators.validate_ipv4_address]
def __init__(self, *args, **kwargs):
warnings.warn("IPAddressField has been deprecated. Use GenericIPAddressField instead.",
RemovedInDjango19Warning)
super(IPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
return value.strip()
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
class SlugField(CharField):
default_validators = [validators.validate_slug]
def clean(self, value):
value = self.to_python(value).strip()
return super(SlugField, self).clean(value)
| apache-2.0 |
Kiiv/CouchPotatoServer | libs/rsa/key.py | 110 | 17087 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''RSA key generation code.
Create new keys with the newkeys() function. It will give you a PublicKey and a
PrivateKey object.
Loading and saving keys requires the pyasn1 module. This module is imported as
late as possible, so that other functionality keeps working in the absence
of pyasn1.
'''
import logging
from rsa._compat import b
import rsa.prime
import rsa.pem
import rsa.common
log = logging.getLogger(__name__)
class AbstractKey(object):
'''Abstract superclass for private and public keys.'''
@classmethod
def load_pkcs1(cls, keyfile, format='PEM'):
r'''Loads a key in PKCS#1 DER or PEM format.
:param keyfile: contents of a DER- or PEM-encoded file that contains
the public key.
:param format: the format of the file to load; 'PEM' or 'DER'
:return: a PublicKey object
'''
methods = {
'PEM': cls._load_pkcs1_pem,
'DER': cls._load_pkcs1_der,
}
if format not in methods:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (format,
formats))
method = methods[format]
return method(keyfile)
def save_pkcs1(self, format='PEM'):
'''Saves the public key in PKCS#1 DER or PEM format.
:param format: the format to save; 'PEM' or 'DER'
:returns: the DER- or PEM-encoded public key.
'''
methods = {
'PEM': self._save_pkcs1_pem,
'DER': self._save_pkcs1_der,
}
if format not in methods:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (format,
formats))
method = methods[format]
return method()
class PublicKey(AbstractKey):
'''Represents a public RSA key.
This key is also known as the 'encryption key'. It contains the 'n' and 'e'
values.
    Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PublicKey(5, 3)
PublicKey(5, 3)
>>> key = PublicKey(5, 3)
>>> key.n
5
>>> key['n']
5
>>> key.e
3
>>> key['e']
3
'''
__slots__ = ('n', 'e')
def __init__(self, n, e):
self.n = n
self.e = e
def __getitem__(self, key):
return getattr(self, key)
def __repr__(self):
return 'PublicKey(%i, %i)' % (self.n, self.e)
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, PublicKey):
return False
return self.n == other.n and self.e == other.e
def __ne__(self, other):
return not (self == other)
@classmethod
def _load_pkcs1_der(cls, keyfile):
r'''Loads a key in PKCS#1 DER format.
@param keyfile: contents of a DER-encoded file that contains the public
key.
@return: a PublicKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MAwCBQCNGmYtAgMBAAE='
>>> der = base64.decodestring(b64der)
This loads the file:
>>> PublicKey._load_pkcs1_der(der)
PublicKey(2367317549, 65537)
'''
from pyasn1.codec.der import decoder
(priv, _) = decoder.decode(keyfile)
# ASN.1 contents of DER encoded public key:
#
# RSAPublicKey ::= SEQUENCE {
# modulus INTEGER, -- n
# publicExponent INTEGER, -- e
as_ints = tuple(int(x) for x in priv)
return cls(*as_ints)
def _save_pkcs1_der(self):
'''Saves the public key in PKCS#1 DER format.
@returns: the DER-encoded public key.
'''
from pyasn1.type import univ, namedtype
from pyasn1.codec.der import encoder
class AsnPubKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
)
# Create the ASN object
asn_key = AsnPubKey()
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile):
'''Loads a PKCS#1 PEM-encoded public key file.
The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
after the "-----END RSA PUBLIC KEY-----" lines is ignored.
@param keyfile: contents of a PEM-encoded file that contains the public
key.
@return: a PublicKey object
'''
der = rsa.pem.load_pem(keyfile, 'RSA PUBLIC KEY')
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self):
'''Saves a PKCS#1 PEM-encoded public key file.
@return: contents of a PEM-encoded file that contains the public key.
'''
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, 'RSA PUBLIC KEY')
class PrivateKey(AbstractKey):
'''Represents a private RSA key.
This key is also known as the 'decryption key'. It contains the 'n', 'e',
'd', 'p', 'q' and other values.
    Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PrivateKey(3247, 65537, 833, 191, 17)
PrivateKey(3247, 65537, 833, 191, 17)
exp1, exp2 and coef don't have to be given, they will be calculated:
>>> pk = PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
>>> pk.exp1
55063
>>> pk.exp2
10095
>>> pk.coef
50797
If you give exp1, exp2 or coef, they will be used as-is:
>>> pk = PrivateKey(1, 2, 3, 4, 5, 6, 7, 8)
>>> pk.exp1
6
>>> pk.exp2
7
>>> pk.coef
8
'''
__slots__ = ('n', 'e', 'd', 'p', 'q', 'exp1', 'exp2', 'coef')
def __init__(self, n, e, d, p, q, exp1=None, exp2=None, coef=None):
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
# Calculate the other values if they aren't supplied
if exp1 is None:
self.exp1 = int(d % (p - 1))
else:
self.exp1 = exp1
        if exp2 is None:
self.exp2 = int(d % (q - 1))
else:
self.exp2 = exp2
if coef is None:
self.coef = rsa.common.inverse(q, p)
else:
self.coef = coef
def __getitem__(self, key):
return getattr(self, key)
def __repr__(self):
return 'PrivateKey(%(n)i, %(e)i, %(d)i, %(p)i, %(q)i)' % self
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, PrivateKey):
return False
return (self.n == other.n and
self.e == other.e and
self.d == other.d and
self.p == other.p and
self.q == other.q and
self.exp1 == other.exp1 and
self.exp2 == other.exp2 and
self.coef == other.coef)
def __ne__(self, other):
return not (self == other)
@classmethod
def _load_pkcs1_der(cls, keyfile):
r'''Loads a key in PKCS#1 DER format.
@param keyfile: contents of a DER-encoded file that contains the private
key.
@return: a PrivateKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
>>> der = base64.decodestring(b64der)
This loads the file:
>>> PrivateKey._load_pkcs1_der(der)
PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
'''
from pyasn1.codec.der import decoder
(priv, _) = decoder.decode(keyfile)
# ASN.1 contents of DER encoded private key:
#
# RSAPrivateKey ::= SEQUENCE {
# version Version,
# modulus INTEGER, -- n
# publicExponent INTEGER, -- e
# privateExponent INTEGER, -- d
# prime1 INTEGER, -- p
# prime2 INTEGER, -- q
# exponent1 INTEGER, -- d mod (p-1)
# exponent2 INTEGER, -- d mod (q-1)
# coefficient INTEGER, -- (inverse of q) mod p
# otherPrimeInfos OtherPrimeInfos OPTIONAL
# }
if priv[0] != 0:
raise ValueError('Unable to read this file, version %s != 0' % priv[0])
as_ints = tuple(int(x) for x in priv[1:9])
return cls(*as_ints)
def _save_pkcs1_der(self):
'''Saves the private key in PKCS#1 DER format.
@returns: the DER-encoded private key.
'''
from pyasn1.type import univ, namedtype
from pyasn1.codec.der import encoder
class AsnPrivKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer()),
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
namedtype.NamedType('privateExponent', univ.Integer()),
namedtype.NamedType('prime1', univ.Integer()),
namedtype.NamedType('prime2', univ.Integer()),
namedtype.NamedType('exponent1', univ.Integer()),
namedtype.NamedType('exponent2', univ.Integer()),
namedtype.NamedType('coefficient', univ.Integer()),
)
# Create the ASN object
asn_key = AsnPrivKey()
asn_key.setComponentByName('version', 0)
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
asn_key.setComponentByName('privateExponent', self.d)
asn_key.setComponentByName('prime1', self.p)
asn_key.setComponentByName('prime2', self.q)
asn_key.setComponentByName('exponent1', self.exp1)
asn_key.setComponentByName('exponent2', self.exp2)
asn_key.setComponentByName('coefficient', self.coef)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile):
'''Loads a PKCS#1 PEM-encoded private key file.
The contents of the file before the "-----BEGIN RSA PRIVATE KEY-----" and
after the "-----END RSA PRIVATE KEY-----" lines is ignored.
@param keyfile: contents of a PEM-encoded file that contains the private
key.
@return: a PrivateKey object
'''
der = rsa.pem.load_pem(keyfile, b('RSA PRIVATE KEY'))
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self):
'''Saves a PKCS#1 PEM-encoded private key file.
@return: contents of a PEM-encoded file that contains the private key.
'''
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, b('RSA PRIVATE KEY'))
def find_p_q(nbits, getprime_func=rsa.prime.getprime, accurate=True):
    '''Returns a tuple of two different primes of nbits bits each.
    The resulting p * q has exactly 2 * nbits bits, and the returned p and q
will not be equal.
:param nbits: the number of bits in each of p and q.
:param getprime_func: the getprime function, defaults to
:py:func:`rsa.prime.getprime`.
*Introduced in Python-RSA 3.1*
:param accurate: whether to enable accurate mode or not.
:returns: (p, q), where p > q
>>> (p, q) = find_p_q(128)
>>> from rsa import common
>>> common.bit_size(p * q)
256
When not in accurate mode, the number of bits can be slightly less
>>> (p, q) = find_p_q(128, accurate=False)
>>> from rsa import common
>>> common.bit_size(p * q) <= 256
True
>>> common.bit_size(p * q) > 240
True
'''
total_bits = nbits * 2
# Make sure that p and q aren't too close or the factoring programs can
# factor n.
shift = nbits // 16
pbits = nbits + shift
qbits = nbits - shift
# Choose the two initial primes
log.debug('find_p_q(%i): Finding p', nbits)
p = getprime_func(pbits)
log.debug('find_p_q(%i): Finding q', nbits)
q = getprime_func(qbits)
def is_acceptable(p, q):
'''Returns True iff p and q are acceptable:
- p and q differ
- (p * q) has the right nr of bits (when accurate=True)
'''
if p == q:
return False
if not accurate:
return True
# Make sure we have just the right amount of bits
found_size = rsa.common.bit_size(p * q)
return total_bits == found_size
# Keep choosing other primes until they match our requirements.
change_p = False
while not is_acceptable(p, q):
# Change p on one iteration and q on the other
if change_p:
p = getprime_func(pbits)
else:
q = getprime_func(qbits)
change_p = not change_p
# We want p > q as described on
# http://www.di-mgt.com.au/rsa_alg.html#crt
return (max(p, q), min(p, q))
def calculate_keys(p, q, nbits):
'''Calculates an encryption and a decryption key given p and q, and
returns them as a tuple (e, d)
'''
phi_n = (p - 1) * (q - 1)
# A very common choice for e is 65537
e = 65537
try:
d = rsa.common.inverse(e, phi_n)
except ValueError:
raise ValueError("e (%d) and phi_n (%d) are not relatively prime" %
(e, phi_n))
if (e * d) % phi_n != 1:
raise ValueError("e (%d) and d (%d) are not mult. inv. modulo "
"phi_n (%d)" % (e, d, phi_n))
return (e, d)
def gen_keys(nbits, getprime_func, accurate=True):
'''Generate RSA keys of nbits bits. Returns (p, q, e, d).
Note: this can take a long time, depending on the key size.
:param nbits: the total number of bits in ``p`` and ``q``. Both ``p`` and
``q`` will use ``nbits/2`` bits.
:param getprime_func: either :py:func:`rsa.prime.getprime` or a function
with similar signature.
'''
(p, q) = find_p_q(nbits // 2, getprime_func, accurate)
(e, d) = calculate_keys(p, q, nbits // 2)
return (p, q, e, d)
def newkeys(nbits, accurate=True, poolsize=1):
'''Generates public and private keys, and returns them as (pub, priv).
The public key is also known as the 'encryption key', and is a
:py:class:`rsa.PublicKey` object. The private key is also known as the
'decryption key' and is a :py:class:`rsa.PrivateKey` object.
:param nbits: the number of bits required to store ``n = p*q``.
:param accurate: when True, ``n`` will have exactly the number of bits you
asked for. However, this makes key generation much slower. When False,
        ``n`` may have slightly fewer bits.
:param poolsize: the number of processes to use to generate the prime
numbers. If set to a number > 1, a parallel algorithm will be used.
This requires Python 2.6 or newer.
:returns: a tuple (:py:class:`rsa.PublicKey`, :py:class:`rsa.PrivateKey`)
The ``poolsize`` parameter was added in *Python-RSA 3.1* and requires
Python 2.6 or newer.
'''
if nbits < 16:
raise ValueError('Key too small')
if poolsize < 1:
raise ValueError('Pool size (%i) should be >= 1' % poolsize)
# Determine which getprime function to use
if poolsize > 1:
from rsa import parallel
import functools
getprime_func = functools.partial(parallel.getprime, poolsize=poolsize)
else: getprime_func = rsa.prime.getprime
# Generate the key components
(p, q, e, d) = gen_keys(nbits, getprime_func)
# Create the key objects
n = p * q
return (
PublicKey(n, e),
PrivateKey(n, e, d, p, q)
)
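# Illustrative usage sketch (not part of the original module); the 512-bit
# size only keeps the example fast -- real keys should be larger, and saving
# or loading keys requires pyasn1 as noted in the module docstring.
def _newkeys_example():
    '''Generates a small keypair, saves it as PEM and loads it back.'''
    (pub, priv) = newkeys(512)
    pub_pem = pub.save_pkcs1(format='PEM')
    priv_pem = priv.save_pkcs1(format='PEM')
    assert PublicKey.load_pkcs1(pub_pem) == pub
    assert PrivateKey.load_pkcs1(priv_pem) == priv
    return (pub_pem, priv_pem)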
__all__ = ['PublicKey', 'PrivateKey', 'newkeys']
if __name__ == '__main__':
import doctest
try:
for count in range(100):
(failures, tests) = doctest.testmod()
if failures:
break
if (count and count % 10 == 0) or count == 1:
print('%i times' % count)
except KeyboardInterrupt:
print('Aborted')
else:
print('Doctests done')
| gpl-3.0 |
jazcollins/models | syntaxnet/dragnn/python/composite_optimizer_test.py | 12 | 4661 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CompositeOptimizer."""
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from dragnn.python import composite_optimizer
class MockAdamOptimizer(tf.train.AdamOptimizer):
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
use_locking=False,
name="Adam"):
super(MockAdamOptimizer, self).__init__(learning_rate, beta1, beta2,
epsilon, use_locking, name)
def _create_slots(self, var_list):
super(MockAdamOptimizer, self)._create_slots(var_list)
for v in var_list:
self._zeros_slot(v, "adam_counter", self._name)
def _apply_dense(self, grad, var):
train_op = super(MockAdamOptimizer, self)._apply_dense(grad, var)
counter = self.get_slot(var, "adam_counter")
return tf.group(train_op, tf.assign_add(counter, [1.0]))
class MockMomentumOptimizer(tf.train.MomentumOptimizer):
def __init__(self,
learning_rate,
momentum,
use_locking=False,
name="Momentum",
use_nesterov=False):
super(MockMomentumOptimizer, self).__init__(learning_rate, momentum,
use_locking, name, use_nesterov)
def _create_slots(self, var_list):
super(MockMomentumOptimizer, self)._create_slots(var_list)
for v in var_list:
self._zeros_slot(v, "momentum_counter", self._name)
def _apply_dense(self, grad, var):
train_op = super(MockMomentumOptimizer, self)._apply_dense(grad, var)
counter = self.get_slot(var, "momentum_counter")
return tf.group(train_op, tf.assign_add(counter, [1.0]))
class CompositeOptimizerTest(test_util.TensorFlowTestCase):
def test_switching(self):
with self.test_session() as sess:
# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3
# Try to find values for w and b that compute y_data = w * x_data + b
# (We know that w should be 0.1 and b 0.3, but TensorFlow will
# figure that out for us.)
w = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
y = w * x_data + b
# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
# Set up optimizers.
step = tf.get_variable(
"step",
shape=[],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=tf.int32)
optimizer1 = MockAdamOptimizer(0.05)
optimizer2 = MockMomentumOptimizer(0.05, 0.5)
switch = tf.less(step, 100)
optimizer = composite_optimizer.CompositeOptimizer(optimizer1, optimizer2,
switch)
train_op = optimizer.minimize(loss)
sess.run(tf.global_variables_initializer())
      # Fit the line.
for iteration in range(201):
self.assertEqual(sess.run(switch), iteration < 100)
sess.run(train_op)
sess.run(tf.assign_add(step, 1))
slot_names = optimizer.get_slot_names()
self.assertItemsEqual(
slot_names,
["m", "v", "momentum", "adam_counter", "momentum_counter"])
adam_counter = sess.run(optimizer.get_slot(w, "adam_counter"))
momentum_counter = sess.run(optimizer.get_slot(w, "momentum_counter"))
self.assertEqual(adam_counter, min(iteration + 1, 100))
self.assertEqual(momentum_counter, max(iteration - 99, 0))
if iteration % 20 == 0:
logging.info("%d %s %d %d", iteration, sess.run([switch, step, w, b]),
adam_counter, momentum_counter)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
koonsolo/MysticMine | monorail/pickupsview.py | 1 | 11068 |
import random
import pygame
from koon.geo import Vec2D
import koon.geo as geo
from koon.gfx import SpriteFilm, Font, LoopAnimationTimer, PingPongTimer, Timer
from koon.res import resman
import pickups
import event
import tiles
class PickupView:
def __init__( self ):
self.pos = None
self.jump_pos = None
def get_z( self ):
if self.pos is None:
return -999
else:
return self.pos.y + 64
z = property( get_z )
def get_pos( self, frame ):
self.pos = None
if self.model.container is None or not hasattr( self.model.container, "views" ): return None
self.pos = self.model.container.views[0].get_pickup_pos( frame )
if self.model.jump_cnt is not None:
if self.jump_pos is None:
self.jump_pos = self.pos
x = geo.lin_ipol( self.model.jump_cnt, self.jump_pos.x, self.pos.x )
y = geo.lin_ipol( self.model.jump_cnt, self.jump_pos.y, self.pos.y )
height = self.model.jump_cnt
if self.model.jump_cnt > 0.5:
height = 1.0 - self.model.jump_cnt
self.pos = Vec2D( x, y - 30 * height)
else:
self.jump_pos = None
return self.pos
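    # Worked example of the jump arc above (illustrative numbers only):
    # jump_cnt runs from 0.0 to 1.0 while a pickup hops between containers.
    # x and y are linearly interpolated between the old and new positions,
    # and the height term is triangular: at jump_cnt 0.25 the pickup is drawn
    # 30 * 0.25 = 7.5 pixels above the interpolated point, it peaks at
    # 30 * 0.5 = 15 pixels at jump_cnt 0.5, and it is back on the line at 1.0
    # (smaller y is higher on screen, hence the subtraction).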
class TorchView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.torch_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class KeyView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.key_sprite")
self.animTimer = LoopAnimationTimer( 25, 0, 19 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class MirrorView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.mirror_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 9 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 10) )
class OilerView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.oiler_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None and self.model.goldcar is None: # only draw on tile
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class MultiplierView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
def draw( self, frame ):
if self.get_pos( frame ) is None: return
font = Font(size = 28, color = (255,0,0))
pos = self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET)
if self.model.goldcar is not None:
pos += Vec2D(0, 20)
font.draw("x2", frame.surface, pos.get_tuple(), Font.CENTER, Font.MIDDLE)
class BalloonView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.balloon_sprite")
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class GhostView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.ghost_sprite").clone()
def draw( self, frame ):
if self.get_pos( frame ) is not None and self.model.goldcar is None: # only draw on tile
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class CopperCoinView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.copper_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, self.sprite.max_x )
self.animTimer.set_frame( 0, random.randint(0,self.sprite.max_x-1) )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class GoldBlockView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.gold_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 15 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class RockBlockView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.rock_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 15 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET + 10) )
class DiamondView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.diamond_sprite").clone()
self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class DynamiteView (PickupView):
class Sparkle:
def __init__( self, pos ):
self.pos = pos
self.life = 10 + int(random.random() * 2)
self.move = Vec2D( random.uniform( -2.5, 2.5 ), random.uniform( -2.5, 0.0 ) )
self.surf = resman.get("game.sparkle_surf")
width, height = self.surf.get_size()
self.center = Vec2D( width/2, height/2 )
def game_tick( self ):
self.life -= 1
self.pos += self.move
self.move.y += 0.1
def is_dead( self ):
return self.life <= 0
def draw( self, frame ):
pos = self.pos + self.center + Vec2D( frame.X_OFFSET, frame.Y_OFFSET )
self.surf.draw( frame.surface, pos )
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.dynamite_sprite").clone()
self.sprite_delta = 1
self.prev_life = 1.0
w, h = self.sprite.get_size()
self.sparkle_offset = Vec2D( 7, -h + 24 )
self.sparkle_line = Vec2D( 0, -22 )
self.sparkles = []
self.sparkle_timer = Timer( 25 )
def draw( self, frame ):
if self.get_pos(frame) is None: return
# no time... must implement... bad code...
if self.model.life < pickups.Dynamite.DEC * 18 and\
self.model.life != self.prev_life:
self.prev_life = self.model.life
self.sprite.nr += self.sprite_delta
if self.sprite.nr < 0:
self.sprite.nr = 0
self.sprite_delta = 1
elif self.sprite.nr >= 4:
self.sprite.nr = 3
self.sprite_delta = -1
event.Event.dynamite_tick()
while self.sparkle_timer.do_tick( frame.time_sec ):
self.sparkle_tick( frame )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D( frame.X_OFFSET, frame.Y_OFFSET ) )
for sparkle in self.sparkles:
sparkle.draw( frame )
def sparkle_tick( self, frame ):
if self.model.life > pickups.Dynamite.DEC * 18:
for i in range(3):
pos = self.get_pos(frame) + self.sparkle_offset + self.sparkle_line * self.model.life
self.sparkles.append( DynamiteView.Sparkle( pos ) )
new_sparkles = []
for sparkle in self.sparkles:
sparkle.game_tick()
if not sparkle.is_dead():
new_sparkles.append( sparkle )
self.sparkles = new_sparkles
class LampView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.lamp_sprite").clone()
#self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
#self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class AxeView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.axe_sprite").clone()
        # PingPongTimer makes the swing animation bounce back and forth rather than loop.
self.animTimer = PingPongTimer( 25, 0, 8 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET) )
class FlagView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.flag%d_sprite" % (model.goldcar.nr+1))
self.animTimer = LoopAnimationTimer( 20, 0, 8 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
class LeprechaunView (PickupView):
def __init__( self, model ):
PickupView.__init__( self )
self.model = model
self.sprite = resman.get("game.leprechaun_sprite").clone()
#self.animTimer = LoopAnimationTimer( 25, 0, 4 )
def draw( self, frame ):
if self.get_pos( frame ) is not None:
#self.sprite.nr = self.animTimer.get_frame( frame.time_sec )
self.sprite.draw( frame.surface, self.get_pos(frame) + Vec2D(frame.X_OFFSET, frame.Y_OFFSET - 20) )
| mit |
dgzurita/odoo | addons/hr_recruitment/__openerp__.py | 260 | 2780 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Recruitment Process',
'version': '1.0',
'category': 'Human Resources',
'sequence': 25,
'summary': 'Jobs, Recruitment, Applications, Job Interviews, Surveys',
'description': """
Manage job positions and the recruitment process
================================================
This application allows you to easily keep track of jobs, vacancies, applications, interviews...
It is integrated with the mail gateway to automatically fetch email sent to <[email protected]> in the list of applications. It's also integrated with the document management system to store and search in the CV base and find the candidate that you are looking for. Similarly, it is integrated with the survey module to allow you to define interviews for different jobs.
You can define the different phases of interviews and easily rate the applicant from the kanban view.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/recruitment',
'depends': [
'decimal_precision',
'hr',
'survey',
'calendar',
'fetchmail',
'web_kanban_gauge',
],
'data': [
'wizard/hr_recruitment_create_partner_job_view.xml',
'hr_recruitment_view.xml',
'hr_recruitment_menu.xml',
'security/hr_recruitment_security.xml',
'security/ir.model.access.csv',
'report/hr_recruitment_report_view.xml',
'hr_recruitment_installer_view.xml',
'res_config_view.xml',
'survey_data_recruitment.xml',
'hr_recruitment_data.xml',
'views/hr_recruitment.xml',
],
'demo': ['hr_recruitment_demo.xml'],
'test': ['test/recruitment_process.yml'],
'installable': True,
'auto_install': False,
'application': True,
}
| agpl-3.0 |
GbalsaC/bitnamiP | lms/djangoapps/instructor/tests/utils.py | 121 | 2732 | """
Utilities for instructor unit tests
"""
import datetime
import json
import random
from django.utils.timezone import utc
from util.date_utils import get_default_time_display
class FakeInfo(object):
"""Parent class for faking objects used in tests"""
FEATURES = []
def __init__(self):
for feature in self.FEATURES:
setattr(self, feature, u'expected')
def to_dict(self):
""" Returns a dict representation of the object """
return {key: getattr(self, key) for key in self.FEATURES}
class FakeContentTask(FakeInfo):
""" Fake task info needed for email content list """
FEATURES = [
'task_input',
'task_output',
'requester',
]
def __init__(self, email_id, num_sent, num_failed, sent_to):
super(FakeContentTask, self).__init__()
self.task_input = {'email_id': email_id, 'to_option': sent_to}
self.task_input = json.dumps(self.task_input)
self.task_output = {'succeeded': num_sent, 'failed': num_failed}
self.task_output = json.dumps(self.task_output)
self.requester = 'expected'
def make_invalid_input(self):
"""Corrupt the task input field to test errors"""
self.task_input = "THIS IS INVALID JSON"
class FakeEmail(FakeInfo):
""" Corresponding fake email for a fake task """
FEATURES = [
'subject',
'html_message',
'id',
'created',
]
def __init__(self, email_id):
super(FakeEmail, self).__init__()
self.id = unicode(email_id) # pylint: disable=invalid-name
# Select a random data for create field
year = random.randint(1950, 2000)
month = random.randint(1, 12)
day = random.randint(1, 28)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
self.created = datetime.datetime(year, month, day, hour, minute, tzinfo=utc)
class FakeEmailInfo(FakeInfo):
""" Fake email information object """
FEATURES = [
u'created',
u'sent_to',
u'email',
u'number_sent',
u'requester',
]
EMAIL_FEATURES = [
u'subject',
u'html_message',
u'id'
]
def __init__(self, fake_email, num_sent, num_failed):
super(FakeEmailInfo, self).__init__()
self.created = get_default_time_display(fake_email.created)
number_sent = str(num_sent) + ' sent'
if num_failed > 0:
number_sent += ', ' + str(num_failed) + " failed"
self.number_sent = number_sent
fake_email_dict = fake_email.to_dict()
self.email = {feature: fake_email_dict[feature] for feature in self.EMAIL_FEATURES}
self.requester = u'expected'
| agpl-3.0 |
raviflipsyde/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_handshake.py | 452 | 7134 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake._base module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.common import ExtensionParameter
from mod_pywebsocket.common import ExtensionParsingException
from mod_pywebsocket.common import format_extensions
from mod_pywebsocket.common import parse_extensions
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import validate_subprotocol
class ValidateSubprotocolTest(unittest.TestCase):
"""A unittest for validate_subprotocol method."""
def test_validate_subprotocol(self):
# Should succeed.
validate_subprotocol('sample')
validate_subprotocol('Sample')
validate_subprotocol('sample\x7eprotocol')
# Should fail.
self.assertRaises(HandshakeException,
validate_subprotocol,
'')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x09protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x20protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
_TEST_TOKEN_EXTENSION_DATA = [
('foo', [('foo', [])]),
('foo; bar', [('foo', [('bar', None)])]),
('foo; bar=baz', [('foo', [('bar', 'baz')])]),
('foo; bar=baz; car=cdr', [('foo', [('bar', 'baz'), ('car', 'cdr')])]),
('foo; bar=baz, car; cdr',
[('foo', [('bar', 'baz')]), ('car', [('cdr', None)])]),
('a, b, c, d',
[('a', []), ('b', []), ('c', []), ('d', [])]),
]
_TEST_QUOTED_EXTENSION_DATA = [
('foo; bar=""', [('foo', [('bar', '')])]),
('foo; bar=" baz "', [('foo', [('bar', ' baz ')])]),
('foo; bar=",baz;"', [('foo', [('bar', ',baz;')])]),
('foo; bar="\\\r\\\nbaz"', [('foo', [('bar', '\r\nbaz')])]),
('foo; bar="\\"baz"', [('foo', [('bar', '"baz')])]),
('foo; bar="\xbbbaz"', [('foo', [('bar', '\xbbbaz')])]),
]
_TEST_REDUNDANT_TOKEN_EXTENSION_DATA = [
('foo \t ', [('foo', [])]),
('foo; \r\n bar', [('foo', [('bar', None)])]),
('foo; bar=\r\n \r\n baz', [('foo', [('bar', 'baz')])]),
('foo ;bar = baz ', [('foo', [('bar', 'baz')])]),
('foo,bar,,baz', [('foo', []), ('bar', []), ('baz', [])]),
]
_TEST_REDUNDANT_QUOTED_EXTENSION_DATA = [
('foo; bar="\r\n \r\n baz"', [('foo', [('bar', ' baz')])]),
]
class ExtensionsParserTest(unittest.TestCase):
def _verify_extension_list(self, expected_list, actual_list):
"""Verifies that ExtensionParameter objects in actual_list have the
same members as extension definitions in expected_list. Extension
definition used in this test is a pair of an extension name and a
parameter dictionary.
"""
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
(name, parameters) = expected
self.assertEqual(name, actual._name)
self.assertEqual(parameters, actual._parameters)
def test_parse(self):
for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_quoted_data(self):
for formatted_string, definition in _TEST_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_redundant_quoted_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string))
def test_parse_bad_data(self):
_TEST_BAD_EXTENSION_DATA = [
('foo; ; '),
('foo; a a'),
('foo foo'),
(',,,'),
('foo; bar='),
('foo; bar="hoge'),
('foo; bar="a\r"'),
('foo; bar="\\\xff"'),
('foo; bar=\ra'),
]
for formatted_string in _TEST_BAD_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions, formatted_string)
class FormatExtensionsTest(unittest.TestCase):
def test_format_extensions(self):
for formatted_string, definitions in _TEST_TOKEN_EXTENSION_DATA:
extensions = []
for definition in definitions:
(name, parameters) = definition
extension = ExtensionParameter(name)
extension._parameters = parameters
extensions.append(extension)
self.assertEqual(
formatted_string, format_extensions(extensions))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
eayunstack/fuel-ostf | fuel_health/common/facts.py | 2 | 2308 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from fuel_health.common import log as logging
LOG = logging.getLogger(__name__)
class Facts:
__default_config_path = '/var/lib/puppet/yaml/facts/'
def __init__(self, config=None):
_config_path = config or self.__default_config_path
self.config = self._read_config(_config_path)
@property
def amqp(self):
_amqp = self._get_rabbit_data() or self._get_qpid_data()
return _amqp
@property
def amqp_user(self):
return 'nova'
@property
def amqp_password(self):
return self.amqp['password']
def _read_config(self, path):
_file = None
for file in os.listdir(path):
if file.endswith('.yaml'):
_file = file
break
_file = open(os.path.join(path, _file))
self._init_parser()
data = yaml.load(_file)
_file.close()
return data
def _get_rabbit_data(self):
try:
return self.config['values']['rabbit']
except KeyError:
return None
def _get_qpid_data(self):
try:
return self.config['values']['qpid']
except KeyError:
return None
def _init_parser(self):
        # Custom YAML constructors for ruby objects, used when parsing puppet facts files
def _construct_ruby_object(loader, suffix, node):
return loader.construct_yaml_map(node)
def _construct_ruby_sym(loader, suffix, node):
return loader.construct_yaml_str(node)
yaml.add_multi_constructor(u"!ruby/object:", _construct_ruby_object)
yaml.add_multi_constructor(u"!ruby/sym", _construct_ruby_sym)
| apache-2.0 |
Geeglee/scrapy | tests/test_selector_csstranslator.py | 59 | 6026 | """
Selector tests for cssselect backend
"""
from twisted.trial import unittest
from scrapy.http import HtmlResponse
from scrapy.selector.csstranslator import ScrapyHTMLTranslator
from scrapy.selector import Selector
from cssselect.parser import SelectorSyntaxError
from cssselect.xpath import ExpressionError
HTMLBODY = '''
<html>
<body>
<div>
<a id="name-anchor" name="foo"></a>
<a id="tag-anchor" rel="tag" href="http://localhost/foo">link</a>
<a id="nofollow-anchor" rel="nofollow" href="https://example.org"> link</a>
<p id="paragraph">
lorem ipsum text
<b id="p-b">hi</b> <em id="p-em">there</em>
<b id="p-b2">guy</b>
<input type="checkbox" id="checkbox-unchecked" />
<input type="checkbox" id="checkbox-disabled" disabled="" />
<input type="text" id="text-checked" checked="checked" />
<input type="hidden" />
<input type="hidden" disabled="disabled" />
<input type="checkbox" id="checkbox-checked" checked="checked" />
<input type="checkbox" id="checkbox-disabled-checked"
disabled="disabled" checked="checked" />
<fieldset id="fieldset" disabled="disabled">
<input type="checkbox" id="checkbox-fieldset-disabled" />
<input type="hidden" />
</fieldset>
</p>
<map name="dummymap">
<area shape="circle" coords="200,250,25" href="foo.html" id="area-href" />
<area shape="default" id="area-nohref" />
</map>
</div>
<div class="cool-footer" id="foobar-div" foobar="ab bc cde">
<span id="foobar-span">foo ter</span>
</div>
</body></html>
'''
class TranslatorMixinTest(unittest.TestCase):
tr_cls = ScrapyHTMLTranslator
def setUp(self):
self.tr = self.tr_cls()
self.c2x = self.tr.css_to_xpath
def test_attr_function(self):
cases = [
('::attr(name)', u'descendant-or-self::*/@name'),
('a::attr(href)', u'descendant-or-self::a/@href'),
('a ::attr(img)', u'descendant-or-self::a/descendant-or-self::*/@img'),
('a > ::attr(class)', u'descendant-or-self::a/*/@class'),
]
for css, xpath in cases:
self.assertEqual(self.c2x(css), xpath, css)
def test_attr_function_exception(self):
cases = [
('::attr(12)', ExpressionError),
('::attr(34test)', ExpressionError),
('::attr(@href)', SelectorSyntaxError),
]
for css, exc in cases:
self.assertRaises(exc, self.c2x, css)
def test_text_pseudo_element(self):
cases = [
('::text', u'descendant-or-self::text()'),
('p::text', u'descendant-or-self::p/text()'),
('p ::text', u'descendant-or-self::p/descendant-or-self::text()'),
('#id::text', u"descendant-or-self::*[@id = 'id']/text()"),
('p#id::text', u"descendant-or-self::p[@id = 'id']/text()"),
('p#id ::text', u"descendant-or-self::p[@id = 'id']/descendant-or-self::text()"),
('p#id > ::text', u"descendant-or-self::p[@id = 'id']/*/text()"),
('p#id ~ ::text', u"descendant-or-self::p[@id = 'id']/following-sibling::*/text()"),
('a[href]::text', u'descendant-or-self::a[@href]/text()'),
('a[href] ::text', u'descendant-or-self::a[@href]/descendant-or-self::text()'),
('p::text, a::text', u"descendant-or-self::p/text() | descendant-or-self::a/text()"),
]
for css, xpath in cases:
self.assertEqual(self.c2x(css), xpath, css)
def test_pseudo_function_exception(self):
cases = [
('::attribute(12)', ExpressionError),
('::text()', ExpressionError),
('::attr(@href)', SelectorSyntaxError),
]
for css, exc in cases:
self.assertRaises(exc, self.c2x, css)
def test_unknown_pseudo_element(self):
cases = [
('::text-node', ExpressionError),
]
for css, exc in cases:
self.assertRaises(exc, self.c2x, css)
def test_unknown_pseudo_class(self):
cases = [
(':text', ExpressionError),
(':attribute(name)', ExpressionError),
]
for css, exc in cases:
self.assertRaises(exc, self.c2x, css)
class CSSSelectorTest(unittest.TestCase):
sscls = Selector
def setUp(self):
self.htmlresponse = HtmlResponse('http://example.com', body=HTMLBODY)
self.sel = self.sscls(self.htmlresponse)
def x(self, *a, **kw):
return [v.strip() for v in self.sel.css(*a, **kw).extract() if v.strip()]
def test_selector_simple(self):
for x in self.sel.css('input'):
self.assertTrue(isinstance(x, self.sel.__class__), x)
self.assertEqual(self.sel.css('input').extract(),
[x.extract() for x in self.sel.css('input')])
def test_text_pseudo_element(self):
self.assertEqual(self.x('#p-b2'), [u'<b id="p-b2">guy</b>'])
self.assertEqual(self.x('#p-b2::text'), [u'guy'])
self.assertEqual(self.x('#p-b2 ::text'), [u'guy'])
self.assertEqual(self.x('#paragraph::text'), [u'lorem ipsum text'])
self.assertEqual(self.x('#paragraph ::text'), [u'lorem ipsum text', u'hi', u'there', u'guy'])
self.assertEqual(self.x('p::text'), [u'lorem ipsum text'])
self.assertEqual(self.x('p ::text'), [u'lorem ipsum text', u'hi', u'there', u'guy'])
def test_attribute_function(self):
self.assertEqual(self.x('#p-b2::attr(id)'), [u'p-b2'])
self.assertEqual(self.x('.cool-footer::attr(class)'), [u'cool-footer'])
self.assertEqual(self.x('.cool-footer ::attr(id)'), [u'foobar-div', u'foobar-span'])
self.assertEqual(self.x('map[name="dummymap"] ::attr(shape)'), [u'circle', u'default'])
def test_nested_selector(self):
self.assertEqual(self.sel.css('p').css('b::text').extract(),
[u'hi', u'guy'])
self.assertEqual(self.sel.css('div').css('area:last-child').extract(),
[u'<area shape="default" id="area-nohref">'])
| bsd-3-clause |
mitsuhiko/click | src/click/types.py | 1 | 30864 | import os
import stat
from datetime import datetime
from ._compat import _get_argv_encoding
from ._compat import filename_to_ui
from ._compat import get_filesystem_encoding
from ._compat import get_strerror
from ._compat import open_stream
from .exceptions import BadParameter
from .utils import LazyFile
from .utils import safecall
class ParamType:
"""Represents the type of a parameter. Validates and converts values
from the command line or Python into the correct type.
To implement a custom type, subclass and implement at least the
following:
- The :attr:`name` class attribute must be set.
- Calling an instance of the type with ``None`` must return
``None``. This is already implemented by default.
- :meth:`convert` must convert string values to the correct type.
- :meth:`convert` must accept values that are already the correct
type.
- It must be able to convert a value if the ``ctx`` and ``param``
arguments are ``None``. This can occur when converting prompt
input.
"""
is_composite = False
#: the descriptive name of this type
name = None
#: if a list of this type is expected and the value is pulled from a
#: string environment variable, this is what splits it up. `None`
#: means any whitespace. For all parameters the general rule is that
    #: whitespace splits them up. The exceptions are paths and files, which
#: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
#: Windows).
envvar_list_splitter = None
def to_info_dict(self):
"""Gather information that could be useful for a tool generating
user-facing documentation.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
.. versionadded:: 8.0
"""
# The class name without the "ParamType" suffix.
param_type = type(self).__name__.partition("ParamType")[0]
param_type = param_type.partition("ParameterType")[0]
return {"param_type": param_type, "name": self.name}
def __call__(self, value, param=None, ctx=None):
if value is not None:
return self.convert(value, param, ctx)
def get_metavar(self, param):
"""Returns the metavar default for this param if it provides one."""
def get_missing_message(self, param):
"""Optionally might return extra information about a missing
parameter.
.. versionadded:: 2.0
"""
def convert(self, value, param, ctx):
"""Convert the value to the correct type. This is not called if
the value is ``None`` (the missing value).
This must accept string values from the command line, as well as
values that are already the correct type. It may also convert
other compatible types.
The ``param`` and ``ctx`` arguments may be ``None`` in certain
situations, such as when converting prompt input.
If the value cannot be converted, call :meth:`fail` with a
descriptive message.
:param value: The value to convert.
:param param: The parameter that is using this type to convert
its value. May be ``None``.
:param ctx: The current context that arrived at this value. May
be ``None``.
"""
return value
def split_envvar_value(self, rv):
"""Given a value from an environment variable this splits it up
into small chunks depending on the defined envvar list splitter.
If the splitter is set to `None`, which means that whitespace splits,
then leading and trailing whitespace is ignored. Otherwise, leading
and trailing splitters usually lead to empty items being included.
"""
return (rv or "").split(self.envvar_list_splitter)
def fail(self, message, param=None, ctx=None):
"""Helper method to fail with an invalid value message."""
raise BadParameter(message, ctx=ctx, param=param)
def shell_complete(self, ctx, param, incomplete):
"""Return a list of
:class:`~click.shell_completion.CompletionItem` objects for the
incomplete value. Most types do not provide completions, but
some do, and this allows custom types to provide custom
completions as well.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
return []
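# Illustrative sketch only (not part of Click): a custom type following the
# contract documented on ParamType above. The "comma_list" name and the
# behaviour are hypothetical, chosen just to demonstrate convert() and fail().
class _ExampleCommaListParamType(ParamType):
    name = "comma_list"
    def convert(self, value, param, ctx):
        if isinstance(value, list):
            # Already the correct type, e.g. a default value.
            return value
        try:
            return [item.strip() for item in value.split(",") if item.strip()]
        except AttributeError:
            self.fail(f"{value!r} is not a comma-separated string.", param, ctx)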
class CompositeParamType(ParamType):
is_composite = True
@property
def arity(self):
raise NotImplementedError()
class FuncParamType(ParamType):
def __init__(self, func):
self.name = func.__name__
self.func = func
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["func"] = self.func
return info_dict
def convert(self, value, param, ctx):
try:
return self.func(value)
except ValueError:
try:
value = str(value)
except UnicodeError:
value = value.decode("utf-8", "replace")
self.fail(value, param, ctx)
class UnprocessedParamType(ParamType):
name = "text"
def convert(self, value, param, ctx):
return value
def __repr__(self):
return "UNPROCESSED"
class StringParamType(ParamType):
name = "text"
def convert(self, value, param, ctx):
if isinstance(value, bytes):
enc = _get_argv_encoding()
try:
value = value.decode(enc)
except UnicodeError:
fs_enc = get_filesystem_encoding()
if fs_enc != enc:
try:
value = value.decode(fs_enc)
except UnicodeError:
value = value.decode("utf-8", "replace")
else:
value = value.decode("utf-8", "replace")
return value
return str(value)
def __repr__(self):
return "STRING"
class Choice(ParamType):
"""The choice type allows a value to be checked against a fixed set
of supported values. All of these values have to be strings.
You should only pass a list or tuple of choices. Other iterables
(like generators) may lead to surprising results.
The resulting value will always be one of the originally passed choices
regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
being specified.
See :ref:`choice-opts` for an example.
:param case_sensitive: Set to false to make choices case
insensitive. Defaults to true.
"""
name = "choice"
def __init__(self, choices, case_sensitive=True):
self.choices = choices
self.case_sensitive = case_sensitive
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["choices"] = self.choices
info_dict["case_sensitive"] = self.case_sensitive
return info_dict
def get_metavar(self, param):
choices_str = "|".join(self.choices)
# Use curly braces to indicate a required argument.
if param.required and param.param_type_name == "argument":
return f"{{{choices_str}}}"
# Use square braces to indicate an option or optional argument.
return f"[{choices_str}]"
def get_missing_message(self, param):
choice_str = ",\n\t".join(self.choices)
return f"Choose from:\n\t{choice_str}"
def convert(self, value, param, ctx):
# Match through normalization and case sensitivity
# first do token_normalize_func, then lowercase
# preserve original `value` to produce an accurate message in
# `self.fail`
normed_value = value
normed_choices = {choice: choice for choice in self.choices}
if ctx is not None and ctx.token_normalize_func is not None:
normed_value = ctx.token_normalize_func(value)
normed_choices = {
ctx.token_normalize_func(normed_choice): original
for normed_choice, original in normed_choices.items()
}
if not self.case_sensitive:
normed_value = normed_value.casefold()
normed_choices = {
normed_choice.casefold(): original
for normed_choice, original in normed_choices.items()
}
if normed_value in normed_choices:
return normed_choices[normed_value]
one_of = "one of " if len(self.choices) > 1 else ""
choices_str = ", ".join(repr(c) for c in self.choices)
self.fail(f"{value!r} is not {one_of}{choices_str}.", param, ctx)
def __repr__(self):
return f"Choice({list(self.choices)})"
def shell_complete(self, ctx, param, incomplete):
"""Complete choices that start with the incomplete value.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
str_choices = map(str, self.choices)
if self.case_sensitive:
matched = (c for c in str_choices if c.startswith(incomplete))
else:
incomplete = incomplete.lower()
matched = (c for c in str_choices if c.lower().startswith(incomplete))
return [CompletionItem(c) for c in matched]
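# Illustrative usage sketch of ``Choice`` attached to an option; it assumes
# the public ``click`` package is importable, and the ``--color`` option name
# is invented for this example.
def _example_choice_option():
    import click
    @click.command()
    @click.option("--color", type=click.Choice(["red", "green"], case_sensitive=False))
    def paint(color):
        # Whatever casing the user types (e.g. ``--color RED``), the callback
        # receives one of the originally declared choices: "red" or "green".
        click.echo(color)
    return paint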
class DateTime(ParamType):
"""The DateTime type converts date strings into `datetime` objects.
The format strings which are checked are configurable, but default to some
common (non-timezone aware) ISO 8601 formats.
When specifying *DateTime* formats, you should only pass a list or a tuple.
Other iterables, like generators, may lead to surprising results.
The format strings are processed using ``datetime.strptime``, and this
consequently defines the format strings which are allowed.
Parsing is tried using each format, in order, and the first format which
parses successfully is used.
:param formats: A list or tuple of date format strings, in the order in
which they should be tried. Defaults to
``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
``'%Y-%m-%d %H:%M:%S'``.
"""
name = "datetime"
def __init__(self, formats=None):
self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["formats"] = self.formats
return info_dict
def get_metavar(self, param):
return f"[{'|'.join(self.formats)}]"
def _try_to_convert_date(self, value, format):
try:
return datetime.strptime(value, format)
except ValueError:
return None
def convert(self, value, param, ctx):
if isinstance(value, datetime):
return value
for format in self.formats:
converted = self._try_to_convert_date(value, format)
if converted is not None:
return converted
plural = "s" if len(self.formats) > 1 else ""
formats_str = ", ".join(repr(f) for f in self.formats)
self.fail(
f"{value!r} does not match the format{plural} {formats_str}.", param, ctx
)
def __repr__(self):
return "DateTime"
class _NumberParamTypeBase(ParamType):
_number_class = None
def convert(self, value, param, ctx):
try:
return self._number_class(value)
except ValueError:
self.fail(f"{value!r} is not a valid {self.name}.", param, ctx)
class _NumberRangeBase(_NumberParamTypeBase):
def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):
self.min = min
self.max = max
self.min_open = min_open
self.max_open = max_open
self.clamp = clamp
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(
min=self.min,
max=self.max,
min_open=self.min_open,
max_open=self.max_open,
clamp=self.clamp,
)
return info_dict
def convert(self, value, param, ctx):
import operator
rv = super().convert(value, param, ctx)
lt_min = self.min is not None and (
operator.le if self.min_open else operator.lt
)(rv, self.min)
gt_max = self.max is not None and (
operator.ge if self.max_open else operator.gt
)(rv, self.max)
if self.clamp:
if lt_min:
return self._clamp(self.min, 1, self.min_open)
if gt_max:
return self._clamp(self.max, -1, self.max_open)
if lt_min or gt_max:
self.fail(f"{rv} is not in the range {self._describe_range()}.", param, ctx)
return rv
def _clamp(self, bound, dir, open):
"""Find the valid value to clamp to bound in the given
direction.
:param bound: The boundary value.
:param dir: 1 or -1 indicating the direction to move.
:param open: If true, the range does not include the bound.
"""
raise NotImplementedError
def _describe_range(self):
"""Describe the range for use in help text."""
if self.min is None:
op = "<" if self.max_open else "<="
return f"x{op}{self.max}"
if self.max is None:
op = ">" if self.min_open else ">="
return f"x{op}{self.min}"
lop = "<" if self.min_open else "<="
rop = "<" if self.max_open else "<="
return f"{self.min}{lop}x{rop}{self.max}"
def __repr__(self):
clamp = " clamped" if self.clamp else ""
return f"<{type(self).__name__} {self._describe_range()}{clamp}>"
class IntParamType(_NumberParamTypeBase):
name = "integer"
_number_class = int
def __repr__(self):
return "INT"
class IntRange(_NumberRangeBase, IntParamType):
"""Restrict an :data:`click.INT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "integer range"
def _clamp(self, bound, dir, open):
if not open:
return bound
return bound + dir
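# Illustrative sketch of ``IntRange`` with clamping; calling ``convert``
# directly with ``param=None, ctx=None`` is an assumption made to keep the
# example short.
def _example_int_range_clamp():
    bounded = IntRange(0, 10, clamp=True)
    # 42 is above the maximum, so it is clamped down to 10 instead of failing.
    return bounded.convert("42", None, None)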
class FloatParamType(_NumberParamTypeBase):
name = "float"
_number_class = float
def __repr__(self):
return "FLOAT"
class FloatRange(_NumberRangeBase, FloatParamType):
"""Restrict a :data:`click.FLOAT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing. This is not supported if either
boundary is marked ``open``.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "float range"
def __init__(self, min=None, max=None, min_open=False, max_open=False, clamp=False):
super().__init__(
min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp
)
if (min_open or max_open) and clamp:
raise TypeError("Clamping is not supported for open bounds.")
def _clamp(self, bound, dir, open):
if not open:
return bound
# Could use Python 3.9's math.nextafter here, but clamping an
# open float range doesn't seem to be particularly useful. It's
# left up to the user to write a callback to do it if needed.
raise RuntimeError("Clamping is not supported for open bounds.")
class BoolParamType(ParamType):
name = "boolean"
def convert(self, value, param, ctx):
if value in {False, True}:
return bool(value)
norm = value.strip().lower()
if norm in {"1", "true", "t", "yes", "y", "on"}:
return True
if norm in {"0", "false", "f", "no", "n", "off"}:
return False
self.fail(f"{value!r} is not a valid boolean.", param, ctx)
def __repr__(self):
return "BOOL"
class UUIDParameterType(ParamType):
name = "uuid"
def convert(self, value, param, ctx):
import uuid
if isinstance(value, uuid.UUID):
return value
value = value.strip()
try:
return uuid.UUID(value)
except ValueError:
self.fail(f"{value!r} is not a valid UUID.", param, ctx)
def __repr__(self):
return "UUID"
class File(ParamType):
"""Declares a parameter to be a file for reading or writing. The file
is automatically closed once the context tears down (after the command
finished working).
Files can be opened for reading or writing. The special value ``-``
indicates stdin or stdout depending on the mode.
By default, the file is opened for reading text data, but it can also be
opened in binary mode or for writing. The encoding parameter can be used
to force a specific encoding.
The `lazy` flag controls if the file should be opened immediately or upon
first IO. The default is to be non-lazy for standard input and output
streams as well as files opened for reading, `lazy` otherwise. When opening a
file lazily for reading, it is still opened temporarily for validation, but
will not be held open until first IO. lazy is mainly useful when opening
for writing to avoid creating the file until it is needed.
Starting with Click 2.0, files can also be opened atomically in which
case all writes go into a separate file in the same folder and upon
completion the file will be moved over to the original location. This
is useful if a file regularly read by other users is modified.
See :ref:`file-args` for more information.
"""
name = "filename"
envvar_list_splitter = os.path.pathsep
def __init__(
self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False
):
self.mode = mode
self.encoding = encoding
self.errors = errors
self.lazy = lazy
self.atomic = atomic
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(mode=self.mode, encoding=self.encoding)
return info_dict
def resolve_lazy_flag(self, value):
if self.lazy is not None:
return self.lazy
if value == "-":
return False
elif "w" in self.mode:
return True
return False
def convert(self, value, param, ctx):
try:
if hasattr(value, "read") or hasattr(value, "write"):
return value
lazy = self.resolve_lazy_flag(value)
if lazy:
f = LazyFile(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
if ctx is not None:
ctx.call_on_close(f.close_intelligently)
return f
f, should_close = open_stream(
value, self.mode, self.encoding, self.errors, atomic=self.atomic
)
# If a context is provided, we automatically close the file
# at the end of the context execution (or flush out). If a
# context does not exist, it's the caller's responsibility to
# properly close the file. This for instance happens when the
# type is used with prompts.
if ctx is not None:
if should_close:
ctx.call_on_close(safecall(f.close))
else:
ctx.call_on_close(safecall(f.flush))
return f
except OSError as e: # noqa: B014
self.fail(f"{filename_to_ui(value)!r}: {get_strerror(e)}", param, ctx)
def shell_complete(self, ctx, param, incomplete):
"""Return a special completion marker that tells the completion
system to use the shell to provide file path completions.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
return [CompletionItem(incomplete, type="file")]
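# Illustrative sketch of ``File`` used for a command argument; the ``click``
# import and the command name are assumptions made for this example.
def _example_file_argument():
    import click
    @click.command()
    @click.argument("src", type=click.File("r"))
    def head(src):
        # ``src`` is an open text stream (or stdin when "-" is passed); it is
        # closed automatically when the context tears down.
        click.echo(src.readline())
    return head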
class Path(ParamType):
"""The path type is similar to the :class:`File` type but it performs
different checks. First of all, instead of returning an open file
handle it returns just the filename. Secondly, it can perform various
basic checks about what the file or directory should be.
.. versionchanged:: 6.0
`allow_dash` was added.
:param exists: if set to true, the file or directory needs to exist for
this value to be valid. If this is not required and a
file does indeed not exist, then all further checks are
silently skipped.
:param file_okay: controls if a file is a possible value.
:param dir_okay: controls if a directory is a possible value.
:param writable: if true, a writable check is performed.
:param readable: if true, a readable check is performed.
:param resolve_path: if this is true, then the path is fully resolved
before the value is passed onwards. This means
that it's absolute and symlinks are resolved. It
will not expand a tilde-prefix, as this is
supposed to be done by the shell only.
:param allow_dash: If this is set to `True`, a single dash to indicate
standard streams is permitted.
:param path_type: optionally a string type that should be used to
represent the path. The default is `None` which
means the return value will be either bytes or
unicode depending on what makes most sense given the
input data Click deals with.
"""
envvar_list_splitter = os.path.pathsep
def __init__(
self,
exists=False,
file_okay=True,
dir_okay=True,
writable=False,
readable=True,
resolve_path=False,
allow_dash=False,
path_type=None,
):
self.exists = exists
self.file_okay = file_okay
self.dir_okay = dir_okay
self.writable = writable
self.readable = readable
self.resolve_path = resolve_path
self.allow_dash = allow_dash
self.type = path_type
if self.file_okay and not self.dir_okay:
self.name = "file"
self.path_type = "File"
elif self.dir_okay and not self.file_okay:
self.name = "directory"
self.path_type = "Directory"
else:
self.name = "path"
self.path_type = "Path"
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict.update(
exists=self.exists,
file_okay=self.file_okay,
dir_okay=self.dir_okay,
writable=self.writable,
readable=self.readable,
allow_dash=self.allow_dash,
)
return info_dict
def coerce_path_result(self, rv):
if self.type is not None and not isinstance(rv, self.type):
if self.type is str:
rv = rv.decode(get_filesystem_encoding())
else:
rv = rv.encode(get_filesystem_encoding())
return rv
def convert(self, value, param, ctx):
rv = value
is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
if not is_dash:
if self.resolve_path:
rv = os.path.realpath(rv)
try:
st = os.stat(rv)
except OSError:
if not self.exists:
return self.coerce_path_result(rv)
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} does not exist.",
param,
ctx,
)
if not self.file_okay and stat.S_ISREG(st.st_mode):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is a file.",
param,
ctx,
)
if not self.dir_okay and stat.S_ISDIR(st.st_mode):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is a directory.",
param,
ctx,
)
if self.writable and not os.access(value, os.W_OK):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is not writable.",
param,
ctx,
)
if self.readable and not os.access(value, os.R_OK):
self.fail(
f"{self.path_type} {filename_to_ui(value)!r} is not readable.",
param,
ctx,
)
return self.coerce_path_result(rv)
def shell_complete(self, ctx, param, incomplete):
"""Return a special completion marker that tells the completion
system to use the shell to provide path completions for only
directories or any paths.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
type = "dir" if self.dir_okay and not self.file_okay else "file"
return [CompletionItem(incomplete, type=type)]
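# Illustrative sketch of ``Path`` requiring an existing directory; the
# ``click`` import and the command name are assumptions made for this example.
def _example_path_argument():
    import click
    @click.command()
    @click.argument("workdir", type=click.Path(exists=True, file_okay=False))
    def build(workdir):
        # Unlike ``File``, the value is just the path string, not an open handle.
        click.echo(workdir)
    return build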
class Tuple(CompositeParamType):
"""The default behavior of Click is to apply a type on a value directly.
This works well in most cases, except for when `nargs` is set to a fixed
count and different types should be used for different items. In this
case the :class:`Tuple` type can be used. This type can only be used
if `nargs` is set to a fixed number.
For more information see :ref:`tuple-type`.
This can be selected by using a Python tuple literal as a type.
:param types: a list of types that should be used for the tuple items.
"""
def __init__(self, types):
self.types = [convert_type(ty) for ty in types]
def to_info_dict(self):
info_dict = super().to_info_dict()
info_dict["types"] = [t.to_info_dict() for t in self.types]
return info_dict
@property
def name(self):
return f"<{' '.join(ty.name for ty in self.types)}>"
@property
def arity(self):
return len(self.types)
def convert(self, value, param, ctx):
if len(value) != len(self.types):
raise TypeError(
"It would appear that nargs is set to conflict with the"
" composite type arity."
)
return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
def convert_type(ty, default=None):
"""Find the most appropriate :class:`ParamType` for the given Python
type. If the type isn't provided, it can be inferred from a default
value.
"""
guessed_type = False
if ty is None and default is not None:
if isinstance(default, (tuple, list)):
# If the default is empty, ty will remain None and will
# return STRING.
if default:
item = default[0]
# A tuple of tuples needs to detect the inner types.
# Can't call convert recursively because that would
# incorrectly unwind the tuple to a single type.
if isinstance(item, (tuple, list)):
ty = tuple(map(type, item))
else:
ty = type(item)
else:
ty = type(default)
guessed_type = True
if isinstance(ty, tuple):
return Tuple(ty)
if isinstance(ty, ParamType):
return ty
if ty is str or ty is None:
return STRING
if ty is int:
return INT
if ty is float:
return FLOAT
# Booleans are only okay if not guessed. For is_flag options with
# flag_value, default=True indicates which flag_value is the
# default.
if ty is bool and not guessed_type:
return BOOL
if guessed_type:
return STRING
if __debug__:
try:
if issubclass(ty, ParamType):
raise AssertionError(
f"Attempted to use an uninstantiated parameter type ({ty})."
)
except TypeError:
# ty is an instance (correct), so issubclass fails.
pass
return FuncParamType(ty)
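# Illustrative sketch of how ``convert_type`` infers a ParamType; the
# assertions mirror the rules implemented above.
def _example_convert_type_inference():
    assert convert_type(None, default="hello") is STRING  # guessed from a str default
    assert convert_type(None, default=3) is INT           # guessed from an int default
    assert convert_type(bool) is BOOL                     # explicit bool type
    return convert_type((int, str))                       # a composite Tuple(INT, STRING) type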
#: A dummy parameter type that just does nothing. From a user's
#: perspective this appears to just be the same as `STRING` but
#: internally no string conversion takes place if the input was bytes.
#: This is usually useful when working with file paths as they can
#: appear in bytes and unicode.
#:
#: For path related uses the :class:`Path` type is a better choice but
#: there are situations where an unprocessed type is useful which is why
#: it is provided.
#:
#: .. versionadded:: 4.0
UNPROCESSED = UnprocessedParamType()
#: A unicode string parameter type which is the implicit default. This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()
#: An integer parameter. This can also be selected by using ``int`` as
#: type.
INT = IntParamType()
#: A floating point value parameter. This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()
#: A boolean parameter. This is the default for boolean flags. This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()
#: A UUID parameter.
UUID = UUIDParameterType()
| bsd-3-clause |
nox/servo | tests/wpt/web-platform-tests/XMLHttpRequest/resources/authentication.py | 247 | 1292 | def main(request, response):
if "logout" in request.GET:
return ((401, "Unauthorized"),
[("WWW-Authenticate", 'Basic realm="test"')],
"Logged out, hopefully")
session_user = request.auth.username
session_pass = request.auth.password
expected_user_name = request.headers.get("X-User", None)
token = expected_user_name
if session_user is None and session_pass is None:
if token is not None and request.server.stash.take(token) is not None:
return 'FAIL (did not authorize)'
else:
if token is not None:
request.server.stash.put(token, "1")
status = (401, 'Unauthorized')
headers = [('WWW-Authenticate', 'Basic realm="test"'),
('XHR-USER', expected_user_name),
('SES-USER', session_user)]
return status, headers, 'FAIL (should be transparent)'
else:
if request.server.stash.take(token) == "1":
challenge = "DID"
else:
challenge = "DID-NOT"
headers = [('XHR-USER', expected_user_name),
('SES-USER', session_user),
("X-challenge", challenge)]
        return headers, session_user + "\n" + session_pass
| mpl-2.0 |
blindFS/powerline | powerline/lint/__init__.py | 9 | 21309 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import logging
from collections import defaultdict
from itertools import chain
from functools import partial
from powerline import generate_config_finder, get_config_paths, load_config
from powerline.segments.vim import vim_modes
from powerline.lib.dict import mergedicts_copy
from powerline.lib.config import ConfigLoader
from powerline.lib.unicode import unicode
from powerline.lib.path import join
from powerline.lint.markedjson import load
from powerline.lint.markedjson.error import echoerr, EchoErr, MarkedError
from powerline.lint.checks import (check_matcher_func, check_ext, check_config, check_top_theme,
check_color, check_translated_group_name, check_group,
check_segment_module, check_exinclude_function, type_keys,
check_segment_function, check_args, get_one_segment_function,
check_highlight_groups, check_highlight_group, check_full_segment_data,
get_all_possible_functions, check_segment_data_key, register_common_name,
highlight_group_spec, check_log_file_level, check_logging_handler)
from powerline.lint.spec import Spec
from powerline.lint.context import Context
def open_file(path):
return open(path, 'rb')
def generate_json_config_loader(lhadproblem):
def load_json_config(config_file_path, load=load, open_file=open_file):
with open_file(config_file_path) as config_file_fp:
r, hadproblem = load(config_file_fp)
if hadproblem:
lhadproblem[0] = True
return r
return load_json_config
function_name_re = '^(\w+\.)*[a-zA-Z_]\w*$'
divider_spec = Spec().printable().len(
'le', 3, (lambda value: 'Divider {0!r} is too large!'.format(value))).copy
ext_theme_spec = Spec().type(unicode).func(lambda *args: check_config('themes', *args)).copy
top_theme_spec = Spec().type(unicode).func(check_top_theme).copy
ext_spec = Spec(
colorscheme=Spec().type(unicode).func(
(lambda *args: check_config('colorschemes', *args))
),
theme=ext_theme_spec(),
top_theme=top_theme_spec().optional(),
).copy
gen_components_spec = (lambda *components: Spec().list(Spec().type(unicode).oneof(set(components))))
log_level_spec = Spec().re('^[A-Z]+$').func(
(lambda value, *args: (True, True, not hasattr(logging, value))),
(lambda value: 'unknown debugging level {0}'.format(value))
).copy
log_format_spec = Spec().type(unicode).copy
main_spec = (Spec(
common=Spec(
default_top_theme=top_theme_spec().optional(),
term_truecolor=Spec().type(bool).optional(),
term_escape_style=Spec().type(unicode).oneof(set(('auto', 'xterm', 'fbterm'))).optional(),
# Python is capable of loading from zip archives. Thus checking path
# only for existence of the path, not for it being a directory
paths=Spec().list(
(lambda value, *args: (True, True, not os.path.exists(os.path.expanduser(value.value)))),
(lambda value: 'path does not exist: {0}'.format(value))
).optional(),
log_file=Spec().either(
Spec().type(unicode).func(
(
lambda value, *args: (
True,
True,
not os.path.isdir(os.path.dirname(os.path.expanduser(value)))
)
),
(lambda value: 'directory does not exist: {0}'.format(os.path.dirname(value)))
),
Spec().list(Spec().either(
Spec().type(unicode, type(None)),
Spec().tuple(
Spec().re(function_name_re).func(check_logging_handler),
Spec().tuple(
Spec().type(list).optional(),
Spec().type(dict).optional(),
),
log_level_spec().func(check_log_file_level).optional(),
log_format_spec().optional(),
),
))
).optional(),
log_level=log_level_spec().optional(),
log_format=log_format_spec().optional(),
interval=Spec().either(Spec().cmp('gt', 0.0), Spec().type(type(None))).optional(),
reload_config=Spec().type(bool).optional(),
watcher=Spec().type(unicode).oneof(set(('auto', 'inotify', 'stat'))).optional(),
).context_message('Error while loading common configuration (key {key})'),
ext=Spec(
vim=ext_spec().update(
components=gen_components_spec('statusline', 'tabline').optional(),
local_themes=Spec(
__tabline__=ext_theme_spec(),
).unknown_spec(
Spec().re(function_name_re).func(partial(check_matcher_func, 'vim')),
ext_theme_spec()
),
).optional(),
ipython=ext_spec().update(
local_themes=Spec(
in2=ext_theme_spec(),
out=ext_theme_spec(),
rewrite=ext_theme_spec(),
),
).optional(),
shell=ext_spec().update(
components=gen_components_spec('tmux', 'prompt').optional(),
local_themes=Spec(
continuation=ext_theme_spec(),
select=ext_theme_spec(),
),
).optional(),
wm=ext_spec().update(
local_themes=Spec().unknown_spec(
Spec().re('^[0-9A-Za-z-]+$'),
ext_theme_spec()
).optional()
).optional(),
).unknown_spec(
check_ext,
ext_spec(),
).context_message('Error while loading extensions configuration (key {key})'),
).context_message('Error while loading main configuration'))
term_color_spec = Spec().unsigned().cmp('le', 255).copy
true_color_spec = Spec().re(
'^[0-9a-fA-F]{6}$',
(lambda value: '"{0}" is not a six-digit hexadecimal unsigned integer written as a string'.format(value))
).copy
colors_spec = (Spec(
colors=Spec().unknown_spec(
Spec().ident(),
Spec().either(
Spec().tuple(term_color_spec(), true_color_spec()),
term_color_spec()
)
).context_message('Error while checking colors (key {key})'),
gradients=Spec().unknown_spec(
Spec().ident(),
Spec().tuple(
Spec().len('gt', 1).list(term_color_spec()),
Spec().len('gt', 1).list(true_color_spec()).optional(),
)
).context_message('Error while checking gradients (key {key})'),
).context_message('Error while loading colors configuration'))
color_spec = Spec().type(unicode).func(check_color).copy
name_spec = Spec().type(unicode).len('gt', 0).optional().copy
group_name_spec = Spec().ident().copy
group_spec = Spec().either(Spec(
fg=color_spec(),
bg=color_spec(),
attrs=Spec().list(Spec().type(unicode).oneof(set(('bold', 'italic', 'underline')))),
), group_name_spec().func(check_group)).copy
groups_spec = Spec().unknown_spec(
group_name_spec(),
group_spec(),
).context_message('Error while loading groups (key {key})').copy
colorscheme_spec = (Spec(
name=name_spec(),
groups=groups_spec(),
).context_message('Error while loading coloscheme'))
mode_translations_value_spec = Spec(
colors=Spec().unknown_spec(
color_spec(),
color_spec(),
).optional(),
groups=Spec().unknown_spec(
group_name_spec().func(check_translated_group_name),
group_spec(),
).optional(),
).copy
top_colorscheme_spec = (Spec(
name=name_spec(),
groups=groups_spec(),
mode_translations=Spec().unknown_spec(
Spec().type(unicode),
mode_translations_value_spec(),
).optional().context_message('Error while loading mode translations (key {key})').optional(),
).context_message('Error while loading top-level coloscheme'))
vim_mode_spec = Spec().oneof(set(list(vim_modes) + ['nc', 'tab_nc', 'buf_nc'])).copy
vim_colorscheme_spec = (Spec(
name=name_spec(),
groups=groups_spec(),
mode_translations=Spec().unknown_spec(
vim_mode_spec(),
mode_translations_value_spec(),
).optional().context_message('Error while loading mode translations (key {key})'),
).context_message('Error while loading vim colorscheme'))
shell_mode_spec = Spec().re('^(?:[\w\-]+|\.safe)$').copy
shell_colorscheme_spec = (Spec(
name=name_spec(),
groups=groups_spec(),
mode_translations=Spec().unknown_spec(
shell_mode_spec(),
mode_translations_value_spec(),
).optional().context_message('Error while loading mode translations (key {key})'),
).context_message('Error while loading shell colorscheme'))
args_spec = Spec(
pl=Spec().error('pl object must be set by powerline').optional(),
segment_info=Spec().error('Segment info dictionary must be set by powerline').optional(),
).unknown_spec(Spec(), Spec()).optional().copy
segment_module_spec = Spec().type(unicode).func(check_segment_module).optional().copy
exinclude_spec = Spec().re(function_name_re).func(check_exinclude_function).copy
segment_spec_base = Spec(
name=Spec().re('^[a-zA-Z_]\w*$').optional(),
function=Spec().re(function_name_re).func(check_segment_function).optional(),
exclude_modes=Spec().list(vim_mode_spec()).optional(),
include_modes=Spec().list(vim_mode_spec()).optional(),
exclude_function=exinclude_spec().optional(),
include_function=exinclude_spec().optional(),
draw_hard_divider=Spec().type(bool).optional(),
draw_soft_divider=Spec().type(bool).optional(),
draw_inner_divider=Spec().type(bool).optional(),
display=Spec().type(bool).optional(),
module=segment_module_spec(),
priority=Spec().type(int, float, type(None)).optional(),
after=Spec().printable().optional(),
before=Spec().printable().optional(),
width=Spec().either(Spec().unsigned(), Spec().cmp('eq', 'auto')).optional(),
align=Spec().oneof(set('lr')).optional(),
args=args_spec().func(lambda *args, **kwargs: check_args(get_one_segment_function, *args, **kwargs)),
contents=Spec().printable().optional(),
highlight_groups=Spec().list(
highlight_group_spec().re(
'^(?:(?!:divider$).)+$',
(lambda value: 'it is recommended that only divider highlight group names end with ":divider"')
)
).func(check_highlight_groups).optional(),
divider_highlight_group=highlight_group_spec().func(check_highlight_group).re(
':divider$',
(lambda value: 'it is recommended that divider highlight group names end with ":divider"')
).optional(),
).func(check_full_segment_data).copy
subsegment_spec = segment_spec_base().update(
type=Spec().oneof(set((key for key in type_keys if key != 'segment_list'))).optional(),
)
segment_spec = segment_spec_base().update(
type=Spec().oneof(type_keys).optional(),
segments=Spec().optional().list(subsegment_spec),
)
segments_spec = Spec().optional().list(segment_spec).copy
segdict_spec = Spec(
left=segments_spec().context_message('Error while loading segments from left side (key {key})'),
right=segments_spec().context_message('Error while loading segments from right side (key {key})'),
).func(
(lambda value, *args: (True, True, not (('left' in value) or ('right' in value)))),
(lambda value: 'segments dictionary must contain either left, right or both keys')
).context_message('Error while loading segments (key {key})').copy
divside_spec = Spec(
hard=divider_spec(),
soft=divider_spec(),
).copy
segment_data_value_spec = Spec(
after=Spec().printable().optional(),
before=Spec().printable().optional(),
display=Spec().type(bool).optional(),
args=args_spec().func(lambda *args, **kwargs: check_args(get_all_possible_functions, *args, **kwargs)),
contents=Spec().printable().optional(),
).copy
dividers_spec = Spec(
left=divside_spec(),
right=divside_spec(),
).copy
spaces_spec = Spec().unsigned().cmp(
'le', 2, (lambda value: 'Are you sure you need such a big ({0}) number of spaces?'.format(value))
).copy
common_theme_spec = Spec(
default_module=segment_module_spec().optional(),
cursor_space=Spec().type(int, float).cmp('le', 100).cmp('gt', 0).optional(),
cursor_columns=Spec().type(int).cmp('gt', 0).optional(),
).context_message('Error while loading theme').copy
top_theme_spec = common_theme_spec().update(
dividers=dividers_spec(),
spaces=spaces_spec(),
use_non_breaking_spaces=Spec().type(bool).optional(),
segment_data=Spec().unknown_spec(
Spec().func(check_segment_data_key),
segment_data_value_spec(),
).optional().context_message('Error while loading segment data (key {key})'),
)
main_theme_spec = common_theme_spec().update(
dividers=dividers_spec().optional(),
spaces=spaces_spec().optional(),
segment_data=Spec().unknown_spec(
Spec().func(check_segment_data_key),
segment_data_value_spec(),
).optional().context_message('Error while loading segment data (key {key})'),
)
theme_spec = common_theme_spec().update(
dividers=dividers_spec().optional(),
spaces=spaces_spec().optional(),
segment_data=Spec().unknown_spec(
Spec().func(check_segment_data_key),
segment_data_value_spec(),
).optional().context_message('Error while loading segment data (key {key})'),
segments=segdict_spec().update(above=Spec().list(segdict_spec()).optional()),
)
def register_common_names():
register_common_name('player', 'powerline.segments.common.players', '_player')
def load_json_file(path):
with open_file(path) as F:
try:
config, hadproblem = load(F)
except MarkedError as e:
return True, None, str(e)
else:
return hadproblem, config, None
def updated_with_config(d):
hadproblem, config, error = load_json_file(d['path'])
d.update(
hadproblem=hadproblem,
config=config,
error=error,
)
return d
def find_all_ext_config_files(search_paths, subdir):
for config_root in search_paths:
top_config_subpath = join(config_root, subdir)
if not os.path.isdir(top_config_subpath):
if os.path.exists(top_config_subpath):
yield {
'error': 'Path {0} is not a directory'.format(top_config_subpath),
'path': top_config_subpath,
}
continue
for ext_name in os.listdir(top_config_subpath):
ext_path = os.path.join(top_config_subpath, ext_name)
if not os.path.isdir(ext_path):
if ext_name.endswith('.json') and os.path.isfile(ext_path):
yield updated_with_config({
'error': False,
'path': ext_path,
'name': ext_name[:-5],
'ext': None,
'type': 'top_' + subdir,
})
else:
yield {
'error': 'Path {0} is not a directory or configuration file'.format(ext_path),
'path': ext_path,
}
continue
for config_file_name in os.listdir(ext_path):
config_file_path = os.path.join(ext_path, config_file_name)
if config_file_name.endswith('.json') and os.path.isfile(config_file_path):
yield updated_with_config({
'error': False,
'path': config_file_path,
'name': config_file_name[:-5],
'ext': ext_name,
'type': subdir,
})
else:
yield {
'error': 'Path {0} is not a configuration file'.format(config_file_path),
'path': config_file_path,
}
def dict2(d):
return defaultdict(dict, ((k, dict(v)) for k, v in d.items()))
def check(paths=None, debug=False, echoerr=echoerr, require_ext=None):
'''Check configuration sanity
:param list paths:
Paths from which configuration should be loaded.
:param bool debug:
Determines whether some information useful for debugging linter should
be output.
:param function echoerr:
Function that will be used to echo the error(s). Should accept four
optional keyword parameters: ``problem`` and ``problem_mark``, and
``context`` and ``context_mark``.
:param str require_ext:
Require configuration for some extension to be present.
:return:
``False`` if user configuration seems to be completely sane and ``True``
if some problems were found.
'''
hadproblem = False
register_common_names()
search_paths = paths or get_config_paths()
find_config_files = generate_config_finder(lambda: search_paths)
logger = logging.getLogger('powerline-lint')
logger.setLevel(logging.DEBUG if debug else logging.ERROR)
logger.addHandler(logging.StreamHandler())
ee = EchoErr(echoerr, logger)
if require_ext:
used_main_spec = main_spec.copy()
try:
used_main_spec['ext'][require_ext].required()
except KeyError:
used_main_spec['ext'][require_ext] = ext_spec()
else:
used_main_spec = main_spec
lhadproblem = [False]
load_json_config = generate_json_config_loader(lhadproblem)
config_loader = ConfigLoader(run_once=True, load=load_json_config)
lists = {
'colorschemes': set(),
'themes': set(),
'exts': set(),
}
found_dir = {
'themes': False,
'colorschemes': False,
}
config_paths = defaultdict(lambda: defaultdict(dict))
loaded_configs = defaultdict(lambda: defaultdict(dict))
for d in chain(
find_all_ext_config_files(search_paths, 'colorschemes'),
find_all_ext_config_files(search_paths, 'themes'),
):
if d['error']:
hadproblem = True
ee(problem=d['error'])
continue
if d['hadproblem']:
hadproblem = True
if d['ext']:
found_dir[d['type']] = True
lists['exts'].add(d['ext'])
if d['name'] == '__main__':
pass
elif d['name'].startswith('__') or d['name'].endswith('__'):
hadproblem = True
ee(problem='File name is not supposed to start or end with “__”: {0}'.format(
d['path']))
else:
lists[d['type']].add(d['name'])
config_paths[d['type']][d['ext']][d['name']] = d['path']
loaded_configs[d['type']][d['ext']][d['name']] = d['config']
else:
config_paths[d['type']][d['name']] = d['path']
loaded_configs[d['type']][d['name']] = d['config']
for typ in ('themes', 'colorschemes'):
if not found_dir[typ]:
hadproblem = True
ee(problem='Subdirectory {0} was not found in paths {1}'.format(typ, ', '.join(search_paths)))
diff = set(config_paths['colorschemes']) - set(config_paths['themes'])
if diff:
hadproblem = True
for ext in diff:
typ = 'colorschemes' if ext in config_paths['themes'] else 'themes'
if not config_paths['top_' + typ] or typ == 'themes':
ee(problem='{0} extension {1} not present in {2}'.format(
ext,
'configuration' if (
ext in loaded_configs['themes'] and ext in loaded_configs['colorschemes']
) else 'directory',
typ,
))
try:
main_config = load_config('config', find_config_files, config_loader)
except IOError:
main_config = {}
ee(problem='Configuration file not found: config.json')
hadproblem = True
except MarkedError as e:
main_config = {}
ee(problem=str(e))
hadproblem = True
else:
if used_main_spec.match(
main_config,
data={'configs': config_paths, 'lists': lists},
context=Context(main_config),
echoerr=ee
)[1]:
hadproblem = True
import_paths = [os.path.expanduser(path) for path in main_config.get('common', {}).get('paths', [])]
try:
colors_config = load_config('colors', find_config_files, config_loader)
except IOError:
colors_config = {}
ee(problem='Configuration file not found: colors.json')
hadproblem = True
except MarkedError as e:
colors_config = {}
ee(problem=str(e))
hadproblem = True
else:
if colors_spec.match(colors_config, context=Context(colors_config), echoerr=ee)[1]:
hadproblem = True
if lhadproblem[0]:
hadproblem = True
top_colorscheme_configs = dict(loaded_configs['top_colorschemes'])
data = {
'ext': None,
'top_colorscheme_configs': top_colorscheme_configs,
'ext_colorscheme_configs': {},
'colors_config': colors_config
}
for colorscheme, config in loaded_configs['top_colorschemes'].items():
data['colorscheme'] = colorscheme
if top_colorscheme_spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
hadproblem = True
ext_colorscheme_configs = dict2(loaded_configs['colorschemes'])
for ext, econfigs in ext_colorscheme_configs.items():
data = {
'ext': ext,
'top_colorscheme_configs': top_colorscheme_configs,
'ext_colorscheme_configs': ext_colorscheme_configs,
'colors_config': colors_config,
}
for colorscheme, config in econfigs.items():
data['colorscheme'] = colorscheme
if ext == 'vim':
spec = vim_colorscheme_spec
elif ext == 'shell':
spec = shell_colorscheme_spec
else:
spec = colorscheme_spec
if spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
hadproblem = True
colorscheme_configs = {}
for ext in lists['exts']:
colorscheme_configs[ext] = {}
for colorscheme in lists['colorschemes']:
econfigs = ext_colorscheme_configs[ext]
ecconfigs = econfigs.get(colorscheme)
mconfigs = (
top_colorscheme_configs.get(colorscheme),
econfigs.get('__main__'),
ecconfigs,
)
if not (mconfigs[0] or mconfigs[2]):
continue
config = None
for mconfig in mconfigs:
if not mconfig:
continue
if config:
config = mergedicts_copy(config, mconfig)
else:
config = mconfig
colorscheme_configs[ext][colorscheme] = config
theme_configs = dict2(loaded_configs['themes'])
top_theme_configs = dict(loaded_configs['top_themes'])
for ext, configs in theme_configs.items():
data = {
'ext': ext,
'colorscheme_configs': colorscheme_configs,
'import_paths': import_paths,
'main_config': main_config,
'top_themes': top_theme_configs,
'ext_theme_configs': configs,
'colors_config': colors_config
}
for theme, config in configs.items():
data['theme'] = theme
if theme == '__main__':
data['theme_type'] = 'main'
spec = main_theme_spec
else:
data['theme_type'] = 'regular'
spec = theme_spec
if spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
hadproblem = True
for top_theme, config in top_theme_configs.items():
data = {
'ext': None,
'colorscheme_configs': colorscheme_configs,
'import_paths': import_paths,
'main_config': main_config,
'theme_configs': theme_configs,
'ext_theme_configs': None,
'colors_config': colors_config
}
data['theme_type'] = 'top'
data['theme'] = top_theme
if top_theme_spec.match(config, context=Context(config), data=data, echoerr=ee)[1]:
hadproblem = True
return hadproblem
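# Illustrative sketch of invoking the linter programmatically; the
# configuration path below is an assumption made for the example.
def _example_run_lint():
    return check(paths=['/home/user/.config/powerline'], debug=False)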
| mit |
chenjun0210/tensorflow | tensorflow/python/ops/state_ops.py | 10 | 9619 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variables. See the @{python/state_ops} guide.
@@Variable
@@global_variables
@@local_variables
@@model_variables
@@trainable_variables
@@moving_average_variables
@@global_variables_initializer
@@local_variables_initializer
@@variables_initializer
@@is_variable_initialized
@@report_uninitialized_variables
@@assert_variables_initialized
@@assign
@@assign_add
@@assign_sub
@@Saver
@@latest_checkpoint
@@get_checkpoint_state
@@update_checkpoint_state
@@get_variable
@@get_local_variable
@@VariableScope
@@variable_scope
@@variable_op_scope
@@get_variable_scope
@@make_template
@@no_regularizer
@@constant_initializer
@@random_normal_initializer
@@truncated_normal_initializer
@@random_uniform_initializer
@@uniform_unit_scaling_initializer
@@zeros_initializer
@@ones_initializer
@@orthogonal_initializer
@@fixed_size_partitioner
@@variable_axis_size_partitioner
@@min_max_variable_partitioner
@@scatter_update
@@scatter_add
@@scatter_sub
@@scatter_mul
@@scatter_div
@@scatter_nd_update
@@scatter_nd_add
@@scatter_nd_sub
@@sparse_mask
@@IndexedSlices
@@initialize_all_tables
@@tables_initializer
@@export_meta_graph
@@import_meta_graph
@@all_variables
@@initialize_all_variables
@@initialize_local_variables
@@initialize_variables
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
# pylint: enable=wildcard-import
# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
shared_name=""):
"""Deprecated. Used variable_op_v2 instead."""
if not set_shape:
shape = tensor_shape.unknown_shape()
ret = gen_state_ops._variable(shape=shape, dtype=dtype, name=name,
container=container, shared_name=shared_name)
# TODO(mrry): Move this to where it is used, so we can get rid of this op
# wrapper?
if set_shape:
ret.set_shape(shape)
return ret
def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""):
"""Create a variable Operation.
See also variables.Variable.
Args:
shape: The shape of the tensor managed by this variable
dtype: The underlying type of the tensor values.
name: optional name to use for the variable op.
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
    A variable tensor.
"""
return gen_state_ops._variable_v2(shape=shape,
dtype=dtype,
name=name,
container=container,
shared_name=shared_name)
def init_variable(v, init, name="init"):
"""Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
init: Tensor to assign to v,
Or an object convertible to Tensor e.g. nparray,
Or an Initializer that generates a tensor given the shape and type of v.
An "Initializer" is a callable that returns a tensor that "v" should be
set to. It will be called as init(shape, dtype).
name: Optional name for the op.
Returns:
The operation that initializes v.
"""
with ops.name_scope(None, v.op.name + "/", [v, init]):
with ops.name_scope(name) as scope:
with ops.colocate_with(v):
if callable(init):
assert v.get_shape().is_fully_defined(), "Variable shape unknown."
# TODO(mrry): Convert to v.shape when the property and
# accessor are reconciled (and all initializers support
# tf.TensorShape objects).
value = init(v.get_shape().as_list(), v.dtype.base_dtype)
value = ops.convert_to_tensor(value, name="value")
return gen_state_ops.assign(v, value, name=scope)
else:
init = ops.convert_to_tensor(init, name="init")
return gen_state_ops.assign(v, init, name=scope)
def is_variable_initialized(ref, name=None):
"""Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.is_variable_initialized(ref=ref, name=name)
# Handle resource variables.
if ref.op.type == "VarHandleOp":
return gen_resource_variable_ops.var_is_initialized_op(ref.handle,
name=name)
def assign_sub(ref, value, use_locking=None, name=None):
"""Update 'ref' by subtracting 'value' from it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
The value to be subtracted to the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_sub(
ref, value, use_locking=use_locking, name=name)
return ref.assign_sub(value, name=name)
def assign_add(ref, value, use_locking=None, name=None):
"""Update 'ref' by adding 'value' to it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the reset value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
The value to be added to the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_add(
ref, value, use_locking=use_locking, name=name)
return ref.assign_add(value, name=name)
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
"""Update 'ref' by assigning 'value' to it.
This operation outputs a Tensor that holds the new value of 'ref' after
the value has been assigned. This makes it easier to chain operations
that need to use the reset value.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
value: A `Tensor`. Must have the same type as `ref`.
The value to be assigned to the variable.
validate_shape: An optional `bool`. Defaults to `True`.
If true, the operation will validate that the shape
of 'value' matches the shape of the Tensor being assigned to. If false,
'ref' will take on the shape of 'value'.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A `Tensor` that will hold the new value of 'ref' after
the assignment has completed.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign(
ref, value, use_locking=use_locking, name=name,
validate_shape=validate_shape)
return ref.assign(value, name=name)
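def _example_assign_add():
  """Illustrative sketch of updating a variable in place with graph-mode
  (TF 1.x style) execution; the public ``tensorflow`` import path and the
  Session usage are assumptions made for this example.
  """
  import tensorflow as tf
  counter = tf.Variable(0, name="example_counter")
  increment = tf.assign_add(counter, 1)  # tensor holding the updated value
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    return sess.run(increment)  # -> 1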
| apache-2.0 |
llvm/llvm-test-suite | MicroBenchmarks/libs/benchmark/bindings/python/google_benchmark/__init__.py | 4 | 4370 | # Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python benchmarking utilities.
Example usage:
import google_benchmark as benchmark
@benchmark.register
def my_benchmark(state):
... # Code executed outside `while` loop is not timed.
while state:
... # Code executed within `while` loop is timed.
if __name__ == '__main__':
benchmark.main()
"""
from absl import app
from google_benchmark import _benchmark
from google_benchmark._benchmark import (
Counter,
kNanosecond,
kMicrosecond,
kMillisecond,
kSecond,
oNone,
o1,
oN,
oNSquared,
oNCubed,
oLogN,
oNLogN,
oAuto,
oLambda,
)
__all__ = [
"register",
"main",
"Counter",
"kNanosecond",
"kMicrosecond",
"kMillisecond",
"kSecond",
"oNone",
"o1",
"oN",
"oNSquared",
"oNCubed",
"oLogN",
"oNLogN",
"oAuto",
"oLambda",
]
__version__ = "0.2.0"
class __OptionMaker:
"""A stateless class to collect benchmark options.
Collect all decorator calls like @option.range(start=0, limit=1<<5).
"""
class Options:
"""Pure data class to store options calls, along with the benchmarked function."""
def __init__(self, func):
self.func = func
self.builder_calls = []
@classmethod
def make(cls, func_or_options):
"""Make Options from Options or the benchmarked function."""
if isinstance(func_or_options, cls.Options):
return func_or_options
return cls.Options(func_or_options)
def __getattr__(self, builder_name):
"""Append option call in the Options."""
        # The function that gets returned on @option.range(start=0, limit=1<<5).
def __builder_method(*args, **kwargs):
            # The decorator that gets called, either with the benchmarked function
# or the previous Options
def __decorator(func_or_options):
options = self.make(func_or_options)
options.builder_calls.append((builder_name, args, kwargs))
# The decorator returns Options so it is not technically a decorator
                # and needs a final call to @register
return options
return __decorator
return __builder_method
# Alias for nicer API.
# We have to instantiate an object, even if stateless, to be able to use __getattr__
# on option.range
option = __OptionMaker()
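# Illustrative sketch of chaining an @option._ call before the final
# @register; the ``unit`` builder name is assumed to be exposed on the
# underlying benchmark object, and the benchmark body is invented.
def _example_register_with_options():
    @register(name="sum_example")
    @option.unit(kMicrosecond)
    def sum_example(state):
        while state:
            sum(range(1000))
    return sum_example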
def register(undefined=None, *, name=None):
"""Register function for benchmarking."""
if undefined is None:
# Decorator is called without parenthesis so we return a decorator
return lambda f: register(f, name=name)
# We have either the function to benchmark (simple case) or an instance of Options
# (@option._ case).
options = __OptionMaker.make(undefined)
if name is None:
name = options.func.__name__
# We register the benchmark and reproduce all the @option._ calls onto the
# benchmark builder pattern
benchmark = _benchmark.RegisterBenchmark(name, options.func)
for name, args, kwargs in options.builder_calls[::-1]:
getattr(benchmark, name)(*args, **kwargs)
# return the benchmarked function because the decorator does not modify it
return options.func
def _flags_parser(argv):
argv = _benchmark.Initialize(argv)
return app.parse_flags_with_usage(argv)
def _run_benchmarks(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
return _benchmark.RunSpecifiedBenchmarks()
def main(argv=None):
return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)
# Methods for use with custom main function.
initialize = _benchmark.Initialize
run_benchmarks = _benchmark.RunSpecifiedBenchmarks
| apache-2.0 |
mwilliammyers/sportsstats | tests/test_nba.py | 1 | 1960 | #!/usr/bin/env python
"""test_nba
Tests for `nba` module.
"""
from sportsstats import nba
import json
import unittest
class TestNba(unittest.TestCase):
def setUp(self):
from datetime import datetime
april_9 = datetime(2016, 4, 9)
self.nba_stats = nba.Stats(april_9, april_9)
self.expected_query_url = (
"/stats/leaguedashptstats?"
"College=&Conference=&Country=&DateFrom=04%2F09%2F2016&"
"DateTo=04%2F09%2F2016&Division=&DraftPick=&DraftYear=&"
"GameScope=&Height=&LastNGames=0&LeagueID=00&Location=&"
"Month=0&OpponentTeamID=0&Outcome=&PORound=0&PerMode=Totals&"
"PlayerExperience=&PlayerOrTeam=Player&PlayerPosition=&"
"PtMeasureType=SpeedDistance&Season=2015-16&SeasonSegment=&"
"SeasonType=Regular+Season&StarterBench=&TeamID=0&"
"VsConference=&VsDivision=&Weight="
)
pass
def tearDown(self):
del self.nba_stats
pass
def test_build_query_url(self):
actual = self.nba_stats._Stats__build_query_url()
self.assertEqual(actual, self.expected_query_url)
def test_send_get_request(self):
connection = self.nba_stats._Stats__send_get_request(
self.expected_query_url)
actual = connection.getresponse().status
self.assertEqual(actual, 200)
connection.close()
def test_download(self):
data = json.loads(self.nba_stats.download())
expected = [
'PLAYER_ID', 'PLAYER_NAME', 'TEAM_ID', 'TEAM_ABBREVIATION',
'GP', 'W', 'L', 'MIN', 'DIST_FEET', 'DIST_MILES',
'DIST_MILES_OFF', 'DIST_MILES_DEF', 'AVG_SPEED',
'AVG_SPEED_OFF', 'AVG_SPEED_DEF'
]
actual = data['resultSets'][0]['headers']
self.assertEqual(actual, expected)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| gpl-3.0 |
onitake/ansible | lib/ansible/module_utils/oneview.py | 23 | 18876 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
import abc
import collections
import json
import os
import traceback
try:
from hpOneView.oneview_client import OneViewClient
HAS_HPE_ONEVIEW = True
except ImportError:
HAS_HPE_ONEVIEW = False
from ansible.module_utils import six
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.common._collections_compat import Mapping
def transform_list_to_dict(list_):
"""
Transforms a list into a dictionary, putting values as keys.
:arg list list_: List of values
:return: dict: dictionary built
"""
ret = {}
if not list_:
return ret
for value in list_:
if isinstance(value, Mapping):
ret.update(value)
else:
ret[to_native(value, errors='surrogate_or_strict')] = True
return ret
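def _example_transform_list_to_dict():
    """Illustrative sketch of the transformation above: plain values become
    keys mapped to True, while dict items are merged as-is.
    """
    return transform_list_to_dict(['one', 'two', {'three': 3}])
    # -> {'one': True, 'two': True, 'three': 3}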
def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
"""
Merge two lists by the key. It basically:
1. Adds the items that are present on updated_list and are absent on original_list.
2. Removes items that are absent on updated_list and are present on original_list.
3. For all items that are in both lists, overwrites the values from the original item by the updated item.
:arg list original_list: original list.
:arg list updated_list: list with changes.
:arg str key: unique identifier.
:arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge,
if its values are null.
:return: list: Lists merged.
"""
ignore_when_null = [] if ignore_when_null is None else ignore_when_null
if not original_list:
return updated_list
items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list])
merged_items = collections.OrderedDict()
for item in updated_list:
item_key = item[key]
if item_key in items_map:
for ignored_key in ignore_when_null:
if ignored_key in item and item[ignored_key] is None:
item.pop(ignored_key)
merged_items[item_key] = items_map[item_key]
merged_items[item_key].update(item)
else:
merged_items[item_key] = item
return list(merged_items.values())
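# Illustrative merge with assumed values, keyed on 'id' (not part of the original
# module); item 1 is dropped because it is absent from the updated list:
#   merge_list_by_key([{'id': 1, 'x': 10}, {'id': 2, 'x': 20}],
#                     [{'id': 2, 'x': 99}, {'id': 3, 'x': 30}], key='id')
#   -> [{'id': 2, 'x': 99}, {'id': 3, 'x': 30}]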
def _str_sorted(obj):
if isinstance(obj, Mapping):
return json.dumps(obj, sort_keys=True)
else:
return str(obj)
def _standardize_value(value):
"""
Convert value to string to enhance the comparison.
:arg value: Any object type.
:return: str: Converted value.
"""
if isinstance(value, float) and value.is_integer():
# Workaround to avoid erroneous comparison between int and float
# Removes zero from integer floats
value = int(value)
return str(value)
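# Illustrative conversions with assumed values (not part of the original module):
#   _standardize_value(2.0)  -> '2'    (integer floats lose the trailing zero)
#   _standardize_value(True) -> 'True'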
class OneViewModuleException(Exception):
"""
OneView base Exception.
Attributes:
msg (str): Exception message.
oneview_response (dict): OneView rest response.
"""
def __init__(self, data):
self.msg = None
self.oneview_response = None
if isinstance(data, six.string_types):
self.msg = data
else:
self.oneview_response = data
if data and isinstance(data, dict):
self.msg = data.get('message')
if self.oneview_response:
Exception.__init__(self, self.msg, self.oneview_response)
else:
Exception.__init__(self, self.msg)
class OneViewModuleTaskError(OneViewModuleException):
"""
OneView Task Error Exception.
Attributes:
msg (str): Exception message.
error_code (str): A code which uniquely identifies the specific error.
"""
def __init__(self, msg, error_code=None):
super(OneViewModuleTaskError, self).__init__(msg)
self.error_code = error_code
class OneViewModuleValueError(OneViewModuleException):
"""
OneView Value Error.
The exception is raised when the data contains an inappropriate value.
Attributes:
msg (str): Exception message.
"""
pass
class OneViewModuleResourceNotFound(OneViewModuleException):
"""
OneView Resource Not Found Exception.
The exception is raised when an associated resource was not found.
Attributes:
msg (str): Exception message.
"""
pass
@six.add_metaclass(abc.ABCMeta)
class OneViewModuleBase(object):
MSG_CREATED = 'Resource created successfully.'
MSG_UPDATED = 'Resource updated successfully.'
MSG_DELETED = 'Resource deleted successfully.'
MSG_ALREADY_PRESENT = 'Resource is already present.'
MSG_ALREADY_ABSENT = 'Resource is already absent.'
MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
HPE_ONEVIEW_SDK_REQUIRED = 'HPE OneView Python SDK is required for this module.'
ONEVIEW_COMMON_ARGS = dict(
config=dict(type='path'),
hostname=dict(type='str'),
username=dict(type='str'),
password=dict(type='str', no_log=True),
api_version=dict(type='int'),
image_streamer_hostname=dict(type='str')
)
ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
resource_client = None
def __init__(self, additional_arg_spec=None, validate_etag_support=False):
"""
OneViewModuleBase constructor.
:arg dict additional_arg_spec: Additional argument spec definition.
:arg bool validate_etag_support: Enables support to eTag validation.
"""
argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
self._check_hpe_oneview_sdk()
self._create_oneview_client()
self.state = self.module.params.get('state')
self.data = self.module.params.get('data')
# Preload params for get_all - used by facts
self.facts_params = self.module.params.get('params') or {}
# Preload options as dict - used by facts
self.options = transform_list_to_dict(self.module.params.get('options'))
self.validate_etag_support = validate_etag_support
def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
merged_arg_spec = dict()
merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
if validate_etag_support:
merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
if additional_arg_spec:
merged_arg_spec.update(additional_arg_spec)
return merged_arg_spec
def _check_hpe_oneview_sdk(self):
if not HAS_HPE_ONEVIEW:
self.module.fail_json(msg=self.HPE_ONEVIEW_SDK_REQUIRED)
def _create_oneview_client(self):
if self.module.params.get('hostname'):
config = dict(ip=self.module.params['hostname'],
credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
api_version=self.module.params['api_version'],
image_streamer_ip=self.module.params['image_streamer_hostname'])
self.oneview_client = OneViewClient(config)
elif not self.module.params['config']:
self.oneview_client = OneViewClient.from_environment_variables()
else:
self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
@abc.abstractmethod
def execute_module(self):
"""
Abstract method, must be implemented by the inheritor.
        This method is called from the run method. It should contain the module logic
:return: dict: It must return a dictionary with the attributes for the module result,
such as ansible_facts, msg and changed.
"""
pass
def run(self):
"""
Common implementation of the OneView run modules.
        It calls the inheritor 'execute_module' function and sends the result back to Ansible.
It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
"""
try:
if self.validate_etag_support:
if not self.module.params.get('validate_etag'):
self.oneview_client.connection.disable_etag_validation()
result = self.execute_module()
if "changed" not in result:
result['changed'] = False
self.module.exit_json(**result)
except OneViewModuleException as exception:
error_msg = '; '.join(to_native(e) for e in exception.args)
self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
def resource_absent(self, resource, method='delete'):
"""
Generic implementation of the absent state for the OneView resources.
It checks if the resource needs to be removed.
:arg dict resource: Resource to delete.
:arg str method: Function of the OneView client that will be called for resource deletion.
Usually delete or remove.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
if resource:
getattr(self.resource_client, method)(resource)
return {"changed": True, "msg": self.MSG_DELETED}
else:
return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
def get_by_name(self, name):
"""
Generic get by name implementation.
:arg str name: Resource name to search for.
:return: The resource found or None.
"""
result = self.resource_client.get_by('name', name)
return result[0] if result else None
def resource_present(self, resource, fact_name, create_method='create'):
"""
Generic implementation of the present state for the OneView resources.
It checks if the resource needs to be created or updated.
:arg dict resource: Resource to create or update.
        :arg str fact_name: Name of the fact returned to Ansible.
:arg str create_method: Function of the OneView client that will be called for resource creation.
Usually create or add.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
changed = False
if "newName" in self.data:
self.data["name"] = self.data.pop("newName")
if not resource:
resource = getattr(self.resource_client, create_method)(self.data)
msg = self.MSG_CREATED
changed = True
else:
merged_data = resource.copy()
merged_data.update(self.data)
if self.compare(resource, merged_data):
msg = self.MSG_ALREADY_PRESENT
else:
resource = self.resource_client.update(merged_data)
changed = True
msg = self.MSG_UPDATED
return dict(
msg=msg,
changed=changed,
ansible_facts={fact_name: resource}
)
def resource_scopes_set(self, state, fact_name, scope_uris):
"""
Generic implementation of the scopes update PATCH for the OneView resources.
It checks if the resource needs to be updated with the current scopes.
This method is meant to be run after ensuring the present state.
:arg dict state: Dict containing the data from the last state results in the resource.
It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
        :arg str fact_name: Name of the fact returned to Ansible.
:arg list scope_uris: List with all the scope URIs to be added to the resource.
:return: A dictionary with the expected arguments for the AnsibleModule.exit_json
"""
if scope_uris is None:
scope_uris = []
resource = state['ansible_facts'][fact_name]
operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
state['changed'] = True
state['msg'] = self.MSG_UPDATED
return state
def compare(self, first_resource, second_resource):
"""
Recursively compares dictionary contents equivalence, ignoring types and elements order.
Particularities of the comparison:
- Inexistent key = None
- These values are considered equal: None, empty, False
- Lists are compared value by value after a sort, if they have same size.
- Each element is converted to str before the comparison.
:arg dict first_resource: first dictionary
:arg dict second_resource: second dictionary
:return: bool: True when equal, False when different.
"""
resource1 = first_resource
resource2 = second_resource
debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
# The first resource is True / Not Null and the second resource is False / Null
if resource1 and not resource2:
self.module.log("resource1 and not resource2. " + debug_resources)
return False
# Checks all keys in first dict against the second dict
for key in resource1:
if key not in resource2:
if resource1[key] is not None:
                    # A nonexistent key is equivalent to a key present with value None
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
# If both values are null, empty or False it will be considered equal.
elif not resource1[key] and not resource2[key]:
continue
elif isinstance(resource1[key], Mapping):
# recursive call
if not self.compare(resource1[key], resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
elif isinstance(resource1[key], list):
# change comparison function to compare_list
if not self.compare_list(resource1[key], resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
# Checks all keys in the second dict, looking for missing elements
for key in resource2.keys():
if key not in resource1:
if resource2[key] is not None:
                    # A nonexistent key is equivalent to a key present with value None
self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
return False
return True
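    # Illustrative behaviour of compare() with assumed dictionaries (not part of
    # the original module):
    #   compare({'name': 'x', 'count': None}, {'name': 'x'})  -> True   (missing key == None)
    #   compare({'size': 2}, {'size': 2.0})                   -> True   (types are ignored)
    #   compare({'enabled': False}, {'enabled': True})        -> False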
def compare_list(self, first_resource, second_resource):
"""
Recursively compares lists contents equivalence, ignoring types and element orders.
Lists with same size are compared value by value after a sort,
each element is converted to str before the comparison.
:arg list first_resource: first list
:arg list second_resource: second list
:return: True when equal; False when different.
"""
resource1 = first_resource
resource2 = second_resource
debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
# The second list is null / empty / False
if not resource2:
self.module.log("resource 2 is null. " + debug_resources)
return False
if len(resource1) != len(resource2):
self.module.log("resources have different length. " + debug_resources)
return False
resource1 = sorted(resource1, key=_str_sorted)
resource2 = sorted(resource2, key=_str_sorted)
for i, val in enumerate(resource1):
if isinstance(val, Mapping):
# change comparison function to compare dictionaries
if not self.compare(val, resource2[i]):
self.module.log("resources are different. " + debug_resources)
return False
elif isinstance(val, list):
# recursive call
if not self.compare_list(val, resource2[i]):
self.module.log("lists are different. " + debug_resources)
return False
elif _standardize_value(val) != _standardize_value(resource2[i]):
self.module.log("values are different. " + debug_resources)
return False
# no differences found
return True
| gpl-3.0 |
Emsu/prophet | examples/tutorial/__main__.py | 3 | 1782 | import datetime as dt
from prophet import Prophet
from prophet.data import YahooCloseData
from prophet.analyze import default_analyzers
from bollinger import BollingerData
from eventstudy import BollingerEventStudy
from eventstudy import OrderGenerator
# Based on Homework #7 for Computational Investing
# http://wiki.quantsoftware.org/index.php?title=CompInvesti_Homework_7
# Here we use 2 symbols and a benchmark to reduce data pulled
# but you can use the full sp5002012.txt file from QSTK
# You will have to adjust the portfolio analyzers
# The homework solution's analyzers start the analysis
# when the first trade is conducted instead of the entire
# duration of the backtest.
prophet = Prophet()
symbols = ["AAPL", "XOM", "SPX"]
prophet.set_universe(symbols)
prophet.register_data_generators(YahooCloseData(),
BollingerData(),
BollingerEventStudy())
prophet.set_order_generator(OrderGenerator())
backtest = prophet.run_backtest(start=dt.datetime(2008, 1, 1),
end=dt.datetime(2009, 12, 31), lookback=20)
prophet.register_portfolio_analyzers(default_analyzers)
analysis = prophet.analyze_backtest(backtest)
print(analysis)
# +----------------------------------------+
# | sharpe | -0.851247401074 |
# | average_return | -2.04368321273e-07 |
# | cumulative_return | -0.000103 |
# | volatility | 3.81116761073e-06 |
# +----------------------------------------+
# Generate orders for you to execute today
# Using Dec 31, 2009 as the date because there might be no data for today's
# date (the market might not be open) and we don't want the examples to fail.
today = dt.datetime(2009, 12, 31)
print(prophet.generate_orders(today, lookback=20))
| bsd-3-clause |
markeTIC/OCB | addons/website_mail/tests/__init__.py | 261 | 1081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
#    Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import test_controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yrizk/django-blog | blogvenv/lib/python3.4/site-packages/django/core/management/commands/createcachetable.py | 96 | 3927 | from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import (
DEFAULT_DB_ALIAS, connections, models, router, transaction,
)
from django.db.utils import DatabaseError
from django.utils.encoding import force_text
class Command(BaseCommand):
help = "Creates the tables needed to use the SQL cache backend."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='table_name', nargs='*',
help='Optional table names. Otherwise, settings.CACHES is used to '
'find cache tables.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database onto which the cache tables will be '
'installed. Defaults to the "default" database.')
def handle(self, *tablenames, **options):
db = options.get('database')
self.verbosity = int(options.get('verbosity'))
if len(tablenames):
# Legacy behavior, tablename specified as argument
for tablename in tablenames:
self.create_table(db, tablename)
else:
for cache_alias in settings.CACHES:
cache = caches[cache_alias]
if isinstance(cache, BaseDatabaseCache):
self.create_table(db, cache._table)
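    # Illustrative invocations (the table name and the 'replica' database alias
    # are assumptions, not part of this command):
    #   python manage.py createcachetable
    #   python manage.py createcachetable my_cache_table --database=replica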
def create_table(self, database, tablename):
cache = BaseDatabaseCache(tablename, {})
if not router.allow_migrate_model(database, cache.cache_model_class):
return
connection = connections[database]
if tablename in connection.introspection.table_names():
if self.verbosity > 0:
self.stdout.write("Cache table '%s' already exists." % tablename)
return
fields = (
# "key" is a reserved word in MySQL, so use "cache_key" instead.
models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
models.TextField(name='value'),
models.DateTimeField(name='expires', db_index=True),
)
table_output = []
index_output = []
qn = connection.ops.quote_name
for f in fields:
field_output = [qn(f.name), f.db_type(connection=connection)]
field_output.append("%sNULL" % ("NOT " if not f.null else ""))
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = "UNIQUE " if f.unique else ""
index_output.append("CREATE %sINDEX %s ON %s (%s);" %
(unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
qn(f.name)))
table_output.append(" ".join(field_output))
full_statement = ["CREATE TABLE %s (" % qn(tablename)]
for i, line in enumerate(table_output):
full_statement.append(' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
full_statement.append(');')
with transaction.atomic(using=database,
savepoint=connection.features.can_rollback_ddl):
with connection.cursor() as curs:
try:
curs.execute("\n".join(full_statement))
except DatabaseError as e:
raise CommandError(
"Cache table '%s' could not be created.\nThe error was: %s." %
(tablename, force_text(e)))
for statement in index_output:
curs.execute(statement)
if self.verbosity > 1:
self.stdout.write("Cache table '%s' created." % tablename)
| apache-2.0 |
simongoffin/website_version | openerp/addons/base/ir/ir_model.py | 8 | 59779 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import time
import types
import openerp
import openerp.modules.registry
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
# Avoid too many nested `if`s below, as RedHat's Python 2.6
    # breaks on it. See bug 939653.
return sorted([(k,k) for k,v in fields.__dict__.iteritems()
if type(v) == types.TypeType and \
issubclass(v, fields._column) and \
v != fields._column and \
not v._deprecated and \
not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
#pseudo-method used by fields.function in ir.model/ir.model.fields
module_pool = self.pool["ir.module.module"]
installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
installed_modules = set(x['name'] for x in installed_module_names)
result = {}
xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
for k,v in xml_ids.iteritems():
result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
return result
class ir_model(osv.osv):
_name = 'ir.model'
_description = "Models"
_order = 'model'
def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids, context=context)
res = dict.fromkeys(ids)
for model in models:
if model.model in self.pool:
res[model.id] = self.pool[model.model].is_transient()
else:
_logger.error('Missing model %s' % (model.model, ))
return res
def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
if not domain:
return []
__, operator, value = domain[0]
if operator not in ['=', '!=']:
            raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with the = and != operators.'))
value = bool(value) if operator == '=' else not bool(value)
all_model_ids = self.search(cr, uid, [], context=context)
is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
models = self.browse(cr, uid, ids)
res = {}
for model in models:
res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
return res
_columns = {
'name': fields.char('Model Description', translate=True, required=True),
'model': fields.char('Model', required=True, select=1),
'info': fields.text('Information'),
'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
fnct_search=_search_osv_memory,
help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
}
_defaults = {
'model': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
}
def _check_model_name(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context=context):
if model.state=='manual':
if not model.model.startswith('x_'):
return False
if not re.match('^[a-z_A-Z0-9.]+$',model.model):
return False
return True
def _model_name_msg(self, cr, uid, ids, context=None):
return _('The Object name must start with x_ and not contain any special character !')
_constraints = [
(_check_model_name, _model_name_msg, ['model']),
]
_sql_constraints = [
('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
]
def _search_display_name(self, operator, value):
# overridden to allow searching both on model name (model field) and
# model description (name field)
return ['|', ('model', operator, value), ('name', operator, value)]
def _drop_table(self, cr, uid, ids, context=None):
for model in self.browse(cr, uid, ids, context):
model_pool = self.pool[model.model]
cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
result = cr.fetchone()
if result and result[0] == 'v':
cr.execute('DROP view %s' % (model_pool._table,))
elif result and result[0] == 'r':
cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module tables
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG):
for model in self.browse(cr, user, ids, context):
if model.state != 'manual':
raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
self._drop_table(cr, user, ids, context)
res = super(ir_model, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
# only reload pool for normal unlink. For module uninstall the
# reload is done independently in openerp.modules.loading
cr.commit() # must be committed before reloading registry in new cursor
openerp.modules.registry.RegistryManager.new(cr.dbname)
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context:
context = dict(context)
context.pop('__last_update', None)
# Filter out operations 4 link from field id, because openerp-web
# always write (4,id,False) even for non dirty items
if 'field_id' in vals:
vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
return super(ir_model,self).write(cr, user, ids, vals, context)
def create(self, cr, user, vals, context=None):
if context is None:
context = {}
if context and context.get('manual'):
vals['state']='manual'
res = super(ir_model,self).create(cr, user, vals, context)
if vals.get('state','base')=='manual':
self.instanciate(cr, user, vals['model'], context)
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
self.pool[vals['model']]._auto_init(cr, ctx)
self.pool[vals['model']]._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def instanciate(self, cr, user, model, context=None):
class x_custom_model(osv.osv):
_custom = True
if isinstance(model, unicode):
model = model.encode('utf-8')
x_custom_model._name = model
x_custom_model._module = False
a = x_custom_model._build_model(self.pool, cr)
if not a._columns:
x_name = 'id'
elif 'x_name' in a._columns.keys():
x_name = 'x_name'
else:
x_name = a._columns.keys()[0]
x_custom_model._rec_name = x_name
a._rec_name = x_name
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade','Cascade'),('set null','Set NULL')], 'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
def _size_gt_zero_msg(self, cr, user, ids, context=None):
return _('Size of the field can never be less than 0 !')
_sql_constraints = [
('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
model._columns.pop(field.name, None)
# remove m2m relation table for custom fields
# we consider the m2m relation is only one way as it's not possible
# to specify the relation table in the interface for custom fields
# TODO master: maybe use ir.model.relations for custom fields
if field.state == 'manual' and field.ttype == 'many2many':
rel_name = self.pool[field.model]._all_columns[field.name].column._rel
cr.execute('DROP table "%s"' % (rel_name))
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
cr.commit()
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def create(self, cr, user, vals, context=None):
if 'model_id' in vals:
model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
vals['model'] = model_data.model
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
if vals.get('ttype', False) == 'selection':
if not vals.get('selection',False):
raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
self._check_selection(cr, user, vals['selection'], context=context)
res = super(ir_model_fields,self).create(cr, user, vals, context)
if vals.get('state','base') == 'manual':
if not vals['name'].startswith('x_'):
raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
if vals['model'] in self.pool:
if vals['model'].startswith('x_') and vals['name'] == 'x_name':
self.pool[vals['model']]._rec_name = 'x_name'
self.pool[vals['model']].__init__(self.pool, cr)
#Added context to _auto_init for special treatment to custom field for select_level
ctx = dict(context,
field_name=vals['name'],
field_state='manual',
select=vals.get('select_level', '0'),
update_custom_fields=True)
self.pool[vals['model']]._auto_init(cr, ctx)
self.pool[vals['model']]._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
def write(self, cr, user, ids, vals, context=None):
if context is None:
context = {}
if context and context.get('manual',False):
vals['state'] = 'manual'
#For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
if 'serialization_field_id' in vals or 'name' in vals:
for field in self.browse(cr, user, ids, context=context):
if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
if field.serialization_field_id and (field.name != vals['name']):
raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)
column_rename = None # if set, *one* column can be renamed here
models_patch = {} # structs of (obj, [(field, prop, change_to),..])
# data to be updated on the orm model
# static table of properties
model_props = [ # (our-name, fields.prop, set_fn)
('field_description', 'string', tools.ustr),
('required', 'required', bool),
('readonly', 'readonly', bool),
('domain', '_domain', eval),
('size', 'size', int),
('on_delete', 'ondelete', str),
('translate', 'translate', bool),
('selectable', 'selectable', bool),
('select_level', 'select', int),
('selection', 'selection', eval),
]
if vals and ids:
checked_selection = False # need only check it once, so defer
for item in self.browse(cr, user, ids, context=context):
obj = self.pool.get(item.model)
if item.state != 'manual':
raise except_orm(_('Error!'),
_('Properties of base fields cannot be altered in this manner! '
'Please modify them through Python code, '
'preferably through a custom addon!'))
if item.ttype == 'selection' and 'selection' in vals \
and not checked_selection:
self._check_selection(cr, user, vals['selection'], context=context)
checked_selection = True
final_name = item.name
if 'name' in vals and vals['name'] != item.name:
# We need to rename the column
if column_rename:
raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
if vals['name'] in obj._columns:
raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
raise ValueError('Invalid character in column name')
column_rename = (obj, (obj._table, item.name, vals['name']))
final_name = vals['name']
if 'model_id' in vals and vals['model_id'] != item.model_id:
raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))
if 'ttype' in vals and vals['ttype'] != item.ttype:
raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
"Please drop it and create it again!"))
# We don't check the 'state', because it might come from the context
# (thus be set for multiple fields) and will be ignored anyway.
if obj is not None:
models_patch.setdefault(obj._name, (obj,[]))
# find out which properties (per model) we need to update
for field_name, field_property, set_fn in model_props:
if field_name in vals:
property_value = set_fn(vals[field_name])
if getattr(obj._columns[item.name], field_property) != property_value:
models_patch[obj._name][1].append((final_name, field_property, property_value))
# our dict is ready here, but no properties are changed so far
# These shall never be written (modified)
for column_name in ('model_id', 'model', 'state'):
if column_name in vals:
del vals[column_name]
res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)
if column_rename:
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % column_rename[1])
# This is VERY risky, but let us have this feature:
# we want to change the key of column in obj._columns dict
col = column_rename[0]._columns.pop(column_rename[1][1]) # take object out, w/o copy
column_rename[0]._columns[column_rename[1][2]] = col
if models_patch:
# We have to update _columns of the model(s) and then call their
# _auto_init to sync the db with the model. Hopefully, since write()
# was called earlier, they will be in-sync before the _auto_init.
# Anything we don't update in _columns now will be reset from
# the model into ir.model.fields (db).
ctx = dict(context, select=vals.get('select_level', '0'),
update_custom_fields=True)
for __, patch_struct in models_patch.items():
obj = patch_struct[0]
for col_name, col_prop, val in patch_struct[1]:
setattr(obj._columns[col_name], col_prop, val)
obj._auto_init(cr, ctx)
obj._auto_end(cr, ctx) # actually create FKs!
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models.
"""
_name = 'ir.model.constraint'
_columns = {
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and not name in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module).'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
grouparr = group.split('.')
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
if uid==1:
# User root have all accesses
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return r or False
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
pass
def call_cache_clearing_methods(self, cr):
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def _display_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
result2 = {}
for res in self.browse(cr, uid, ids, context=context):
if res.id:
result.setdefault(res.model, {})
result[res.model][res.res_id] = res.id
result2[res.id] = False
for model in result:
try:
r = dict(self.pool[model].name_get(cr, uid, result[model].keys(), context=context))
for key,val in result[model].items():
result2[val] = r.get(key, False)
except:
# some object have no valid name_get implemented, we accept this
pass
return result2
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'display_name': fields.function(_display_name_get, type='char', string='Record Name'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
        if not found and raise_if_not_found is False, return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
            raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
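    # Illustrative lookup, assuming the base module's 'group_user' record exists;
    # the returned database id varies per installation:
    #   self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'group_user')
    #   -> ('res.groups', <id>)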
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
            If not found, a ValueError is raised.
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
except:
id = False
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
                # In update mode, do not update a record if its ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, uid, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
if not res:
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
ir_values_obj.invalidate_cache(cr, uid, ['value'])
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
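            # only delete a (model, res_id) pair when no ir.model.data entry
            # outside the modules being removed still references the record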
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
        It is meant to remove records that are no longer present in the
        updated data. Such records are recognised as the ones with an xml id
        and a module in ir_model_data, with noupdate set to false, but not
present in self.loads.
"""
if not modules:
return True
to_unlink = []
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC""",
(tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module,name) not in self.loads:
to_unlink.append((model,res_id))
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if model in self.pool:
_logger.info('Deleting %s@%s', res_id, model)
self.pool[model].unlink(cr, uid, [res_id])
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
diorcety/intellij-community | python/helpers/pydev/pydev_imports.py | 53 | 2519 | from pydevd_constants import USE_LIB_COPY, izip
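# Compatibility imports: prefer the bundled copies in _pydev_imps (when
# USE_LIB_COPY is set) and fall back to the Python 2 or Python 3 stdlib names.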
try:
try:
if USE_LIB_COPY:
from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
else:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
except ImportError:
from _pydev_imps import _pydev_xmlrpclib as xmlrpclib
try:
try:
if USE_LIB_COPY:
from _pydev_imps._pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
else:
from SimpleXMLRPCServer import SimpleXMLRPCServer
except ImportError:
from xmlrpc.server import SimpleXMLRPCServer
except ImportError:
from _pydev_imps._pydev_SimpleXMLRPCServer import SimpleXMLRPCServer
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
execfile=execfile #Not in Py3k
except NameError:
from _pydev_imps._pydev_execfile import execfile
try:
if USE_LIB_COPY:
from _pydev_imps import _pydev_Queue as _queue
else:
import Queue as _queue
except:
import queue as _queue #@UnresolvedImport
try:
from pydevd_exec import Exec
except:
from pydevd_exec2 import Exec
try:
from urllib import quote, quote_plus, unquote_plus
except:
from urllib.parse import quote, quote_plus, unquote_plus #@UnresolvedImport
import os
try:
relpath = os.path.relpath
except:
# Only there from 2.6 onwards... let's provide a replacement.
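    # Fallback implementation: split both paths into components, find the
    # common prefix and build the relative path from the remaining segments.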
def _split_path(path):
parts = []
loc = path
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = os.path.split(prev)
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(path, start=None):
if start is None:
start = os.curdir
origin = os.path.abspath(path)
start = os.path.abspath(start)
orig_list = _split_path(os.path.normcase(origin))
dest_list = _split_path(start)
if orig_list[0] != os.path.normcase(dest_list[0]):
return start
i = 0
for start_seg, dest_seg in izip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
segments = [os.pardir] * (len(orig_list) - i)
segments += dest_list[i:]
if len(segments) == 0:
return os.curdir
else:
return os.path.join(*segments)
| apache-2.0 |
python-ivi/python-ivi | ivi/agilent/agilentU2004A.py | 2 | 1628 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2015-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentU2000 import *
class agilentU2004A(agilentU2000):
"Agilent U2004A RF power sensor driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'U2004A')
super(agilentU2004A, self).__init__(*args, **kwargs)
self._channel_count = 1
self._frequency_low = 9e3
self._frequency_high = 6e9
self._power_low = -60
self._power_high = 20
self._init_channels()
| mit |
arrabito/DIRAC | Core/DISET/private/MessageBroker.py | 8 | 15445 | """ Message broker used by DISET to exchange messages over persistent connections, dispatching incoming requests to callbacks and matching responses with pending acknowledgements.
"""
import threading
import select
import time
import socket
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.private.TransportPool import getGlobalTransportPool
from DIRAC.Core.Utilities.ThreadPool import getGlobalThreadPool
from DIRAC.Core.Utilities.ReturnValues import isReturnStructure
from DIRAC.Core.DISET.private.MessageFactory import MessageFactory, DummyMessage
class MessageBroker( object ):
def __init__( self, name, transportPool = None, threadPool = None ):
self.__name = name
self.__messageTransports = {}
self.__msgCounter = 0
self.__msgCounterLock = threading.Lock()
self.__responseCallbacks = {}
self.__msgInTransport = {}
self.__listenPersistConn = False
self.__useMessageObjects = True
self.__callbacksLock = threading.Condition()
self.__trInOutLock = threading.Lock()
self.__msgFactory = MessageFactory()
self.__log = gLogger.getSubLogger( "MSGBRK" )
if not transportPool:
transportPool = getGlobalTransportPool()
self.__trPool = transportPool
if not threadPool:
threadPool = getGlobalThreadPool()
self.__threadPool = threadPool
self.__listeningForMessages = False
self.__listenThread = None
def getNumConnections( self ):
return len( self.__messageTransports )
def getMsgFactory( self ):
return self.__msgFactory
def useMessageObjects( self, bD ):
self.__useMessageObjects = bD
# Message id generation
def __generateMsgId( self ):
self.__msgCounterLock.acquire()
try:
msgId = "%s:%d" % ( self.__name, self.__msgCounter )
self.__msgCounter += 1
return msgId
finally:
self.__msgCounterLock.release()
def getTransportPool( self ):
return self.__trPool
# Add and remove transport to/from broker
def addTransport( self, transport, *args, **kwargs ):
trid = self.__trPool.add( transport )
try:
result = self.addTransportId( trid, *args, **kwargs )
except Exception as e:
gLogger.exception( "Cannot add transport id", lException = e )
result = S_ERROR( "Cannot add transport id" )
if not result[ 'OK' ]:
self.__trPool.remove( trid )
return result
return S_OK( trid )
def addTransportId( self, trid, svcName,
receiveMessageCallback = None, disconnectCallback = None,
idleRead = False, listenToConnection = True ):
self.__trInOutLock.acquire()
try:
if trid in self.__messageTransports:
return S_OK()
tr = self.__trPool.get( trid )
if not tr:
return S_ERROR( "No transport with id %s registered" % trid )
self.__messageTransports[ trid ] = { 'transport' : tr,
'svcName' : svcName,
'cbReceiveMessage': receiveMessageCallback,
'cbDisconnect' : disconnectCallback,
'listen' : listenToConnection,
'idleRead' : idleRead }
self.__startListeningThread()
return S_OK()
finally:
self.__trInOutLock.release()
def listenToTransport( self, trid, listen = True ):
self.__trInOutLock.acquire()
try:
if trid in self.__messageTransports:
self.__messageTransports[ trid ][ 'listen' ] = listen
self.__startListeningThread()
finally:
self.__trInOutLock.release()
# Listen to connections
def __startListeningThread( self ):
threadDead = self.__listeningForMessages and self.__listenThread is not None and not self.__listenThread.isAlive()
if not self.__listeningForMessages or threadDead:
self.__listeningForMessages = True
self.__listenThread = threading.Thread( target = self.__listenAutoReceiveConnections )
self.__listenThread.setDaemon( True )
self.__listenThread.start()
def __listenAutoReceiveConnections( self ):
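    # Poll every transport flagged for listening with select(); queue the
    # incoming data of any readable transport for processing in the thread
    # pool, and stop the loop once no transport is listening anymore.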
while self.__listeningForMessages:
self.__trInOutLock.acquire()
try:
sIdList = []
for trid in self.__messageTransports:
mt = self.__messageTransports[ trid ]
if not mt[ 'listen' ]:
continue
sIdList.append( ( trid, mt[ 'transport' ].getSocket() ) )
if not sIdList:
self.__listeningForMessages = False
return
finally:
self.__trInOutLock.release()
try:
try:
inList, _outList, _exList = select.select( [ pos[1] for pos in sIdList ] , [], [], 1 )
if len( inList ) == 0:
continue
except socket.error:
time.sleep( 0.001 )
continue
except select.error:
time.sleep( 0.001 )
continue
except Exception as e:
gLogger.exception( "Exception while selecting persistent connections", lException = e )
continue
for sock in inList:
for iPos in range( len( sIdList ) ):
if sock == sIdList[ iPos ][1]:
trid = sIdList[ iPos ][0]
if trid in self.__messageTransports:
result = self.__receiveMsgDataAndQueue( trid )
if not result[ 'OK' ]:
self.removeTransport( trid )
break
#Process received data functions
def __receiveMsgDataAndQueue( self, trid ):
#Receive
result = self.__trPool.receive( trid,
blockAfterKeepAlive = False,
idleReceive = self.__messageTransports[ trid ][ 'idleRead' ] )
self.__log.debug( "[trid %s] Received data: %s" % ( trid, str( result ) ) )
#If error close transport and exit
if not result[ 'OK' ]:
self.__log.debug( "[trid %s] ERROR RCV DATA %s" % ( trid, result[ 'Message' ] ) )
gLogger.warn( "Error while receiving message", "from %s : %s" % ( self.__trPool.get( trid ).getFormattedCredentials(),
result[ 'Message' ] ) )
return self.removeTransport( trid )
self.__threadPool.generateJobAndQueueIt( self.__processIncomingData,
args = ( trid, result ) )
return S_OK()
def __processIncomingData( self, trid, receivedResult ):
#If keep alive, return OK
if 'keepAlive' in receivedResult and receivedResult[ 'keepAlive' ]:
return S_OK()
#If idle read return
self.__trInOutLock.acquire()
try:
idleRead = self.__messageTransports[ trid ][ 'idleRead' ]
except KeyError:
return S_ERROR( "Transport %s unknown" % trid )
finally:
self.__trInOutLock.release()
if idleRead:
if receivedResult[ 'Value' ]:
gLogger.fatal( "OOOops. Idle read has returned data!" )
return S_OK()
if not receivedResult[ 'Value' ]:
self.__log.debug( "Transport %s closed connection" % trid )
return self.removeTransport( trid )
#This is a message req/resp
msg = receivedResult[ 'Value' ]
#Valid message?
if 'request' not in msg:
gLogger.warn( "Received data does not seem to be a message !!!!" )
return self.removeTransport( trid )
#Decide if it's a response or a request
if msg[ 'request' ]:
      #If the message has an id, acknowledge reception to the sender
if 'id' in msg:
self.__sendResponse( trid, msg[ 'id' ], S_OK() )
#Process msg
result = self.__processIncomingRequest( trid, msg )
else:
result = self.__processIncomingResponse( trid, msg )
#If error close the transport
if not result[ 'OK' ]:
gLogger.info( "Closing transport because of error while processing message", result[ 'Message' ] )
return self.removeTransport( trid )
return S_OK()
def __processIncomingRequest( self, trid, msg ):
self.__trInOutLock.acquire()
try:
rcvCB = self.__messageTransports[ trid ][ 'cbReceiveMessage' ]
except KeyError:
return S_ERROR( "Transport %s unknown" % trid )
finally:
self.__trInOutLock.release()
if not rcvCB:
gLogger.fatal( "Transport %s does not have a callback defined and a message arrived!" % trid )
return S_ERROR( "No message was expected in for this transport" )
#Check message has id and name
for requiredField in [ 'name' ]:
if requiredField not in msg:
gLogger.error( "Message does not have required field", requiredField )
return S_ERROR( "Message does not have %s" % requiredField )
#Load message
if 'attrs' in msg:
attrs = msg[ 'attrs' ]
if not isinstance( attrs, (tuple, list) ):
return S_ERROR( "Message args has to be a tuple or a list, not %s" % type( attrs ) )
else:
attrs = None
#Do we "unpack" or do we send the raw data to the callback?
if self.__useMessageObjects:
result = self.__msgFactory.createMessage( self.__messageTransports[ trid ][ 'svcName' ], msg[ 'name' ], attrs )
if not result[ 'OK' ]:
return result
msgObj = result[ 'Value' ]
else:
msgObj = DummyMessage( msg )
#Is msg ok?
if not msgObj.isOK():
return S_ERROR( "Messsage is invalid" )
try:
#Callback it and return response
result = rcvCB( trid, msgObj )
if not isReturnStructure( result ):
return S_ERROR( "Request function does not return a result structure" )
return result
except Exception as e:
#Whoops. Show exception and return
gLogger.exception( "Exception while processing message %s" % msg[ 'name' ], lException = e )
return S_ERROR( "Exception while processing message %s: %s" % ( msg[ 'name' ], str( e ) ) )
def __processIncomingResponse( self, trid, msg ):
#This is a message response
for requiredField in ( 'id', 'result' ):
if requiredField not in msg:
gLogger.error( "Message does not have required field", requiredField )
return S_ERROR( "Message does not have %s" % requiredField )
if not isReturnStructure( msg[ 'result' ] ):
return S_ERROR( "Message response did not return a result structure" )
return self.__notifyCallback( msg[ 'id' ], msg[ 'result' ] )
#Sending functions
def __sendResponse( self, trid, msgId, msgResult ):
msgResponse = { 'request' : False, 'id' : msgId, 'result' : msgResult }
_result = self.__trPool.send( trid, S_OK( msgResponse ) )
def sendMessage( self, trid, msgObj ):
if not msgObj.isOK():
return S_ERROR( "Message is not ready to be sent" )
result = self.__sendMessage( trid, msgObj )
if not result[ 'OK' ]:
self.removeTransport( trid )
return result
def __sendMessage( self, trid, msgObj ):
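    # Serialize the message and send it through the transport pool; when an
    # acknowledgement is requested, register a response callback keyed by a
    # generated message id and wait for the matching response.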
if not self.__trPool.exists( trid ):
return S_ERROR( "Not transport with id %s defined for messaging" % trid )
msg = { 'request' : True, 'name' : msgObj.getName() }
attrs = msgObj.dumpAttrs()[ 'Value' ]
msg[ 'attrs' ] = attrs
waitForAck = msgObj.getWaitForAck()
if not waitForAck:
return self.__trPool.send( trid, S_OK( msg ) )
msgId = self.__generateMsgId()
msg[ 'id' ] = msgId
self.__generateMessageResponse( trid, msgId )
result = self.__trPool.send( trid, S_OK( msg ) )
#Lock and generate and wait
self.__callbacksLock.acquire()
try:
if not result[ 'OK' ]:
#Release lock and exit
self.__clearCallback( msgId )
return result
return self.__waitForMessageResponse( msgId )
finally:
self.__callbacksLock.release()
#Callback nightmare
  #Lock needs to have been acquired prior to calling this method
def __generateMessageResponse( self, trid, msgId ):
self.__callbacksLock.acquire()
try:
if msgId in self.__responseCallbacks:
return self.__responseCallbacks[ msgId ]
if trid not in self.__msgInTransport:
self.__msgInTransport[ trid ] = set()
self.__msgInTransport[ trid ].add( msgId )
self.__responseCallbacks[ msgId ] = { 'creationTime' : time.time(),
'trid' : trid
}
return self.__responseCallbacks[ msgId ]
finally:
self.__callbacksLock.release()
  #Lock needs to have been acquired prior to calling this method
def __waitForMessageResponse( self, msgId ):
if msgId not in self.__responseCallbacks:
return S_ERROR( "Invalid msg id" )
respCallback = self.__responseCallbacks[ msgId ]
while 'result' not in respCallback and time.time() - respCallback[ 'creationTime' ] < 30 :
self.__callbacksLock.wait( 30 )
self.__clearCallback( msgId )
if 'result' in respCallback:
return respCallback[ 'result' ]
return S_ERROR( "Timeout while waiting for message ack" )
def __clearCallback( self, msgId ):
if msgId not in self.__responseCallbacks:
return False
trid = self.__responseCallbacks[ msgId ][ 'trid' ]
self.__responseCallbacks.pop( msgId )
try:
self.__msgInTransport[ trid ].remove( msgId )
except KeyError:
pass
return True
  #Lock needs to have been acquired prior to calling this method
def __setCallbackResult( self, msgId, result = False ):
if msgId not in self.__responseCallbacks:
return False
self.__responseCallbacks[ msgId ][ 'result' ] = result
return True
def __notifyCallback( self, msgId, msgResult ):
self.__callbacksLock.acquire()
try:
if self.__setCallbackResult( msgId, msgResult ):
self.__callbacksLock.notifyAll()
finally:
self.__callbacksLock.release()
return S_OK()
def removeTransport( self, trid, closeTransport = True ):
#Delete from the message Transports
self.__trInOutLock.acquire()
try:
if trid not in self.__messageTransports:
return S_OK()
#Save the disconnect callback if it's there
if self.__messageTransports[ trid ][ 'cbDisconnect' ]:
cbDisconnect = self.__messageTransports[ trid ][ 'cbDisconnect' ]
else:
cbDisconnect = False
self.__messageTransports.pop( trid )
if closeTransport:
self.__trPool.close( trid )
finally:
self.__trInOutLock.release()
#Flush remaining messages
self.__callbacksLock.acquire()
try:
msgIds = False
if trid in self.__msgInTransport:
msgIds = set( self.__msgInTransport[ trid ] )
self.__msgInTransport.pop( trid )
for msgId in msgIds:
self.__setCallbackResult( msgId, S_ERROR( "Connection closed by peer" ) )
self.__callbacksLock.notifyAll()
finally:
self.__callbacksLock.release()
#Queue the disconnect CB if it's there
if cbDisconnect:
self.__threadPool.generateJobAndQueueIt( cbDisconnect,
args = ( trid, ) )
return S_OK()
class MessageSender( object ):
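  """ Convenience wrapper bound to a service name that forwards message
      creation and sending to an underlying MessageBroker instance.
  """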
def __init__( self, serviceName, msgBroker ):
self.__serviceName = serviceName
self.__msgBroker = msgBroker
def getServiceName( self ):
return self.__serviceName
def sendMessage( self, trid, msgObj ):
return self.__msgBroker.sendMessage( trid, msgObj )
def createMessage( self, msgName ):
    return self.__msgBroker.getMsgFactory().createMessage( self.__serviceName, msgName )
gMessageBroker = False
def getGlobalMessageBroker():
global gMessageBroker
if not gMessageBroker:
gMessageBroker = MessageBroker( 'GlobalMessageBroker', transportPool = getGlobalTransportPool() )
return gMessageBroker
| gpl-3.0 |
muffin/tutorial-rss-reader | server/vendor/pyquery/cssselectpatch.py | 3 | 7695 | #-*- coding:utf-8 -*-
#
# Copyright (C) 2008 - Olivier Lauzanne <[email protected]>
#
# Distributed under the BSD license, see LICENSE.txt
from cssselect import xpath as cssselect_xpath
from cssselect.xpath import ExpressionError
class JQueryTranslator(cssselect_xpath.HTMLTranslator):
"""This class is used to implement the css pseudo classes
(:first, :last, ...) that are not defined in the css standard,
but are defined in the jquery API.
"""
def xpath_first_pseudo(self, xpath):
"""Matches the first selected element.
"""
xpath.add_post_condition('position() = 1')
return xpath
def xpath_last_pseudo(self, xpath):
"""Matches the last selected element.
"""
xpath.add_post_condition('position() = last()')
return xpath
def xpath_even_pseudo(self, xpath):
"""Matches even elements, zero-indexed.
"""
# the first element is 1 in xpath and 0 in python and js
xpath.add_post_condition('position() mod 2 = 1')
return xpath
def xpath_odd_pseudo(self, xpath):
"""Matches odd elements, zero-indexed.
"""
xpath.add_post_condition('position() mod 2 = 0')
return xpath
def xpath_checked_pseudo(self, xpath):
"""Matches odd elements, zero-indexed.
"""
xpath.add_condition("@checked and name(.) = 'input'")
return xpath
def xpath_selected_pseudo(self, xpath):
"""Matches all elements that are selected.
"""
xpath.add_condition("@selected and name(.) = 'option'")
return xpath
def xpath_disabled_pseudo(self, xpath):
"""Matches all elements that are disabled.
"""
xpath.add_condition("@disabled")
return xpath
def xpath_enabled_pseudo(self, xpath):
"""Matches all elements that are enabled.
"""
xpath.add_condition("not(@disabled) and name(.) = 'input'")
return xpath
def xpath_file_pseudo(self, xpath):
"""Matches all input elements of type file.
"""
xpath.add_condition("@type = 'file' and name(.) = 'input'")
return xpath
def xpath_input_pseudo(self, xpath):
"""Matches all input elements.
"""
xpath.add_condition("(name(.) = 'input' or name(.) = 'select') "
+ "or (name(.) = 'textarea' or name(.) = 'button')")
return xpath
def xpath_button_pseudo(self, xpath):
"""Matches all button input elements and the button element.
"""
xpath.add_condition("(@type = 'button' and name(.) = 'input') "
+ "or name(.) = 'button'")
return xpath
def xpath_radio_pseudo(self, xpath):
"""Matches all radio input elements.
"""
xpath.add_condition("@type = 'radio' and name(.) = 'input'")
return xpath
def xpath_text_pseudo(self, xpath):
"""Matches all text input elements.
"""
xpath.add_condition("@type = 'text' and name(.) = 'input'")
return xpath
def xpath_checkbox_pseudo(self, xpath):
"""Matches all checkbox input elements.
"""
xpath.add_condition("@type = 'checkbox' and name(.) = 'input'")
return xpath
def xpath_password_pseudo(self, xpath):
"""Matches all password input elements.
"""
xpath.add_condition("@type = 'password' and name(.) = 'input'")
return xpath
def xpath_submit_pseudo(self, xpath):
"""Matches all submit input elements.
"""
xpath.add_condition("@type = 'submit' and name(.) = 'input'")
return xpath
def xpath_image_pseudo(self, xpath):
"""Matches all image input elements.
"""
xpath.add_condition("@type = 'image' and name(.) = 'input'")
return xpath
def xpath_reset_pseudo(self, xpath):
"""Matches all reset input elements.
"""
xpath.add_condition("@type = 'reset' and name(.) = 'input'")
return xpath
def xpath_header_pseudo(self, xpath):
"""Matches all header elelements (h1, ..., h6)
"""
# this seems kind of brute-force, is there a better way?
xpath.add_condition(
"(name(.) = 'h1' or name(.) = 'h2' or name (.) = 'h3') "
+ "or (name(.) = 'h4' or name (.) = 'h5' or name(.) = 'h6')")
return xpath
def xpath_parent_pseudo(self, xpath):
"""Match all elements that contain other elements
"""
xpath.add_condition("count(child::*) > 0")
return xpath
def xpath_empty_pseudo(self, xpath):
"""Match all elements that do not contain other elements
"""
xpath.add_condition("count(child::*) = 0")
return xpath
def xpath_eq_function(self, xpath, function):
"""Matches a single element by its index.
"""
if function.argument_types() != ['NUMBER']:
raise ExpressionError(
"Expected a single integer for :eq(), got %r"
% function.arguments
)
value = int(function.arguments[0].value)
xpath.add_post_condition(
'position() = %s' % (value + 1))
return xpath
def xpath_gt_function(self, xpath, function):
"""Matches all elements with an index over the given one.
"""
if function.argument_types() != ['NUMBER']:
raise ExpressionError(
"Expected a single integer for :gt(), got %r"
% function.arguments
)
value = int(function.arguments[0].value)
xpath.add_post_condition(
'position() > %s' % (value + 1))
return xpath
def xpath_lt_function(self, xpath, function):
"""Matches all elements with an index below the given one.
"""
if function.argument_types() != ['NUMBER']:
raise ExpressionError(
"Expected a single integer for :gt(), got %r"
% function.arguments
)
value = int(function.arguments[0].value)
xpath.add_post_condition(
'position() < %s' % (value + 1))
return xpath
def xpath_contains_function(self, xpath, function):
"""Matches all elements that contain the given text
"""
if function.argument_types() != ['STRING']:
raise ExpressionError(
"Expected a single string for :contains(), got %r"
% function.arguments
)
value = str(function.arguments[0].value)
xpath.add_post_condition(
"contains(text(), '%s')" % value)
return xpath
XPathExprOrig = cssselect_xpath.XPathExpr
class XPathExpr(XPathExprOrig):
def __init__(self, path='', element='*', condition='', star_prefix=False):
self.path = path
self.element = element
self.condition = condition
self.post_condition = None
def add_post_condition(self, post_condition):
if self.post_condition:
self.post_condition = '%s and (%s)' % (self.post_condition,
post_condition)
else:
self.post_condition = post_condition
def __str__(self):
path = XPathExprOrig.__str__(self)
if self.post_condition:
path = '%s[%s]' % (path, self.post_condition)
return path
def join(self, combiner, other):
res = XPathExprOrig.join(self, combiner, other)
self.post_condition = other.post_condition
return res
cssselect_xpath.XPathExpr = XPathExpr
| mit |
ionomy/ion | test/functional/token_test-pt1.py | 1 | 8466 | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Ion Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the functionality of all CLI commands.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from time import sleep
from decimal import Decimal
import re
import sys
import os
import subprocess
ION_TX_FEE = 0.001
ION_AUTH_ADDR = "gAQQQjA4DCT2EZDVK6Jae4mFfB217V43Nt"
class TokenTest (BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
#self.extra_args = [["-debug"],["-debug"]]
def run_test(self):
connect_nodes_bi(self.nodes, 0, 1)
tmpdir=self.options.tmpdir
self.log.info("Generating Tokens...")
self.nodes[0].generate(100)
self.nodes[0].importprivkey("cUnScAFQYLW8J8V9bWr57yj2AopudqTd266s6QuWGMMfMix3Hff4")
self.nodes[0].generate(100)
self.nodes[0].generate(100)
self.nodes[0].sendtoaddress(ION_AUTH_ADDR, 10)
self.nodes[0].generate(1)
MagicTok=self.nodes[0].configuremanagementtoken("MAGIC", "MagicToken", "4", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-MAGIC.json", "4f92d91db24bb0b8ca24a2ec86c4b012ccdc4b2e9d659c2079f5cc358413a765", "true")
self.nodes[0].generate(1)
MagicGroup_ID=MagicTok['groupID']
mintaddr=self.nodes[0].getnewaddress()
self.nodes[0].minttoken(MagicGroup_ID, mintaddr, 500)
self.nodes[0].generate(1)
XDMTok=self.nodes[0].configuremanagementtoken("XDM", "DarkMatter", "13", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-XDM.json", "f5125a90bde180ef073ce1109376d977f5cbddb5582643c81424cc6cc842babd", "true")
XDMGroup_ID=XDMTok['groupID']
AtomTok=self.nodes[0].configuremanagementtoken("ATOM", "Atom", "0", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-ATOM.json", "b0425ee4ba234099970c53c28288da749e2a1afc0f49856f4cab82b37f72f6a5", "true")
AtomGroup_ID=AtomTok['groupID']
ELECTok=self.nodes[0].configuremanagementtoken("ELEC", "Electron", "13", "https://github.com/ioncoincore/ATP-descriptions/blob/master/ION-testnet-ELEC.json", "6de2409add060ec4ef03d61c0966dc46508ed3498e202e9459e492a372ddccf5", "true")
ELECGroup_ID=ELECTok['groupID']
self.nodes[0].generate(1)
self.log.info("Token Info %s" % json.dumps(self.nodes[0].tokeninfo("all"), indent=4))
MagicAddr=self.nodes[0].getnewaddress()
XDMAddr=self.nodes[0].getnewaddress()
AtomAddr=self.nodes[0].getnewaddress()
ELECAddr=self.nodes[0].getnewaddress()
HulkAddr=self.nodes[0].getnewaddress()
self.nodes[0].minttoken(MagicGroup_ID, MagicAddr, '4975')
self.nodes[0].generate(1)
self.nodes[0].minttoken(XDMGroup_ID, XDMAddr, '71')
self.nodes[0].generate(1)
self.nodes[0].minttoken(AtomGroup_ID, AtomAddr, '100')
self.nodes[0].generate(1)
self.nodes[0].minttoken(ELECGroup_ID, ELECAddr, '1')
self.nodes[0].generate(1)
HULKTok=self.nodes[0].configuretoken("HULK", "HulkToken", "10", "https://raw.githubusercontent.com/CeForce/hulktoken/master/hulk.json", "367750e31cb276f5218c013473449c9e6a4019fed603d045b51e25f5db29283a", "true")
HulkGroup_ID=HULKTok['groupID']
self.nodes[0].generate(1)
self.nodes[0].minttoken(HulkGroup_ID, HulkAddr, '15')
self.nodes[0].generate(1)
tokenBalance=self.nodes[0].gettokenbalance()
for balance in tokenBalance:
self.log.info("Token Name %s" % balance['name'])
self.log.info("Token Balance %s" % balance['balance'])
self.log.info("XDM Ticker %s" % json.dumps(self.nodes[0].tokeninfo('ticker', 'XDM'), indent=4))
self.log.info("XDM Scan Tokens %s" % self.nodes[0].scantokens('start', XDMGroup_ID))
tokenAuth=self.nodes[0].listtokenauthorities()
for authority in tokenAuth:
self.log.info("Ticker %s" % authority['ticker'])
self.log.info("Authority address %s\n" % authority['address'])
self.log.info("Token Authorities %s" % authority['tokenAuthorities'])
self.log.info("Drop Mint Authoritiy for XDM")
XDMDrop=self.nodes[0].listtokenauthorities(XDMGroup_ID)
self.nodes[0].droptokenauthorities(XDMGroup_ID, XDMDrop[0]['txid'], str(XDMDrop[0]['vout']), 'configure')
self.nodes[0].generate(1)
tokenAuthority=(self.nodes[0].listtokenauthorities(XDMGroup_ID))
tokenXDMAddr=tokenAuthority[0]['address']
self.log.info("Token authorities XDM %s\n" % tokenXDMAddr)
try:
self.log.info("Try minting XDM tokens with mint flag removed")
self.nodes[0].minttoken(XDMGroup_ID, XDMAddr, '100')
except Exception as e:
self.log.info(e)
#self.log.info("Re-Enable mint XDM")
#time.sleep(3600)
#self.nodes[0].createtokenauthorities(XDMGroup_ID, tokenXDMAddr, 'configure')
self.log.info("XDM Scan Tokens %s" % self.nodes[0].scantokens('start', XDMGroup_ID))
tokenBalance=self.nodes[0].gettokenbalance()
for balance in tokenBalance:
self.log.info("Token Name %s" % balance['name'])
self.log.info("Token Balance %s" % balance['balance'])
AtomBalance=self.nodes[0].gettokenbalance(AtomGroup_ID)
self.log.info("Atom Balance %s" % AtomBalance['balance'])
self.log.info("Melt 10 tokens from ATOM Group")
self.nodes[0].melttoken(AtomGroup_ID, '10')
AtomBalance=self.nodes[0].gettokenbalance(AtomGroup_ID)
self.log.info("Atom Balance %s\n" % AtomBalance['balance'])
self.log.info("Token info all (from node1)\n%s\n" % json.dumps(self.nodes[1].tokeninfo('all'), indent=4))
self.log.info("Token info ticker XDM\n%s\n" % json.dumps(self.nodes[0].tokeninfo('ticker', 'XDM'), indent=4))
self.log.info("Token info name DarkMatter\n%s\n" % json.dumps(self.nodes[0].tokeninfo('name', 'darkmatter'), indent=4))
self.log.info("Token info groupid %s\n%s\n" % (XDMGroup_ID, json.dumps(self.nodes[0].tokeninfo('groupid', XDMGroup_ID), indent=4)))
ELEC_Trans=self.nodes[0].listtokentransactions(ELECGroup_ID)
self.log.info("Token Transactions Electron Token\n%s\n" % ELEC_Trans)
ElecTrans=ELEC_Trans[0]['txid']
ELEC_BlockHash=self.nodes[0].getblockhash(200)
self.log.info("Electron Transaction\n%s" % self.nodes[0].gettokentransaction(ElecTrans))
self.log.info("Blockhash block 200 %s" % ELEC_BlockHash)
self.log.info("\nTransaction ID %s" % ElecTrans)
self.log.info("Transaction Details %s" % self.nodes[0].gettokentransaction(ElecTrans, ELEC_BlockHash))
self.log.info("\nList tokens since block 200 Hulk\n%s" % self.nodes[0].listtokenssinceblock(ELECGroup_ID, ELEC_BlockHash))
tokenHulkUnspent=self.nodes[0].listunspenttokens(HulkGroup_ID)
newHulk=self.nodes[0].getnewaddress()
self.log.info("Send tokens to new address %s" % self.nodes[0].sendtoken(HulkGroup_ID, newHulk, 2))
self.nodes[0].generate(1)
self.log.info(self.nodes[1].getaddressbalance)
subgroupID=self.nodes[0].getsubgroupid(HulkGroup_ID,"Bruce_Banner")
self.log.info("Subgroup Info %s " % self.nodes[0].tokeninfo('groupid',subgroupID))
self.log.info("\nUnspent Tokens Hulk Token\n%s\n" % tokenHulkUnspent)
tokenReceiveAddr=self.nodes[1].getnewaddress()
rawTxid=tokenHulkUnspent[0]['txid']
rawVout=tokenHulkUnspent[0]['vout']
rawAddr=tokenReceiveAddr
rawAmount=0.01
self.log.info("txid %s" % rawTxid)
self.log.info("vout %s" % rawVout)
self.log.info("recaddr %s" % rawAddr)
self.log.info("amount %s" % rawAmount )
inputs=[{ "txid" : rawTxid, "vout" : rawVout }]
outputs={ rawAddr : rawAmount }
token={ rawAddr : { "amount" : 0.001, "groupid" : HulkGroup_ID, "token_amount" : 0.1 }}
self.log.info(str(inputs))
self.log.info(outputs)
self.log.info(token)
# ICC 86
#rawtx=self.nodes[0].createrawtokentransaction(inputs, outputs, token)
#self.log.info(rawtx)
#time.sleep(3600)
if __name__ == '__main__':
TokenTest().main()
| mit |
axmachado/simplepos | simplepos/objfile/module.py | 1 | 5296 | # -*- coding: utf-8 -*-
"""
Copyright © 2017 - Alexandre Machado <[email protected]>
This file is part of Simple POS Compiler.
Simnple POS Compiler is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3
of the License, or (at your option) any later version.
Simple POS Compiler is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Simple POS Compiler. If not, see <http://www.gnu.org/licenses/>.
@author: Alexandre Machado <[email protected]>
Module definition code
A Module is the result of a SimplePOS source file compilation.
Modules can be named (main modules) or
anonymous (linked along with main modules)
"""
from .block import CallableBlock
class Module(CallableBlock):
"""
The main construct of the intermediate representation
of the program, a Module is an image of a source code file
Attributes:
sourceFile: the source file name
objectFile: the object file name
functions: name indexed dictionary of functions defined in this module
externalFunctions: list of function names defined in other modules and that
must be linked with this one.
"""
def __init__(self, sourceFile, objFile):
super(Module, self).__init__()
self.sourceFile = sourceFile
self.objFile = objFile
self.functions = {}
self.externalFunctions = {}
self.constants = {}
self.externalConstants = {}
def printStats(self):
print("Module name:", self.name)
print("Source file: ", self.sourceFile)
print("Object file: ", self.objFile)
super(Module, self).printStats()
print("Defined functions:", len(self.functions))
print(" " + ", ".join(self.functions[x].name
for x in self.functions))
def findFunction(self, name):
"""
Find a function on the scope of the module.
This method will find all functions defined inside the module,
and all built in functions.
"""
from .functions import UndefinedFunction
from ..api import findApiFunction
try:
            # first, try to find it as an API function
func = findApiFunction(name)
except KeyError:
if name in self.functions:
# defined here
func = self.functions[name]
elif name in self.externalFunctions:
# already used and defined as external
func = self.externalFunctions[name]
else:
# not found, adding as an external reference
func = UndefinedFunction(name)
self.externalFunctions[name] = func
return func
def canResolveUndefined(self, function):
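        # A previously undefined (forward-referenced) function can only be
        # resolved when the argument types and return type recorded from its
        # call sites match the actual definition.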
from .typedefs import UNDEF
theUndef = self.externalFunctions[function.name]
if len(theUndef.arguments) > 0:
if len(function.arguments) != len(theUndef.arguments):
return False
combo = zip(function.arguments, theUndef.arguments)
for (argf, argu) in combo:
if argf.type_ != argu.type_:
return False
if theUndef.returnType != UNDEF:
if theUndef.returnType != function.returnType:
return False
return True
def addFunction(self, function):
fname = function.name
if fname in self.externalFunctions:
if self.canResolveUndefined(function):
del self.externalFunctions[fname]
else:
raise ValueError('Defined function incompatible with '
'previous calls: ' + fname)
if fname in self.functions:
raise ValueError('Duplicated function definition: ' + fname)
self.functions[fname] = function
def addExternalConstant(self, name, value):
self.externalConstants[name] = value
def addLocalConstant(self, name, value):
self.constants[name] = value
def replaceVariableReferences(self, varName, variable):
super(Module, self).replaceVariableReferences(varName, variable)
for function in self.functions.values():
function.replaceGlobalVariableReferences(varName, variable)
def resolveExternalConstant(self, name, value):
if name in self.externalConstants:
super(Module, self).resolveExternalConstant(name, value)
for function in self.functions.values():
function.resolveExternalConstant(name, value)
del self.externalConstants[name]
def __str__(self):
partial = super(Module, self).__str__()
if len(self.functions) > 0:
partial += "\n\n"
for fcn in self.functions:
partial += str(self.functions[fcn])
partial += "\n"
return partial
| gpl-3.0 |
praekelt/molo | molo/core/migrations/0020_add-social-media-fields-to-article-page.py | 1 | 1042 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0010_change_on_delete_behaviour'),
('core', '0019_add_tags_to_article'),
]
operations = [
migrations.AddField(
model_name='articlepage',
name='social_media_description',
field=models.TextField(null=True, verbose_name=b'description', blank=True),
),
migrations.AddField(
model_name='articlepage',
name='social_media_image',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, verbose_name=b'Image', blank=True, to='wagtailimages.Image', null=True),
),
migrations.AddField(
model_name='articlepage',
name='social_media_title',
field=models.TextField(null=True, verbose_name=b'title', blank=True),
),
]
| bsd-2-clause |
chunyisong/shadowsocks | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
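    # Locate libcrypto, declare the ctypes prototypes used below and allocate
    # the shared output buffer.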
global loaded, libcrypto, buf
libcrypto = util.find_library(('crypto', 'eay32'),
'EVP_get_cipherbyname',
'libcrypto')
if libcrypto is None:
raise Exception('libcrypto(OpenSSL) not found')
libcrypto.EVP_get_cipherbyname.restype = c_void_p
libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p
libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
c_char_p, c_char_p, c_int)
libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int)
libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
libcrypto.OpenSSL_add_all_ciphers()
buf = create_string_buffer(buf_size)
loaded = True
def load_cipher(cipher_name):
func_name = 'EVP_' + cipher_name.replace('-', '_')
if bytes != str:
func_name = str(func_name, 'utf-8')
cipher = getattr(libcrypto, func_name, None)
if cipher:
cipher.restype = c_void_p
return cipher()
return None
class OpenSSLCrypto(object):
def __init__(self, cipher_name, key, iv, op):
self._ctx = None
if not loaded:
load_openssl()
cipher_name = common.to_bytes(cipher_name)
cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
if not cipher:
cipher = load_cipher(cipher_name)
if not cipher:
raise Exception('cipher %s not found in libcrypto' % cipher_name)
key_ptr = c_char_p(key)
iv_ptr = c_char_p(iv)
self._ctx = libcrypto.EVP_CIPHER_CTX_new()
if not self._ctx:
raise Exception('can not create cipher context')
r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
key_ptr, iv_ptr, c_int(op))
if not r:
self.clean()
raise Exception('can not initialize cipher context')
def update(self, data):
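        # Encrypt/decrypt into the shared module-level buffer, growing it
        # whenever the input is larger than the current buffer size.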
global buf_size, buf
cipher_out_len = c_long(0)
l = len(data)
if buf_size < l:
buf_size = l * 2
buf = create_string_buffer(buf_size)
libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
byref(cipher_out_len), c_char_p(data), l)
# buf is copied to a str object when we access buf.raw
return buf.raw[:cipher_out_len.value]
def __del__(self):
self.clean()
def clean(self):
if self._ctx:
libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
libcrypto.EVP_CIPHER_CTX_free(self._ctx)
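# Supported methods: cipher name -> (key length in bytes, IV length in bytes,
# implementation class).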
ciphers = {
'aes-128-cfb': (16, 16, OpenSSLCrypto),
'aes-192-cfb': (24, 16, OpenSSLCrypto),
'aes-256-cfb': (32, 16, OpenSSLCrypto),
'aes-128-ofb': (16, 16, OpenSSLCrypto),
'aes-192-ofb': (24, 16, OpenSSLCrypto),
'aes-256-ofb': (32, 16, OpenSSLCrypto),
'aes-128-ctr': (16, 16, OpenSSLCrypto),
'aes-192-ctr': (24, 16, OpenSSLCrypto),
'aes-256-ctr': (32, 16, OpenSSLCrypto),
'aes-128-cfb8': (16, 16, OpenSSLCrypto),
'aes-192-cfb8': (24, 16, OpenSSLCrypto),
'aes-256-cfb8': (32, 16, OpenSSLCrypto),
'aes-128-cfb1': (16, 16, OpenSSLCrypto),
'aes-192-cfb1': (24, 16, OpenSSLCrypto),
'aes-256-cfb1': (32, 16, OpenSSLCrypto),
'bf-cfb': (16, 8, OpenSSLCrypto),
'camellia-128-cfb': (16, 16, OpenSSLCrypto),
'camellia-192-cfb': (24, 16, OpenSSLCrypto),
'camellia-256-cfb': (32, 16, OpenSSLCrypto),
'cast5-cfb': (16, 8, OpenSSLCrypto),
'des-cfb': (8, 8, OpenSSLCrypto),
'idea-cfb': (16, 8, OpenSSLCrypto),
'rc2-cfb': (16, 8, OpenSSLCrypto),
'rc4': (16, 0, OpenSSLCrypto),
'seed-cfb': (16, 16, OpenSSLCrypto),
}
def run_method(method):
cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
def test_aes_128_cfb():
run_method('aes-128-cfb')
def test_aes_256_cfb():
run_method('aes-256-cfb')
def test_aes_128_cfb8():
run_method('aes-128-cfb8')
def test_aes_256_ofb():
run_method('aes-256-ofb')
def test_aes_256_ctr():
run_method('aes-256-ctr')
def test_bf_cfb():
run_method('bf-cfb')
def test_rc4():
run_method('rc4')
if __name__ == '__main__':
test_aes_128_cfb()
| apache-2.0 |
flar2/m7wl-Bulletproof | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
befelix/GPy | GPy/util/normalizer.py | 1 | 2996 | '''
Created on Aug 27, 2014
@author: Max Zwiessele
'''
import numpy as np
class _Norm(object):
def __init__(self):
pass
def scale_by(self, Y):
"""
Use data matrix Y as normalization space to work in.
"""
raise NotImplementedError
def normalize(self, Y):
"""
Project Y into normalized space
"""
if not self.scaled():
raise AttributeError("Norm object not initialized yet, try calling scale_by(data) first.")
def inverse_mean(self, X):
"""
Project the normalized object X into space of Y
"""
raise NotImplementedError
def inverse_variance(self, var):
return var
def inverse_covariance(self, covariance):
"""
Convert scaled covariance to unscaled.
Args:
covariance - numpy array of shape (n, n)
Returns:
covariance - numpy array of shape (n, n, m) where m is number of
outputs
"""
raise NotImplementedError
def scaled(self):
"""
Whether this Norm object has been initialized.
"""
raise NotImplementedError
def to_dict(self):
raise NotImplementedError
def _to_dict(self):
input_dict = {}
return input_dict
@staticmethod
def from_dict(input_dict):
import copy
input_dict = copy.deepcopy(input_dict)
normalizer_class = input_dict.pop('class')
import GPy
normalizer_class = eval(normalizer_class)
return normalizer_class._from_dict(normalizer_class, input_dict)
@staticmethod
def _from_dict(normalizer_class, input_dict):
return normalizer_class(**input_dict)
class Standardize(_Norm):
def __init__(self):
self.mean = None
def scale_by(self, Y):
Y = np.ma.masked_invalid(Y, copy=False)
self.mean = Y.mean(0).view(np.ndarray)
self.std = Y.std(0).view(np.ndarray)
def normalize(self, Y):
super(Standardize, self).normalize(Y)
return (Y-self.mean)/self.std
def inverse_mean(self, X):
return (X*self.std)+self.mean
def inverse_variance(self, var):
return (var*(self.std**2))
def inverse_covariance(self, covariance):
return (covariance[..., np.newaxis]*(self.std**2))
def scaled(self):
return self.mean is not None
def to_dict(self):
input_dict = super(Standardize, self)._to_dict()
input_dict["class"] = "GPy.util.normalizer.Standardize"
if self.mean is not None:
input_dict["mean"] = self.mean.tolist()
input_dict["std"] = self.std.tolist()
return input_dict
@staticmethod
def _from_dict(kernel_class, input_dict):
s = Standardize()
if "mean" in input_dict:
s.mean = np.array(input_dict["mean"])
if "std" in input_dict:
s.std = np.array(input_dict["std"])
return s
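if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library):
    # standardize a small target matrix and map it back to the original scale.
    Y = np.random.randn(20, 2) * 3.0 + 5.0
    norm = Standardize()
    norm.scale_by(Y)                  # record per-column mean and std of Y
    Yn = norm.normalize(Y)            # zero-mean, unit-variance targets
    Y_back = norm.inverse_mean(Yn)    # undo the scaling
    print(np.allclose(Y, Y_back))     # expected: True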
| bsd-3-clause |
greenape/gem-module | gaussianemulation/uncertainty.py | 1 | 2386 | from sympy import *
from mpmath import *
from util import *
def E(r_h, b_hat, r_t, e):
return r_h.T*b_hat + r_t.T*e
def V(sigma, u, r_t, A_inv, r_h, g, w):
    return sigma*(u - r_t.T*A_inv*r_t + (r_h - g.T*r_t).T*w*(r_h - g.T*r_t))
def do_E_var(i_i, i_2, V_, E_):
    return (i_i - V_) + (i_2 - power(E_, 2.))
def E_var():
r_tt = R_tt(D, C, B, m, v)
r_hh = R_hh(m, B)
r_ht = R_ht(D, B, C, v, m, h)
i_1 = I_1(s_hat_sq, A_inv, r_tt, w, r_hh, r_ht, g)
i_2 = I_2(b_hat, r_hh, r_ht, e_, r_tt)
return do_E_var(i_1, i_2[0,0], V_[0,0], E_[0,0])
def I_1(sigma, A_inv, r_tt, w, r_hh, r_ht, g):
return sigma*(mpf(1)-Trace(A_inv*r_tt) + Trace(w*(r_hh - 2*r_ht*g + g.T*r_tt*g)))
def I_2(beta, r_hh, r_ht, e_, r_tt):
return beta.T*r_hh*beta + 2*beta.T*r_ht*e_ + e_.T*r_tt*e_
def Q_kl(x, xk, xl, C, B, m):
return 2*(x - xk).T*C*(x - xk) + 2*(x - xl).T*C*(x - xl) + (x - m).T*B*(x - m)
def Q_k(x, xk, m, B, C):
return (2*(x - xk).T*C*(x - xk) + (x-m).T*B*(x-m))[0,0]
def m_kl(xk, xl, C, B, m):
return ((4*C + B)**-1)*(2*C*xk + 2*C*xl + B*m)
def m_k(x, C, B, m):
return ((2*C + B)**-1)*(2*C*x + B*m)
def R_h(m):
return Matrix([1]).col_join(m)
def R_hh(m, B):
#np.vstack((np.hstack(([[1]], m.T)), np.hstack((m, m.dot(m.T) + B.getI()))))
return Matrix([1]).row_join(m.T).col_join(m.row_join(m*m.T + B**-1))
def R_ht(D, B, C, v, m, h):
return reduce(lambda x, y: x.row_join(y),map(lambda k: R_ht_elem(D, k, B, C, v, m, h), range(D.cols))) #matrix
def R_ht_elem(X, k, B, C, v, m, h):
x = X[:,k]
m_prime_k = m_k(x, C, B, m)
return R_t(X, k, B, C, v, m)*Matrix([1]).col_join(m_prime_k)
def R_tt(D, C, B, m, v):
return Matrix(D.cols, D.cols, lambda i, j: R_tt_element(D, i, j, C, B, m, v))
def R_tt_element(x, k, l, C, B, m, v):
xk = x[:,k]
xl = x[:,l]
qkl = Q_kl(m_kl(xk, xl, C, B, m), xk, xl, C, B, m)[0,0]
return power(1-v, 2.)*power(det(B), 0.5)*power(det(4*C + B), -0.5)*exp(- qkl/2.)
def R_t(D, B, C, v, m):
return Matrix(map(lambda k: R_t_elem(D, k, B, C, v, m), range(D.cols)))
def R_t_elem(X, k, B, C, v, m):
X = X[:,k]
m_prime_k = m_k(X, C, B, m)
q_k = Q_k(m_prime_k, X, m, B, C)
return (1-v)*power(det(B), 0.5)*power(det(2*C + B), -0.5)*exp(-q_k/2.) | mpl-2.0 |
zorroblue/scikit-learn | examples/model_selection/plot_roc.py | 102 | 5056 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
jralls/gramps | gramps/gen/filters/rules/note/_hasreferencecountof.py | 6 | 1716 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Stephane Charette
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._hasreferencecountbase import HasReferenceCountBase
#-------------------------------------------------------------------------
# "Notes with a certain reference count"
#-------------------------------------------------------------------------
class HasReferenceCountOf(HasReferenceCountBase):
"""Notes with a reference count of <count>"""
name = _('Notes with a reference count of <count>')
description = _("Matches notes with a certain reference count")
| gpl-2.0 |