repo_name | path | copies | size | content | license
---|---|---|---|---|---
avinetworks/avi-heat | avi/heat/resources/ssl.py | 1 | 35692 | # GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from common import *
class SSLKeyRSAParams(object):
# all schemas
key_size_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: SSL_KEY_2048_BITS)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_KEY_1024_BITS', 'SSL_KEY_2048_BITS', 'SSL_KEY_3072_BITS', 'SSL_KEY_4096_BITS']),
],
)
exponent_schema = properties.Schema(
properties.Schema.NUMBER,
_(" (Default: 65537)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'key_size',
'exponent',
)
# mapping of properties to their schemas
properties_schema = {
'key_size': key_size_schema,
'exponent': exponent_schema,
}
class SSLVersion(object):
# all schemas
type_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: SSL_VERSION_TLS1_1)"),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_VERSION_SSLV3', 'SSL_VERSION_TLS1', 'SSL_VERSION_TLS1_1', 'SSL_VERSION_TLS1_2']),
],
)
# properties list
PROPERTIES = (
'type',
)
# mapping of properties to their schemas
properties_schema = {
'type': type_schema,
}
unique_keys = {
'my_key': 'type',
}
class CertificateAuthority(object):
# all schemas
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
ca_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'name',
'ca_uuid',
)
# mapping of properties to their schemas
properties_schema = {
'name': name_schema,
'ca_uuid': ca_uuid_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'ca_uuid': 'sslkeyandcertificate',
}
class SSLKeyECParams(object):
# all schemas
curve_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: SSL_KEY_EC_CURVE_SECP256R1)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_KEY_EC_CURVE_SECP256R1', 'SSL_KEY_EC_CURVE_SECP384R1', 'SSL_KEY_EC_CURVE_SECP521R1']),
],
)
# properties list
PROPERTIES = (
'curve',
)
# mapping of properties to their schemas
properties_schema = {
'curve': curve_schema,
}
class SSLCertificateDescription(object):
# all schemas
common_name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
email_address_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
organization_unit_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
organization_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
locality_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
state_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
country_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
distinguished_name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'common_name',
'email_address',
'organization_unit',
'organization',
'locality',
'state',
'country',
'distinguished_name',
)
# mapping of properties to their schemas
properties_schema = {
'common_name': common_name_schema,
'email_address': email_address_schema,
'organization_unit': organization_unit_schema,
'organization': organization_schema,
'locality': locality_schema,
'state': state_schema,
'country': country_schema,
'distinguished_name': distinguished_name_schema,
}
class CertificateManagementProfile(AviResource):
resource_name = "certificatemanagementprofile"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_("Name of the PKI Profile"),
required=True,
update_allowed=True,
)
script_params_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=CustomParams.properties_schema,
required=True,
update_allowed=False,
)
script_params_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=script_params_item_schema,
required=False,
update_allowed=True,
)
script_path_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'script_params',
'script_path',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'script_params': script_params_schema,
'script_path': script_path_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'script_params': getattr(CustomParams, 'field_references', {}),
}
unique_keys = {
'script_params': getattr(CustomParams, 'unique_keys', {}),
}
class SSLRating(object):
# all schemas
security_score_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
performance_rating_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_SCORE_AVERAGE', 'SSL_SCORE_BAD', 'SSL_SCORE_EXCELLENT', 'SSL_SCORE_GOOD', 'SSL_SCORE_NOT_SECURE', 'SSL_SCORE_VERY_BAD']),
],
)
compatibility_rating_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_SCORE_AVERAGE', 'SSL_SCORE_BAD', 'SSL_SCORE_EXCELLENT', 'SSL_SCORE_GOOD', 'SSL_SCORE_NOT_SECURE', 'SSL_SCORE_VERY_BAD']),
],
)
# properties list
PROPERTIES = (
'security_score',
'performance_rating',
'compatibility_rating',
)
# mapping of properties to their schemas
properties_schema = {
'security_score': security_score_schema,
'performance_rating': performance_rating_schema,
'compatibility_rating': compatibility_rating_schema,
}
class CRL(object):
# all schemas
server_url_schema = properties.Schema(
properties.Schema.STRING,
_("URL of a server that issues the Certificate Revocation list. If this is configured, CRL will be periodically downloaded either based on the configured update interval or the next update interval in the CRL. CRL itself is stored in the body."),
required=False,
update_allowed=True,
)
body_schema = properties.Schema(
properties.Schema.STRING,
_("Certificate Revocation list from a given issuer in PEM format. This can either be configured directly or via the server_url. "),
required=False,
update_allowed=True,
)
last_update_schema = properties.Schema(
properties.Schema.STRING,
_("The date when this CRL was last issued"),
required=False,
update_allowed=True,
)
next_update_schema = properties.Schema(
properties.Schema.STRING,
_("The date when a newer CRL will be available. Also conveys the date after which the CRL should be considered obsolete."),
required=False,
update_allowed=True,
)
update_interval_schema = properties.Schema(
properties.Schema.NUMBER,
_("Interval in minutes to check for CRL update. If not specified, interval will be 1 day (Units: MIN)"),
required=False,
update_allowed=True,
)
etag_schema = properties.Schema(
properties.Schema.STRING,
_("Cached etag to optimize the download of the CRL"),
required=False,
update_allowed=True,
)
text_schema = properties.Schema(
properties.Schema.STRING,
_("Certificate Revocation list in plain text for readability"),
required=False,
update_allowed=True,
)
common_name_schema = properties.Schema(
properties.Schema.STRING,
_("Common name of the issuer in the Certificate Revocation list"),
required=False,
update_allowed=True,
)
fingerprint_schema = properties.Schema(
properties.Schema.STRING,
_("Fingerprint of the CRL. Used to avoid configuring duplicates"),
required=False,
update_allowed=True,
)
distinguished_name_schema = properties.Schema(
properties.Schema.STRING,
_("Distinguished name of the issuer in the Certificate Revocation list"),
required=False,
update_allowed=True,
)
last_refreshed_schema = properties.Schema(
properties.Schema.STRING,
_("Last time CRL was refreshed by the system. This is an internal field used by the system"),
required=False,
update_allowed=False,
)
# properties list
PROPERTIES = (
'server_url',
'body',
'last_update',
'next_update',
'update_interval',
'etag',
'text',
'common_name',
'fingerprint',
'distinguished_name',
'last_refreshed',
)
# mapping of properties to their schemas
properties_schema = {
'server_url': server_url_schema,
'body': body_schema,
'last_update': last_update_schema,
'next_update': next_update_schema,
'update_interval': update_interval_schema,
'etag': etag_schema,
'text': text_schema,
'common_name': common_name_schema,
'fingerprint': fingerprint_schema,
'distinguished_name': distinguished_name_schema,
'last_refreshed': last_refreshed_schema,
}
class SSLKeyParams(object):
# all schemas
algorithm_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: SSL_KEY_ALGORITHM_RSA)"),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_KEY_ALGORITHM_EC', 'SSL_KEY_ALGORITHM_RSA']),
],
)
rsa_params_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SSLKeyRSAParams.properties_schema,
required=False,
update_allowed=True,
)
ec_params_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SSLKeyECParams.properties_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'algorithm',
'rsa_params',
'ec_params',
)
# mapping of properties to their schemas
properties_schema = {
'algorithm': algorithm_schema,
'rsa_params': rsa_params_schema,
'ec_params': ec_params_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'ec_params': getattr(SSLKeyECParams, 'field_references', {}),
'rsa_params': getattr(SSLKeyRSAParams, 'field_references', {}),
}
unique_keys = {
'ec_params': getattr(SSLKeyECParams, 'unique_keys', {}),
'rsa_params': getattr(SSLKeyRSAParams, 'unique_keys', {}),
}
class SSLProfile(AviResource):
resource_name = "sslprofile"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
accepted_versions_item_schema = properties.Schema(
properties.Schema.MAP,
_("Set of versions accepted by the server"),
schema=SSLVersion.properties_schema,
required=True,
update_allowed=False,
)
accepted_versions_schema = properties.Schema(
properties.Schema.LIST,
_("Set of versions accepted by the server"),
schema=accepted_versions_item_schema,
required=False,
update_allowed=True,
)
accepted_ciphers_schema = properties.Schema(
properties.Schema.STRING,
_("Ciphers suites represented as defined by U(http://www.openssl.org/docs/apps/ciphers.html)"),
required=False,
update_allowed=True,
)
cipher_enums_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
constraints=[
constraints.AllowedValues(['TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', 'TLS_RSA_WITH_AES_128_CBC_SHA', 'TLS_RSA_WITH_AES_128_CBC_SHA256', 'TLS_RSA_WITH_AES_128_GCM_SHA256', 'TLS_RSA_WITH_AES_256_CBC_SHA', 'TLS_RSA_WITH_AES_256_CBC_SHA256', 'TLS_RSA_WITH_AES_256_GCM_SHA384', 'TLS_RSA_WITH_RC4_128_SHA']),
],
)
cipher_enums_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=cipher_enums_item_schema,
required=False,
update_allowed=True,
)
tags_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=Tag.properties_schema,
required=True,
update_allowed=False,
)
tags_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=tags_item_schema,
required=False,
update_allowed=True,
)
ssl_rating_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SSLRating.properties_schema,
required=False,
update_allowed=True,
)
send_close_notify_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Send 'close notify' alert message for a clean shutdown of the SSL connection. (Default: True)"),
required=False,
update_allowed=True,
)
dhparam_schema = properties.Schema(
properties.Schema.STRING,
_("DH Parameters used in SSL. At this time, it is not configurable and is set to 2048 bits."),
required=False,
update_allowed=False,
)
prefer_client_cipher_ordering_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Prefer the SSL cipher ordering presented by the client during the SSL handshake over the one specified in the SSL Profile. (Default: False)"),
required=False,
update_allowed=True,
)
enable_ssl_session_reuse_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable SSL session re-use. (Default: True)"),
required=False,
update_allowed=True,
)
ssl_session_timeout_schema = properties.Schema(
properties.Schema.NUMBER,
_("The amount of time before an SSL session expires. (Units: SEC) (Default: 86400)"),
required=False,
update_allowed=True,
)
type_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.8) SSL Profile Type. (Default: SSL_PROFILE_TYPE_APPLICATION)"),
required=False,
update_allowed=False,
constraints=[
constraints.AllowedValues(['SSL_PROFILE_TYPE_APPLICATION', 'SSL_PROFILE_TYPE_SYSTEM']),
],
)
description_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'accepted_versions',
'accepted_ciphers',
'cipher_enums',
'tags',
'ssl_rating',
'send_close_notify',
'dhparam',
'prefer_client_cipher_ordering',
'enable_ssl_session_reuse',
'ssl_session_timeout',
'type',
'description',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'accepted_versions': accepted_versions_schema,
'accepted_ciphers': accepted_ciphers_schema,
'cipher_enums': cipher_enums_schema,
'tags': tags_schema,
'ssl_rating': ssl_rating_schema,
'send_close_notify': send_close_notify_schema,
'dhparam': dhparam_schema,
'prefer_client_cipher_ordering': prefer_client_cipher_ordering_schema,
'enable_ssl_session_reuse': enable_ssl_session_reuse_schema,
'ssl_session_timeout': ssl_session_timeout_schema,
'type': type_schema,
'description': description_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'ssl_rating': getattr(SSLRating, 'field_references', {}),
'accepted_versions': getattr(SSLVersion, 'field_references', {}),
'tags': getattr(Tag, 'field_references', {}),
}
unique_keys = {
'ssl_rating': getattr(SSLRating, 'unique_keys', {}),
'accepted_versions': getattr(SSLVersion, 'unique_keys', {}),
'tags': getattr(Tag, 'unique_keys', {}),
}
class SSLCertificate(object):
# all schemas
version_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
serial_number_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
self_signed_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(""),
required=False,
update_allowed=True,
)
issuer_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SSLCertificateDescription.properties_schema,
required=False,
update_allowed=True,
)
subject_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SSLCertificateDescription.properties_schema,
required=False,
update_allowed=True,
)
key_params_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SSLKeyParams.properties_schema,
required=False,
update_allowed=True,
)
public_key_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
signature_algorithm_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
signature_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
not_before_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
not_after_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
certificate_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
certificate_signing_request_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
text_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
fingerprint_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
expiry_status_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: SSL_CERTIFICATE_GOOD)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_CERTIFICATE_EXPIRED', 'SSL_CERTIFICATE_EXPIRY_WARNING', 'SSL_CERTIFICATE_GOOD']),
],
)
chain_verified_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(""),
required=False,
update_allowed=True,
)
subject_alt_names_item_schema = properties.Schema(
properties.Schema.STRING,
_("subjectAltName that provides additional subject identities"),
required=True,
update_allowed=False,
)
subject_alt_names_schema = properties.Schema(
properties.Schema.LIST,
_("subjectAltName that provides additional subject identities"),
schema=subject_alt_names_item_schema,
required=False,
update_allowed=True,
)
days_until_expire_schema = properties.Schema(
properties.Schema.NUMBER,
_(" (Default: 365)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'version',
'serial_number',
'self_signed',
'issuer',
'subject',
'key_params',
'public_key',
'signature_algorithm',
'signature',
'not_before',
'not_after',
'certificate',
'certificate_signing_request',
'text',
'fingerprint',
'expiry_status',
'chain_verified',
'subject_alt_names',
'days_until_expire',
)
# mapping of properties to their schemas
properties_schema = {
'version': version_schema,
'serial_number': serial_number_schema,
'self_signed': self_signed_schema,
'issuer': issuer_schema,
'subject': subject_schema,
'key_params': key_params_schema,
'public_key': public_key_schema,
'signature_algorithm': signature_algorithm_schema,
'signature': signature_schema,
'not_before': not_before_schema,
'not_after': not_after_schema,
'certificate': certificate_schema,
'certificate_signing_request': certificate_signing_request_schema,
'text': text_schema,
'fingerprint': fingerprint_schema,
'expiry_status': expiry_status_schema,
'chain_verified': chain_verified_schema,
'subject_alt_names': subject_alt_names_schema,
'days_until_expire': days_until_expire_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'key_params': getattr(SSLKeyParams, 'field_references', {}),
'subject': getattr(SSLCertificateDescription, 'field_references', {}),
'issuer': getattr(SSLCertificateDescription, 'field_references', {}),
}
unique_keys = {
'key_params': getattr(SSLKeyParams, 'unique_keys', {}),
'subject': getattr(SSLCertificateDescription, 'unique_keys', {}),
'issuer': getattr(SSLCertificateDescription, 'unique_keys', {}),
}
class PKIProfile(AviResource):
resource_name = "pkiprofile"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_("Name of the PKI Profile"),
required=True,
update_allowed=True,
)
ca_certs_item_schema = properties.Schema(
properties.Schema.MAP,
_("List of Certificate Authorities (Root and Intermediate) trusted that is used for certificate validation"),
schema=SSLCertificate.properties_schema,
required=True,
update_allowed=False,
)
ca_certs_schema = properties.Schema(
properties.Schema.LIST,
_("List of Certificate Authorities (Root and Intermediate) trusted that is used for certificate validation"),
schema=ca_certs_item_schema,
required=False,
update_allowed=True,
)
crls_item_schema = properties.Schema(
properties.Schema.MAP,
_("Certificate Revocation Lists"),
schema=CRL.properties_schema,
required=True,
update_allowed=False,
)
crls_schema = properties.Schema(
properties.Schema.LIST,
_("Certificate Revocation Lists"),
schema=crls_item_schema,
required=False,
update_allowed=True,
)
ignore_peer_chain_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("When enabled, Avi will not trust Intermediate and Root certs presented by a client. Instead, only the chain certs configured in the Certificate Authority section will be used to verify trust of the client's cert. (Default: False)"),
required=False,
update_allowed=True,
)
crl_check_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("When enabled, Avi will verify via CRL checks that certificates in the trust chain have not been revoked. (Default: True)"),
required=False,
update_allowed=True,
)
validate_only_leaf_crl_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("When enabled, Avi will only validate the revocation status of the leaf certificate using CRL. To enable validation for the entire chain, disable this option and provide all the relevant CRLs (Default: True)"),
required=False,
update_allowed=True,
)
created_by_schema = properties.Schema(
properties.Schema.STRING,
_("Creator name"),
required=False,
update_allowed=True,
)
is_federated_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.1.3) This field describes the object's replication scope. If the field is set to false, then the object is visible within the controller-cluster and its associated service-engines. If the field is set to true, then the object is replicated across the federation. (Default: False)"),
required=False,
update_allowed=False,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'ca_certs',
'crls',
'ignore_peer_chain',
'crl_check',
'validate_only_leaf_crl',
'created_by',
'is_federated',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'ca_certs': ca_certs_schema,
'crls': crls_schema,
'ignore_peer_chain': ignore_peer_chain_schema,
'crl_check': crl_check_schema,
'validate_only_leaf_crl': validate_only_leaf_crl_schema,
'created_by': created_by_schema,
'is_federated': is_federated_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'ca_certs': getattr(SSLCertificate, 'field_references', {}),
'crls': getattr(CRL, 'field_references', {}),
}
unique_keys = {
'ca_certs': getattr(SSLCertificate, 'unique_keys', {}),
'crls': getattr(CRL, 'unique_keys', {}),
}
class SSLKeyAndCertificate(AviResource):
resource_name = "sslkeyandcertificate"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
type_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_CERTIFICATE_TYPE_CA', 'SSL_CERTIFICATE_TYPE_SYSTEM', 'SSL_CERTIFICATE_TYPE_VIRTUALSERVICE']),
],
)
certificate_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SSLCertificate.properties_schema,
required=True,
update_allowed=True,
)
key_params_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=SSLKeyParams.properties_schema,
required=False,
update_allowed=True,
)
key_schema = properties.Schema(
properties.Schema.STRING,
_("Private key"),
required=False,
update_allowed=True,
)
status_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: SSL_CERTIFICATE_FINISHED)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SSL_CERTIFICATE_FINISHED', 'SSL_CERTIFICATE_PENDING']),
],
)
ca_certs_item_schema = properties.Schema(
properties.Schema.MAP,
_("CA certificates in certificate chain"),
schema=CertificateAuthority.properties_schema,
required=True,
update_allowed=False,
)
ca_certs_schema = properties.Schema(
properties.Schema.LIST,
_("CA certificates in certificate chain"),
schema=ca_certs_item_schema,
required=False,
update_allowed=True,
)
enckey_base64_schema = properties.Schema(
properties.Schema.STRING,
_("Encrypted private key corresponding to the private key (e.g. those generated by an HSM such as Thales nShield)"),
required=False,
update_allowed=True,
)
enckey_name_schema = properties.Schema(
properties.Schema.STRING,
_("Name of the encrypted private key (e.g. those generated by an HSM such as Thales nShield)"),
required=False,
update_allowed=True,
)
hardwaresecuritymodulegroup_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
certificate_management_profile_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
dynamic_params_item_schema = properties.Schema(
properties.Schema.MAP,
_("Dynamic parameters needed for certificate management profile"),
schema=CustomParams.properties_schema,
required=True,
update_allowed=False,
)
dynamic_params_schema = properties.Schema(
properties.Schema.LIST,
_("Dynamic parameters needed for certificate management profile"),
schema=dynamic_params_item_schema,
required=False,
update_allowed=True,
)
created_by_schema = properties.Schema(
properties.Schema.STRING,
_("Creator name"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'type',
'certificate',
'key_params',
'key',
'status',
'ca_certs',
'enckey_base64',
'enckey_name',
'hardwaresecuritymodulegroup_uuid',
'certificate_management_profile_uuid',
'dynamic_params',
'created_by',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'type': type_schema,
'certificate': certificate_schema,
'key_params': key_params_schema,
'key': key_schema,
'status': status_schema,
'ca_certs': ca_certs_schema,
'enckey_base64': enckey_base64_schema,
'enckey_name': enckey_name_schema,
'hardwaresecuritymodulegroup_uuid': hardwaresecuritymodulegroup_uuid_schema,
'certificate_management_profile_uuid': certificate_management_profile_uuid_schema,
'dynamic_params': dynamic_params_schema,
'created_by': created_by_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'hardwaresecuritymodulegroup_uuid': 'hardwaresecuritymodulegroup',
'certificate': getattr(SSLCertificate, 'field_references', {}),
'dynamic_params': getattr(CustomParams, 'field_references', {}),
'ca_certs': getattr(CertificateAuthority, 'field_references', {}),
'certificate_management_profile_uuid': 'certificatemanagementprofile',
'key_params': getattr(SSLKeyParams, 'field_references', {}),
}
unique_keys = {
'key_params': getattr(SSLKeyParams, 'unique_keys', {}),
'ca_certs': getattr(CertificateAuthority, 'unique_keys', {}),
'dynamic_params': getattr(CustomParams, 'unique_keys', {}),
'certificate': getattr(SSLCertificate, 'unique_keys', {}),
}
def resource_mapping():
return {
'Avi::LBaaS::SSLKeyAndCertificate': SSLKeyAndCertificate,
'Avi::LBaaS::SSLProfile': SSLProfile,
'Avi::LBaaS::PKIProfile': PKIProfile,
'Avi::LBaaS::CertificateManagementProfile': CertificateManagementProfile,
}
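# Hedged illustration, not part of the generated module: Heat consumes
# resource_mapping() to register the classes above under their template type
# names; a template (assumed snippet, YAML shown in comments) could then
# declare an SSLProfile whose properties come from SSLProfile.PROPERTIES:
#
#   my_ssl_profile:
#     type: Avi::LBaaS::SSLProfile
#     properties:
#       name: my-ssl-profile
#       accepted_versions:
#         - type: SSL_VERSION_TLS1_2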
| apache-2.0 |
trujunzhang/djzhang-targets | cwcraigs/cwcraigs/spiders/craigs.py | 1 | 2305 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from cwcraigs.items import CraigslistSampleItem
class CraigsSpider(CrawlSpider):
name = "craigs"
allowed_domains = ["sfbay.craigslist.org"]
start_urls = ["http://sfbay.craigslist.org/search/vnn"]
# the below is next page url
# 'http://sfbay.craigslist.org/search/npo?s=100'
# 'http://sfbay.craigslist.org/search/npo?s=200'
rules = (
# Rule(SgmlLinkExtractor(allow=(), restrict_xpaths=('//div[@class="pageNumbers"]/a',)), callback="parse_items", follow= True),
Rule(SgmlLinkExtractor(allow=(), restrict_xpaths=('//span[@class="buttons"]/a',)), callback="parse_items",
follow=True),
)
def __init__(self, name=None, **kwargs):
from cwcraigs.DBUtils import DBUtils
self.dbutils = DBUtils(kwargs['mongo_uri'], kwargs['mongo_db'], kwargs['collection_name'])
self.dbutils.open_spider()
super(CraigsSpider, self).__init__(name, **kwargs)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
return super(CraigsSpider, cls).from_crawler(crawler,
*args,
mongo_uri=crawler.settings.get('MONGODB_SERVER'),
mongo_db=crawler.settings.get('MONGODB_DB', 'items'),
collection_name=(
"__history" + crawler.settings.get('MONGODB_COLLECTION'))
)
def parse_items(self, response):
hxs = HtmlXPathSelector(response)
titles = hxs.xpath('//span[@class="pl"]')
items = []
count = 0
for title in titles:
item = CraigslistSampleItem()
item["url"] = (response.url+"-{0}").format(count)
item["title"] = title.xpath('a/span/text()').extract()
item["link"] = title.xpath("a/@href").extract()
items.append(item)
count += 1
return(items) | mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GL/ARB/sync.py | 9 | 3182 | '''OpenGL extension ARB.sync
This module customises the behaviour of the
OpenGL.raw.GL.ARB.sync to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces the concept of "sync objects". Sync
objects are a synchronization primitive - a representation of events
whose completion status can be tested or waited upon. One specific
type of sync object, the "fence sync object", is supported in this
extension, and additional types can easily be added in the future.
Fence sync objects have corresponding fences, which are inserted
into the OpenGL command stream at the time the sync object is
created. A sync object can be queried for a given condition. The
only condition supported for fence sync objects is completion of the
corresponding fence command. Fence completion allows applications to
request a partial Finish, wherein all commands prior to the fence
will be forced to complete before control is returned to the calling
process.
These new mechanisms allow for synchronization between the host CPU
and the GPU, which may be accessing the same resources (typically
memory), as well as between multiple GL contexts bound to multiple
threads in the host CPU.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/sync.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.sync import *
from OpenGL.raw.GL.ARB.sync import _EXTENSION_NAME
def glInitSyncARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glGetInteger64v=wrapper.wrapper(glGetInteger64v).setOutput(
'data',size=_glgets._glget_size_mapping,pnameArg='pname',orPassIn=True
)
glGetSynciv=wrapper.wrapper(glGetSynciv).setOutput(
'length',size=(1,),orPassIn=True
).setOutput(
'values',size=lambda x:(x,),pnameArg='bufSize',orPassIn=True
)
### END AUTOGENERATED SECTION
from OpenGL.raw.GL._types import GLint
from OpenGL.arrays import GLintArray
def glGetSync( sync, pname, bufSize=1,length=None,values=None ):
"""Wrapper around glGetSynciv that auto-allocates buffers
sync -- the GLsync struct pointer (see glGetSynciv)
pname -- constant to retrieve (see glGetSynciv)
bufSize -- defaults to 1, maximum number of items to retrieve,
currently all constants are defined to return a single
value
length -- None or a GLint() instance (ONLY!), must be a byref()
capable object with a .value attribute which retrieves the
set value
values -- None or an array object, if None, will be a default
return-array-type of length bufSize
returns values[:length.value], i.e. an array with the values set
by the call, currently always a single-value array.
"""
if values is None:
values = GLintArray.zeros( (bufSize,) )
if length is None:
length = GLint()
glGetSynciv( sync, pname, bufSize, length, values )
written = length.value
return values[:written]
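# Hedged usage sketch, not part of the original module: inserting a fence and
# polling its completion with the glGetSync helper above. It assumes a current
# OpenGL context and that the raw ARB_sync entry points (glFenceSync,
# GL_SYNC_GPU_COMMANDS_COMPLETE, GL_SYNC_STATUS, GL_SIGNALED) are provided by
# the star-import of OpenGL.raw.GL.ARB.sync near the top of this file; the
# function name is illustrative only.
def _example_fence_completed():
    sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0)  # queue a fence after prior GL commands
    status = glGetSync(sync, GL_SYNC_STATUS)              # auto-allocating wrapper defined above
    return status[0] == GL_SIGNALED                       # True once the fence has completed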
| gpl-3.0 |
anant-dev/django | tests/custom_managers/models.py | 238 | 6791 | """
Giving models a custom manager
You can use a custom ``Manager`` in a particular model by extending the base
``Manager`` class and instantiating your custom ``Manager`` in your model.
There are two reasons you might want to customize a ``Manager``: to add extra
``Manager`` methods, and/or to modify the initial ``QuerySet`` the ``Manager``
returns.
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class PersonManager(models.Manager):
def get_fun_people(self):
return self.filter(fun=True)
class PublishedBookManager(models.Manager):
def get_queryset(self):
return super(PublishedBookManager, self).get_queryset().filter(is_published=True)
class CustomQuerySet(models.QuerySet):
def filter(self, *args, **kwargs):
queryset = super(CustomQuerySet, self).filter(fun=True)
queryset._filter_CustomQuerySet = True
return queryset
def public_method(self, *args, **kwargs):
return self.all()
def _private_method(self, *args, **kwargs):
return self.all()
def optout_public_method(self, *args, **kwargs):
return self.all()
optout_public_method.queryset_only = True
def _optin_private_method(self, *args, **kwargs):
return self.all()
_optin_private_method.queryset_only = False
class BaseCustomManager(models.Manager):
def __init__(self, arg):
super(BaseCustomManager, self).__init__()
self.init_arg = arg
def filter(self, *args, **kwargs):
queryset = super(BaseCustomManager, self).filter(fun=True)
queryset._filter_CustomManager = True
return queryset
def manager_only(self):
return self.all()
CustomManager = BaseCustomManager.from_queryset(CustomQuerySet)
class CustomInitQuerySet(models.QuerySet):
# QuerySet with an __init__() method that takes an additional argument.
def __init__(self, custom_optional_arg=None, model=None, query=None, using=None, hints=None):
super(CustomInitQuerySet, self).__init__(model=model, query=query, using=using, hints=hints)
class DeconstructibleCustomManager(BaseCustomManager.from_queryset(CustomQuerySet)):
def __init__(self, a, b, c=1, d=2):
super(DeconstructibleCustomManager, self).__init__(a)
class FunPeopleManager(models.Manager):
def get_queryset(self):
return super(FunPeopleManager, self).get_queryset().filter(fun=True)
class BoringPeopleManager(models.Manager):
def get_queryset(self):
return super(BoringPeopleManager, self).get_queryset().filter(fun=False)
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=False)
favorite_book = models.ForeignKey('Book', models.SET_NULL, null=True, related_name='favorite_books')
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = PersonManager()
fun_people = FunPeopleManager()
boring_people = BoringPeopleManager()
custom_queryset_default_manager = CustomQuerySet.as_manager()
custom_queryset_custom_manager = CustomManager('hello')
custom_init_queryset_manager = CustomInitQuerySet.as_manager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class FunPerson(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=True)
favorite_book = models.ForeignKey(
'Book',
models.SET_NULL,
null=True,
related_name='fun_people_favorite_books',
)
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = FunPeopleManager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField(default=False)
published_objects = PublishedBookManager()
authors = models.ManyToManyField(Person, related_name='books')
fun_authors = models.ManyToManyField(FunPerson, related_name='books')
favorite_things = GenericRelation(Person,
content_type_field='favorite_thing_type', object_id_field='favorite_thing_id')
fun_people_favorite_things = GenericRelation(FunPerson,
content_type_field='favorite_thing_type', object_id_field='favorite_thing_id')
def __str__(self):
return self.title
class FastCarManager(models.Manager):
def get_queryset(self):
return super(FastCarManager, self).get_queryset().filter(top_speed__gt=150)
@python_2_unicode_compatible
class Car(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
def __str__(self):
return self.name
class RestrictedManager(models.Manager):
def get_queryset(self):
return super(RestrictedManager, self).get_queryset().filter(is_public=True)
@python_2_unicode_compatible
class RelatedModel(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class RestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.ForeignKey(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
@python_2_unicode_compatible
class OneToOneRestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.OneToOneField(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
class AbstractPerson(models.Model):
abstract_persons = models.Manager()
objects = models.CharField(max_length=30)
class Meta:
abstract = True
class PersonFromAbstract(AbstractPerson):
pass
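# Hedged usage sketch, not part of the original test models: how the custom
# managers defined above filter rows. It assumes a configured Django test
# database; _example_manager_usage is an illustrative name only.
def _example_manager_usage():
    Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
    Person.objects.create(first_name="Droopy", last_name="Dog", fun=False)
    fun_qs = Person.fun_people.all()            # FunPeopleManager -> only fun=True rows
    boring_qs = Person.boring_people.all()      # BoringPeopleManager -> only fun=False rows
    extra_qs = Person.objects.get_fun_people()  # extra method on the default PersonManager
    return fun_qs, boring_qs, extra_qs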
| bsd-3-clause |
goddardl/cortex | test/IECoreRI/MultipleContextsTest.py | 7 | 2940 | ##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import IECoreRI
import os.path
import os
class MultipleContextsTest( IECoreRI.TestCase ) :
def test( self ) :
r1 = IECoreRI.Renderer( "test/IECoreRI/output/contextOne.rib" )
r2 = IECoreRI.Renderer( "test/IECoreRI/output/contextTwo.rib" )
self.assertEqual( r1.getAttribute( "doubleSided" ), IECore.BoolData( True ) )
self.assertEqual( r2.getAttribute( "doubleSided" ), IECore.BoolData( True ) )
r1.setAttribute( "doubleSided", IECore.BoolData( False ) )
self.assertEqual( r1.getAttribute( "doubleSided" ), IECore.BoolData( False ) )
self.assertEqual( r2.getAttribute( "doubleSided" ), IECore.BoolData( True ) )
r1.setAttribute( "doubleSided", IECore.BoolData( True ) )
self.assertEqual( r1.getAttribute( "doubleSided" ), IECore.BoolData( True ) )
self.assertEqual( r2.getAttribute( "doubleSided" ), IECore.BoolData( True ) )
r2.setAttribute( "doubleSided", IECore.BoolData( False ) )
self.assertEqual( r1.getAttribute( "doubleSided" ), IECore.BoolData( True ) )
self.assertEqual( r2.getAttribute( "doubleSided" ), IECore.BoolData( False ) )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
kustodian/ansible | lib/ansible/modules/cloud/vmware/vmware_dvs_host.py | 23 | 10278 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# Copyright: (c) 2019, VMware Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vmware_dvs_host
short_description: Add or remove a host from distributed virtual switch
description:
- Manage a host system from distributed virtual switch.
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
- Abhijeet Kasurde (@Akasurde)
- Joseph Andreatta (@vmwjoseph)
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.7"
- PyVmomi
options:
esxi_hostname:
description:
- The ESXi hostname.
required: True
type: str
switch_name:
description:
- The name of the Distributed vSwitch.
required: True
type: str
vmnics:
description:
- The ESXi hosts vmnics to use with the Distributed vSwitch.
required: True
type: list
state:
description:
- If the host should be present or absent attached to the vSwitch.
choices: [ present, absent ]
default: 'present'
type: str
vendor_specific_config:
description:
- List of key,value dictionaries for the Vendor Specific Configuration.
- 'Element attributes are:'
- '- C(key) (str): Key of setting. (default: None)'
- '- C(value) (str): Value of setting. (default: None)'
required: False
version_added: '2.9'
type: list
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Add Host to dVS
vmware_dvs_host:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
switch_name: dvSwitch
vmnics:
- vmnic0
- vmnic1
state: present
delegate_to: localhost
- name: Add Host to dVS/enable learnswitch (https://labs.vmware.com/flings/learnswitch)
vmware_dvs_host:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
switch_name: dvSwitch
vendor_specific_config:
- key: com.vmware.netoverlay.layer1
value: learnswitch
vmnics:
- vmnic0
- vmnic1
state: present
delegate_to: localhost
'''
try:
from collections import Counter
HAS_COLLECTIONS_COUNTER = True
except ImportError as e:
HAS_COLLECTIONS_COUNTER = False
try:
from pyVmomi import vim, vmodl
except ImportError as e:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (PyVmomi, find_dvs_by_name, find_hostsystem_by_name,
vmware_argument_spec, wait_for_task)
from ansible.module_utils._text import to_native
class VMwareDvsHost(PyVmomi):
def __init__(self, module):
super(VMwareDvsHost, self).__init__(module)
self.dv_switch = None
self.uplink_portgroup = None
self.host = None
self.dv_switch = None
self.nic = None
self.state = self.module.params['state']
self.switch_name = self.module.params['switch_name']
self.esxi_hostname = self.module.params['esxi_hostname']
self.vmnics = self.module.params['vmnics']
self.vendor_specific_config = self.module.params['vendor_specific_config']
def process_state(self):
dvs_host_states = {
'absent': {
'present': self.state_destroy_dvs_host,
'absent': self.state_exit_unchanged,
},
'present': {
'update': self.state_update_dvs_host,
'present': self.state_exit_unchanged,
'absent': self.state_create_dvs_host,
}
}
try:
dvs_host_states[self.state][self.check_dvs_host_state()]()
except vmodl.RuntimeFault as runtime_fault:
self.module.fail_json(msg=to_native(runtime_fault.msg))
except vmodl.MethodFault as method_fault:
self.module.fail_json(msg=to_native(method_fault.msg))
except Exception as e:
self.module.fail_json(msg=to_native(e))
def find_dvs_uplink_pg(self):
# There should only ever be a single uplink port group on
# a distributed virtual switch
dvs_uplink_pg = self.dv_switch.config.uplinkPortgroup[0] if len(self.dv_switch.config.uplinkPortgroup) else None
return dvs_uplink_pg
# operation should be edit, add and remove
def modify_dvs_host(self, operation):
changed, result = False, None
spec = vim.DistributedVirtualSwitch.ConfigSpec()
spec.configVersion = self.dv_switch.config.configVersion
spec.host = [vim.dvs.HostMember.ConfigSpec()]
spec.host[0].operation = operation
spec.host[0].host = self.host
if self.vendor_specific_config:
config = list()
for item in self.vendor_specific_config:
config.append(vim.dvs.KeyedOpaqueBlob(key=item['key'], opaqueData=item['value']))
spec.host[0].vendorSpecificConfig = config
if operation in ("edit", "add"):
spec.host[0].backing = vim.dvs.HostMember.PnicBacking()
count = 0
for nic in self.vmnics:
spec.host[0].backing.pnicSpec.append(vim.dvs.HostMember.PnicSpec())
spec.host[0].backing.pnicSpec[count].pnicDevice = nic
spec.host[0].backing.pnicSpec[count].uplinkPortgroupKey = self.uplink_portgroup.key
count += 1
try:
task = self.dv_switch.ReconfigureDvs_Task(spec)
changed, result = wait_for_task(task)
except vmodl.fault.NotSupported as not_supported:
self.module.fail_json(msg="Failed to configure DVS host %s as it is not"
" compatible with the VDS version." % self.esxi_hostname,
details=to_native(not_supported.msg))
return changed, result
def state_destroy_dvs_host(self):
operation, changed, result = ("remove", True, None)
if not self.module.check_mode:
changed, result = self.modify_dvs_host(operation)
self.module.exit_json(changed=changed, result=to_native(result))
def state_exit_unchanged(self):
self.module.exit_json(changed=False)
def state_update_dvs_host(self):
operation, changed, result = ("edit", True, None)
if not self.module.check_mode:
changed, result = self.modify_dvs_host(operation)
self.module.exit_json(changed=changed, result=to_native(result))
def state_create_dvs_host(self):
operation, changed, result = ("add", True, None)
if not self.module.check_mode:
changed, result = self.modify_dvs_host(operation)
self.module.exit_json(changed=changed, result=to_native(result))
def find_host_attached_dvs(self):
for dvs_host_member in self.dv_switch.config.host:
if dvs_host_member.config.host.name == self.esxi_hostname:
return dvs_host_member.config.host
return None
def check_uplinks(self):
pnic_device = []
for dvs_host_member in self.dv_switch.config.host:
if dvs_host_member.config.host == self.host:
for pnicSpec in dvs_host_member.config.backing.pnicSpec:
pnic_device.append(pnicSpec.pnicDevice)
return Counter(pnic_device) == Counter(self.vmnics)
def check_dvs_host_state(self):
self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
if self.dv_switch is None:
self.module.fail_json(msg="A distributed virtual switch %s "
"does not exist" % self.switch_name)
self.uplink_portgroup = self.find_dvs_uplink_pg()
if self.uplink_portgroup is None:
self.module.fail_json(msg="An uplink portgroup does not exist on"
" the distributed virtual switch %s" % self.switch_name)
self.host = self.find_host_attached_dvs()
if self.host is None:
# We still need the HostSystem object to add the host
# to the distributed vswitch
self.host = find_hostsystem_by_name(self.content, self.esxi_hostname)
if self.host is None:
self.module.fail_json(msg="The esxi_hostname %s does not exist "
"in vCenter" % self.esxi_hostname)
return 'absent'
else:
if self.check_uplinks():
return 'present'
else:
return 'update'
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
dict(
esxi_hostname=dict(required=True, type='str'),
switch_name=dict(required=True, type='str'),
vmnics=dict(required=True, type='list'),
state=dict(default='present', choices=['present', 'absent'], type='str'),
vendor_specific_config=dict(
type='list',
elements='dict',
required=False,
options=dict(
key=dict(type='str', required=True),
value=dict(type='str', required=True),
),
),
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_COLLECTIONS_COUNTER:
module.fail_json(msg='collections.Counter from Python-2.7 is required for this module')
vmware_dvs_host = VMwareDvsHost(module)
vmware_dvs_host.process_state()
if __name__ == '__main__':
main()
| gpl-3.0 |
bruderstein/PythonScript | PythonLib/min/encodings/raw_unicode_escape.py | 852 | 1208 | """ Python 'raw-unicode-escape' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.raw_unicode_escape_encode
decode = codecs.raw_unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.raw_unicode_escape_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.raw_unicode_escape_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='raw-unicode-escape',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| gpl-2.0 |
isnnn/Sick-Beard-TPB | lib/rtorrent/rpc/__init__.py | 158 | 10775 | # Copyright (c) 2013 Chris Lucas, <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import inspect
import rtorrent
import re
from rtorrent.common import bool_to_int, convert_version_tuple_to_str,\
safe_repr
from rtorrent.err import MethodError
from rtorrent.compat import xmlrpclib
def get_varname(rpc_call):
"""Transform rpc method into variable name.
@newfield example: Example
@example: if the name of the rpc method is 'p.get_down_rate', the variable
name will be 'down_rate'
"""
# extract variable name from xmlrpc func name
r = re.search(
"([ptdf]\.|system\.|get\_|is\_|set\_)+([^=]*)", rpc_call, re.I)
if r:
return(r.groups()[-1])
else:
return(None)
def _handle_unavailable_rpc_method(method, rt_obj):
msg = "Method isn't available."
if rt_obj._get_client_version_tuple() < method.min_version:
msg = "This method is only available in " \
"RTorrent version v{0} or later".format(
convert_version_tuple_to_str(method.min_version))
raise MethodError(msg)
class DummyClass:
def __init__(self):
pass
class Method:
"""Represents an individual RPC method"""
def __init__(self, _class, method_name,
rpc_call, docstring=None, varname=None, **kwargs):
self._class = _class # : Class this method is associated with
self.class_name = _class.__name__
self.method_name = method_name # : name of public-facing method
self.rpc_call = rpc_call # : name of rpc method
self.docstring = docstring # : docstring for rpc method (optional)
self.varname = varname # : variable for the result of the method call, usually set to self.varname
self.min_version = kwargs.get("min_version", (
0, 0, 0)) # : Minimum version of rTorrent required
self.boolean = kwargs.get("boolean", False) # : returns boolean value?
self.post_process_func = kwargs.get(
"post_process_func", None) # : custom post process function
self.aliases = kwargs.get(
"aliases", []) # : aliases for method (optional)
self.required_args = []
#: Arguments required when calling the method (not utilized)
self.method_type = self._get_method_type()
if self.varname is None:
self.varname = get_varname(self.rpc_call)
assert self.varname is not None, "Couldn't get variable name."
def __repr__(self):
return safe_repr("Method(method_name='{0}', rpc_call='{1}')",
self.method_name, self.rpc_call)
def _get_method_type(self):
"""Determine whether method is a modifier or a retriever"""
if self.method_name[:4] == "set_": return('m') # modifier
else:
return('r') # retriever
def is_modifier(self):
if self.method_type == 'm':
return(True)
else:
return(False)
def is_retriever(self):
if self.method_type == 'r':
return(True)
else:
return(False)
def is_available(self, rt_obj):
if rt_obj._get_client_version_tuple() < self.min_version or \
self.rpc_call not in rt_obj._get_rpc_methods():
return(False)
else:
return(True)
class Multicall:
def __init__(self, class_obj, **kwargs):
self.class_obj = class_obj
if class_obj.__class__.__name__ == "RTorrent":
self.rt_obj = class_obj
else:
self.rt_obj = class_obj._rt_obj
self.calls = []
def add(self, method, *args):
"""Add call to multicall
@param method: L{Method} instance or name of raw RPC method
@type method: Method or str
@param args: call arguments
"""
# if a raw rpc method was given instead of a Method instance,
# try and find the instance for it. And if all else fails, create a
# dummy Method instance
if isinstance(method, str):
result = find_method(method)
# if result not found
if result == -1:
method = Method(DummyClass, method, method)
else:
method = result
# ensure method is available before adding
if not method.is_available(self.rt_obj):
_handle_unavailable_rpc_method(method, self.rt_obj)
self.calls.append((method, args))
def list_calls(self):
for c in self.calls:
print(c)
def call(self):
"""Execute added multicall calls
@return: the results (post-processed), in the order they were added
@rtype: tuple
"""
m = xmlrpclib.MultiCall(self.rt_obj._get_conn())
for call in self.calls:
method, args = call
rpc_call = getattr(method, "rpc_call")
getattr(m, rpc_call)(*args)
results = m()
results = tuple(results)
results_processed = []
for r, c in zip(results, self.calls):
method = c[0] # Method instance
result = process_result(method, r)
results_processed.append(result)
# assign result to class_obj
exists = hasattr(self.class_obj, method.varname)
if not exists or not inspect.ismethod(getattr(self.class_obj, method.varname)):
setattr(self.class_obj, method.varname, result)
return(tuple(results_processed))
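# Minimal usage sketch (an illustration, not code from the original file):
# batching two retrievals for a single torrent; `torrent` and the rpc call
# names are placeholder values.
#
#   m = Multicall(torrent)
#   m.add("d.get_down_rate", torrent.rpc_id)
#   m.add("d.get_up_rate", torrent.rpc_id)
#   down_rate, up_rate = m.call()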
def call_method(class_obj, method, *args):
"""Handles single RPC calls
@param class_obj: Peer/File/Torrent/Tracker/RTorrent instance
@type class_obj: object
@param method: L{Method} instance or name of raw RPC method
@type method: Method or str
"""
if method.is_retriever():
args = args[:-1]
else:
assert args[-1] is not None, "No argument given."
if class_obj.__class__.__name__ == "RTorrent":
rt_obj = class_obj
else:
rt_obj = class_obj._rt_obj
# check if rpc method is even available
if not method.is_available(rt_obj):
_handle_unavailable_rpc_method(method, rt_obj)
m = Multicall(class_obj)
m.add(method, *args)
# only added one method, only getting one result back
ret_value = m.call()[0]
####### OBSOLETE ##########################################################
# if method.is_retriever():
# #value = process_result(method, ret_value)
# value = ret_value #MultiCall already processed the result
# else:
# # we're setting the user's input to method.varname
# # but we'll return the value that xmlrpc gives us
# value = process_result(method, args[-1])
##########################################################################
return(ret_value)
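# Sketch of the *args convention handled above (an inference, not original
# code): a retriever is called with a trailing dummy argument that args[:-1]
# strips, while a modifier's last argument is the value to set and must not
# be None.  `torrent`, `get_method` and `set_method` are placeholders.
#
#   call_method(torrent, get_method, torrent.rpc_id, None)  # retriever
#   call_method(torrent, set_method, torrent.rpc_id, 1)     # modifier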
def find_method(rpc_call):
"""Return L{Method} instance associated with given RPC call"""
method_lists = [
rtorrent.methods,
rtorrent.file.methods,
rtorrent.tracker.methods,
rtorrent.peer.methods,
rtorrent.torrent.methods,
]
for l in method_lists:
for m in l:
if m.rpc_call.lower() == rpc_call.lower():
return(m)
return(-1)
def process_result(method, result):
"""Process given C{B{result}} based on flags set in C{B{method}}
@param method: L{Method} instance
@type method: Method
@param result: result to be processed (the result of given L{Method} instance)
@note: Supported Processing:
- boolean - convert ones and zeros returned by rTorrent and
convert to python boolean values
"""
# handle custom post processing function
if method.post_process_func is not None:
result = method.post_process_func(result)
# is boolean?
if method.boolean:
if result in [1, '1']:
result = True
elif result in [0, '0']:
result = False
return(result)
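# Hypothetical example of the boolean handling above (not original code): for
# a Method created with boolean=True, process_result(method, '1') returns True
# and process_result(method, 0) returns False.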
def _build_rpc_methods(class_, method_list):
"""Build glorified aliases to raw RPC methods"""
instance = None
if not inspect.isclass(class_):
instance = class_
class_ = instance.__class__
for m in method_list:
class_name = m.class_name
if class_name != class_.__name__:
continue
if class_name == "RTorrent":
caller = lambda self, arg = None, method = m:\
call_method(self, method, bool_to_int(arg))
elif class_name == "Torrent":
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name in ["Tracker", "File"]:
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name == "Peer":
caller = lambda self, arg = None, method = m:\
call_method(self, method, self.rpc_id,
bool_to_int(arg))
elif class_name == "Group":
caller = lambda arg = None, method = m: \
call_method(instance, method, bool_to_int(arg))
if m.docstring is None:
m.docstring = ""
# print(m)
docstring = """{0}
@note: Variable where the result for this method is stored: {1}.{2}""".format(
m.docstring,
class_name,
m.varname)
caller.__doc__ = docstring
for method_name in [m.method_name] + list(m.aliases):
if instance is None:
setattr(class_, method_name, caller)
else:
setattr(instance, method_name, caller)
| gpl-3.0 |
marcore/edx-platform | lms/djangoapps/gating/tests/test_api.py | 11 | 6365 | """
Unit tests for gating.signals module
"""
from mock import patch
from nose.plugins.attrib import attr
from ddt import ddt, data, unpack
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.helpers import LoginEnrollmentTestCase
from milestones import api as milestones_api
from milestones.tests.utils import MilestonesTestCaseMixin
from openedx.core.lib.gating import api as gating_api
from gating.api import _get_xblock_parent, evaluate_prerequisite
class GatingTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Base TestCase class for setting up a basic course structure
and testing the gating feature
"""
def setUp(self):
"""
Initial data setup
"""
super(GatingTestCase, self).setUp()
# Patch Milestones feature flag
self.settings_patcher = patch.dict('django.conf.settings.FEATURES', {'MILESTONES_APP': True})
self.settings_patcher.start()
# create course
self.course = CourseFactory.create(
org='edX',
number='EDX101',
run='EDX101_RUN1',
display_name='edX 101'
)
self.course.enable_subsection_gating = True
self.course.save()
self.store.update_item(self.course, 0)
# create chapter
self.chapter1 = ItemFactory.create(
parent_location=self.course.location,
category='chapter',
display_name='untitled chapter 1'
)
# create sequentials
self.seq1 = ItemFactory.create(
parent_location=self.chapter1.location,
category='sequential',
display_name='untitled sequential 1'
)
self.seq2 = ItemFactory.create(
parent_location=self.chapter1.location,
category='sequential',
display_name='untitled sequential 2'
)
# create vertical
self.vert1 = ItemFactory.create(
parent_location=self.seq1.location,
category='vertical',
display_name='untitled vertical 1'
)
# create problem
self.prob1 = ItemFactory.create(
parent_location=self.vert1.location,
category='problem',
display_name='untitled problem 1'
)
# create orphan
self.prob2 = ItemFactory.create(
parent_location=self.course.location,
category='problem',
display_name='untitled problem 2'
)
def tearDown(self):
"""
Tear down initial setup
"""
self.settings_patcher.stop()
super(GatingTestCase, self).tearDown()
class TestGetXBlockParent(GatingTestCase):
"""
Tests for the get_xblock_parent function
"""
def test_get_direct_parent(self):
""" Test test_get_direct_parent """
result = _get_xblock_parent(self.vert1)
self.assertEqual(result.location, self.seq1.location)
def test_get_parent_with_category(self):
""" Test test_get_parent_of_category """
result = _get_xblock_parent(self.vert1, 'sequential')
self.assertEqual(result.location, self.seq1.location)
result = _get_xblock_parent(self.vert1, 'chapter')
self.assertEqual(result.location, self.chapter1.location)
def test_get_parent_none(self):
""" Test test_get_parent_none """
result = _get_xblock_parent(self.vert1, 'unit')
self.assertIsNone(result)
@attr('shard_3')
@ddt
class TestEvaluatePrerequisite(GatingTestCase, MilestonesTestCaseMixin):
"""
Tests for the evaluate_prerequisite function
"""
def setUp(self):
super(TestEvaluatePrerequisite, self).setUp()
self.user_dict = {'id': self.user.id}
self.prereq_milestone = None
def _setup_gating_milestone(self, min_score):
"""
Setup a gating milestone for testing
"""
gating_api.add_prerequisite(self.course.id, self.seq1.location)
gating_api.set_required_content(self.course.id, self.seq2.location, self.seq1.location, min_score)
self.prereq_milestone = gating_api.get_gating_milestone(self.course.id, self.seq1.location, 'fulfills')
@patch('courseware.grades.get_module_score')
@data((.5, True), (1, True), (0, False))
@unpack
def test_min_score_achieved(self, module_score, result, mock_module_score):
""" Test test_min_score_achieved """
self._setup_gating_milestone(50)
mock_module_score.return_value = module_score
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
self.assertEqual(milestones_api.user_has_milestone(self.user_dict, self.prereq_milestone), result)
@patch('gating.api.log.warning')
@patch('courseware.grades.get_module_score')
@data((.5, False), (1, True))
@unpack
def test_invalid_min_score(self, module_score, result, mock_module_score, mock_log):
""" Test test_invalid_min_score """
self._setup_gating_milestone(None)
mock_module_score.return_value = module_score
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
self.assertEqual(milestones_api.user_has_milestone(self.user_dict, self.prereq_milestone), result)
self.assertTrue(mock_log.called)
@patch('courseware.grades.get_module_score')
def test_orphaned_xblock(self, mock_module_score):
""" Test test_orphaned_xblock """
evaluate_prerequisite(self.course, self.prob2.location, self.user.id)
self.assertFalse(mock_module_score.called)
@patch('courseware.grades.get_module_score')
def test_no_prerequisites(self, mock_module_score):
""" Test test_no_prerequisites """
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
self.assertFalse(mock_module_score.called)
@patch('courseware.grades.get_module_score')
def test_no_gated_content(self, mock_module_score):
""" Test test_no_gated_content """
# Setup gating milestones data
gating_api.add_prerequisite(self.course.id, self.seq1.location)
evaluate_prerequisite(self.course, self.prob1.location, self.user.id)
self.assertFalse(mock_module_score.called)
| agpl-3.0 |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/timeseries/examples/multivariate_test.py | 91 | 1330 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the multivariate example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.examples import multivariate
from tensorflow.python.platform import test
class MultivariateExampleTest(test.TestCase):
def test_shapes_structural(self):
times, values = multivariate.multivariate_train_and_sample(
export_directory=self.get_temp_dir(), training_steps=5)
self.assertAllEqual([1100], times.shape)
self.assertAllEqual([1100, 5], values.shape)
if __name__ == "__main__":
test.main()
| apache-2.0 |
quanvm009/codev7 | openerp/addons/base/res/res_country.py | 46 | 4208 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
def location_name_search(self, cr, user, name='', args=None, operator='ilike',
context=None, limit=100):
if not args:
args = []
ids = []
if len(name) == 2:
ids = self.search(cr, user, [('code', 'ilike', name)] + args,
limit=limit, context=context)
search_domain = [('name', operator, name)]
if ids: search_domain.append(('id', 'not in', ids))
ids.extend(self.search(cr, user, search_domain + args,
limit=limit, context=context))
locations = self.name_get(cr, user, ids, context)
return sorted(locations, key=lambda (id, name): ids.index(id))
class Country(osv.osv):
_name = 'res.country'
_description = 'Country'
_columns = {
'name': fields.char('Country Name', size=64,
help='The full name of the country.', required=True, translate=True),
'code': fields.char('Country Code', size=2,
help='The ISO country code in two chars.\n'
'You can use this field for quick search.'),
'address_format': fields.text('Address Format', help="""You can state here the usual format to use for the \
addresses belonging to this country.\n\nYou can use the python-style string pattern with all the fields of the address \
(for example, use '%(street)s' to display the field 'street') plus
\n%(state_name)s: the name of the state
\n%(state_code)s: the code of the state
\n%(country_name)s: the name of the country
\n%(country_code)s: the code of the country"""),
'currency_id': fields.many2one('res.currency', 'Currency'),
}
_sql_constraints = [
('name_uniq', 'unique (name)',
'The name of the country must be unique !'),
('code_uniq', 'unique (code)',
'The code of the country must be unique !')
]
_defaults = {
'address_format': "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s",
}
_order='name'
name_search = location_name_search
def create(self, cursor, user, vals, context=None):
if vals.get('code'):
vals['code'] = vals['code'].upper()
return super(Country, self).create(cursor, user, vals,
context=context)
def write(self, cursor, user, ids, vals, context=None):
if vals.get('code'):
vals['code'] = vals['code'].upper()
return super(Country, self).write(cursor, user, ids, vals,
context=context)
class CountryState(osv.osv):
_description="Country state"
_name = 'res.country.state'
_columns = {
'country_id': fields.many2one('res.country', 'Country',
required=True),
'name': fields.char('State Name', size=64, required=True,
help='Administrative divisions of a country. E.g. Fed. State, Departement, Canton'),
'code': fields.char('State Code', size=3,
help='The state code in max. three chars.', required=True),
}
_order = 'code'
name_search = location_name_search
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fpiot/mbed-ats | workspace_tools/compliance/ioper_runner.py | 106 | 4777 | #!/usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <[email protected]>
"""
import sys
import mbed_lstools
from prettytable import PrettyTable
try:
from colorama import init
except:
pass
COLORAMA = 'colorama' in sys.modules
from ioper_base import IOperTestCaseBase
from ioper_test_fs import IOperTest_FileStructure_Basic
from ioper_test_fs import IOperTest_FileStructure_MbedEnabled
from ioper_test_target_id import IOperTest_TargetID_Basic
from ioper_test_target_id import IOperTest_TargetID_MbedEnabled
TEST_LIST = [IOperTest_TargetID_Basic('basic'),
IOperTest_TargetID_MbedEnabled('mbed-enabled'),
IOperTest_FileStructure_Basic('basic'),
IOperTest_FileStructure_MbedEnabled('mbed-enabled'),
IOperTestCaseBase('all'), # Dummy used to add 'all' option
]
class IOperTestRunner():
""" Calls all i/face interoperability tests
"""
def __init__(self, scope=None):
""" Test scope:
'pedantic' - all
'mbed-enabled' - let's try to check if this device is mbed-enabled
'basic' - just simple, passive tests (no device flashing)
"""
self.requested_scope = scope # Test scope given by user
self.raw_test_results = {} # Raw test results, can be used by exporters: { Platform: [test results]}
# Test scope definitions
self.SCOPE_BASIC = 'basic' # Basic tests, sanity checks
self.SCOPE_MBED_ENABLED = 'mbed-enabled' # Let's try to check if this device is mbed-enabled
self.SCOPE_PEDANTIC = 'pedantic' # Extensive tests
self.SCOPE_ALL = 'all' # All tests, equal to highest scope level
# This structure will help us sort test scopes so we can include them
# e.g. pedantic also includes basic and mbed-enabled tests
self.scopes = {self.SCOPE_BASIC : 0,
self.SCOPE_MBED_ENABLED : 1,
self.SCOPE_PEDANTIC : 2,
self.SCOPE_ALL : 99,
}
if COLORAMA:
init() # colorama.init()
def run(self):
""" Run tests, calculate overall score and print test results
"""
mbeds = mbed_lstools.create()
muts_list = mbeds.list_mbeds()
test_base = IOperTestCaseBase()
self.raw_test_results = {}
for i, mut in enumerate(muts_list):
result = []
self.raw_test_results[mut['platform_name']] = []
print "MBEDLS: Detected %s, port: %s, mounted: %s"% (mut['platform_name'],
mut['serial_port'],
mut['mount_point'])
print "Running interoperability test suite, scope '%s'" % (self.requested_scope)
for test_case in TEST_LIST:
if self.scopes[self.requested_scope] >= self.scopes[test_case.scope]:
res = test_case.test(param=mut)
result.extend(res)
self.raw_test_results[mut['platform_name']].extend(res)
columns = ['Platform', 'Test Case', 'Result', 'Scope', 'Description']
pt = PrettyTable(columns)
for col in columns:
pt.align[col] = 'l'
for tr in result:
severity, tr_name, tr_scope, text = tr
tr = (test_base.COLOR(severity, mut['platform_name']),
test_base.COLOR(severity, tr_name),
test_base.COLOR(severity, severity),
test_base.COLOR(severity, tr_scope),
test_base.COLOR(severity, text))
pt.add_row(list(tr))
print pt.get_string(border=True, sortby='Result')
if i + 1 < len(muts_list):
print
return self.raw_test_results
def get_available_oper_test_scopes():
""" Get list of available test scopes
"""
scopes = set()
for oper_test in TEST_LIST:
if oper_test.scope is not None:
scopes.add(oper_test.scope)
return list(scopes)
| apache-2.0 |
aprefontaine/TMScheduler | django/contrib/localflavor/ca/ca_provinces.py | 199 | 1397 | """
An alphabetical list of provinces and territories for use as `choices`
in a formfield, and a mapping of province misspellings/abbreviations to
normalized abbreviations
Source: http://www.canada.gc.ca/othergov/prov_e.html
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
PROVINCE_CHOICES = (
('AB', 'Alberta'),
('BC', 'British Columbia'),
('MB', 'Manitoba'),
('NB', 'New Brunswick'),
('NF', 'Newfoundland and Labrador'),
('NT', 'Northwest Territories'),
('NS', 'Nova Scotia'),
('NU', 'Nunavut'),
('ON', 'Ontario'),
('PE', 'Prince Edward Island'),
('QC', 'Quebec'),
('SK', 'Saskatchewan'),
('YK', 'Yukon')
)
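# Illustrative usage (not part of this module): as the module docstring says,
# the tuples above are meant for a form field's `choices` argument, e.g.
#
#   province = forms.ChoiceField(choices=PROVINCE_CHOICES)
#
# where `forms` is django.forms and the field name is a placeholder.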
PROVINCES_NORMALIZED = {
'ab': 'AB',
'alberta': 'AB',
'bc': 'BC',
'b.c.': 'BC',
'british columbia': 'BC',
'mb': 'MB',
'manitoba': 'MB',
'nb': 'NB',
'new brunswick': 'NB',
'nf': 'NF',
'newfoundland': 'NF',
'newfoundland and labrador': 'NF',
'nt': 'NT',
'northwest territories': 'NT',
'ns': 'NS',
'nova scotia': 'NS',
'nu': 'NU',
'nunavut': 'NU',
'on': 'ON',
'ontario': 'ON',
'pe': 'PE',
'pei': 'PE',
'p.e.i.': 'PE',
'prince edward island': 'PE',
'qc': 'QC',
'quebec': 'QC',
'sk': 'SK',
'saskatchewan': 'SK',
'yk': 'YK',
'yukon': 'YK',
} | bsd-3-clause |
htc-msm8660/android_kernel_htc_msm8660 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network devices.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
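# Hypothetical invocations (not part of the original script), showing one
# plausible way the options above reach sys.argv; exact perf argument syntax
# may vary by version and the device name is a placeholder:
#   perf script -s netdev-times.py
#   perf script -s netdev-times.py tx dev=eth0 debug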
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
raymondxyang/tensorflow | tensorflow/python/training/session_run_hook.py | 23 | 10423 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A SessionRunHook extends `session.run()` calls for the `MonitoredSession`.
SessionRunHooks are useful to track training, report progress, request early
stopping and more. SessionRunHooks use the observer pattern and notify at the
following points:
- when a session starts being used
- before a call to the `session.run()`
- after a call to the `session.run()`
- when the session is closed
A SessionRunHook encapsulates a piece of reusable/composable computation that
can piggyback a call to `MonitoredSession.run()`. A hook can add any
ops-or-tensor/feeds to the run call, and when the run call finishes with success
gets the outputs it requested. Hooks are allowed to add ops to the graph in
`hook.begin()`. The graph is finalized after the `begin()` method is called.
There are a few pre-defined monitors:
- StopAtStepHook: Request stop based on global_step
- CheckpointSaverHook: saves checkpoint
- LoggingTensorHook: outputs one or more tensor values to log
- NanTensorHook: Request stop if given `Tensor` contains Nans.
- SummarySaverHook: saves summaries to a summary writer
For more specific needs, you can create custom hooks:
class ExampleHook(SessionRunHook):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def after_create_session(self, session, coord):
# When this is called, the graph is finalized and
# ops can no longer be added to the graph.
print('Session created.')
def before_run(self, run_context):
print('Before calling session.run().')
return SessionRunArgs(self.your_tensor)
def after_run(self, run_context, run_values):
print('Done running one step. The value of my tensor: %s',
run_values.results)
if you-need-to-stop-loop:
run_context.request_stop()
def end(self, session):
print('Done with the session.')
To understand how hooks interact with calls to `MonitoredSession.run()`,
look at following code:
with MonitoredTrainingSession(hooks=your_hooks, ...) as sess:
while not sess.should_stop():
sess.run(your_fetches)
Above user code leads to following execution:
call hooks.begin()
sess = tf.Session()
call hooks.after_create_session()
while not stop is requested:
call hooks.before_run()
try:
results = sess.run(merged_fetches, feed_dict=merged_feeds)
except (errors.OutOfRangeError, StopIteration):
break
call hooks.after_run()
call hooks.end()
sess.close()
Note that if sess.run() raises OutOfRangeError or StopIteration then
hooks.after_run() will not be called but hooks.end() will still be called.
If sess.run() raises any other exception then neither hooks.after_run() nor
hooks.end() will be called.
@@SessionRunHook
@@SessionRunArgs
@@SessionRunContext
@@SessionRunValues
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
class SessionRunHook(object):
"""Hook to extend calls to MonitoredSession.run()."""
def begin(self):
"""Called once before using the session.
When called, the default graph is the one that will be launched in the
session. The hook can modify the graph by adding new operations to it.
After the `begin()` call the graph will be finalized and the other callbacks
can not modify the graph anymore. Second call of `begin()` on the same
graph, should not change the graph.
"""
pass
def after_create_session(self, session, coord): # pylint: disable=unused-argument
"""Called when new TensorFlow session is created.
This is called to signal the hooks that a new session has been created. This
has two essential differences with the situation in which `begin` is called:
* When this is called, the graph is finalized and ops can no longer be added
to the graph.
* This method will also be called as a result of recovering a wrapped
session, not only at the beginning of the overall session.
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
pass
def before_run(self, run_context): # pylint: disable=unused-argument
"""Called before each call to run().
You can return from this call a `SessionRunArgs` object indicating ops or
tensors to add to the upcoming `run()` call. These ops/tensors will be run
together with the ops/tensors originally passed to the original run() call.
The run args you return can also contain feeds to be added to the run()
call.
The `run_context` argument is a `SessionRunContext` that provides
information about the upcoming `run()` call: the originally requested
op/tensors, the TensorFlow Session.
At this point graph is finalized and you can not add ops.
Args:
run_context: A `SessionRunContext` object.
Returns:
None or a `SessionRunArgs` object.
"""
return None
def after_run(self,
run_context, # pylint: disable=unused-argument
run_values): # pylint: disable=unused-argument
"""Called after each call to run().
The `run_values` argument contains results of requested ops/tensors by
`before_run()`.
The `run_context` argument is the same one send to `before_run` call.
`run_context.request_stop()` can be called to stop the iteration.
If `session.run()` raises any exceptions then `after_run()` is not called.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
pass
def end(self, session): # pylint: disable=unused-argument
"""Called at the end of session.
The `session` argument can be used in case the hook wants to run final ops,
such as saving a last checkpoint.
If `session.run()` raises exception other than OutOfRangeError or
StopIteration then `end()` is not called.
Note the difference between `end()` and `after_run()` behavior when
`session.run()` raises OutOfRangeError or StopIteration. In that case
`end()` is called but `after_run()` is not called.
Args:
session: A TensorFlow Session that will be soon closed.
"""
pass
class SessionRunArgs(
collections.namedtuple("SessionRunArgs",
["fetches", "feed_dict", "options"])):
"""Represents arguments to be added to a `Session.run()` call.
Args:
fetches: Exactly like the 'fetches' argument to Session.Run().
Can be a single tensor or op, a list of 'fetches' or a dictionary
of fetches. For example:
fetches = global_step_tensor
fetches = [train_op, summary_op, global_step_tensor]
fetches = {'step': global_step_tensor, 'summ': summary_op}
Note that this can recurse as expected:
fetches = {'step': global_step_tensor,
'ops': [train_op, check_nan_op]}
feed_dict: Exactly like the `feed_dict` argument to `Session.Run()`
options: Exactly like the `options` argument to `Session.run()`, i.e., a
config_pb2.RunOptions proto.
"""
def __new__(cls, fetches, feed_dict=None, options=None):
return super(SessionRunArgs, cls).__new__(cls, fetches, feed_dict, options)
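# Minimal sketch (not part of the original module): a hook's before_run()
# returning extra fetches and feeds; `self._loss` and `self._lr_placeholder`
# are hypothetical tensors.
#
#   def before_run(self, run_context):
#     return SessionRunArgs(fetches={'loss': self._loss},
#                           feed_dict={self._lr_placeholder: 0.1})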
class SessionRunContext(object):
"""Provides information about the `session.run()` call being made.
Provides information about original request to `Session.Run()` function.
SessionRunHook objects can stop the loop by calling `request_stop()` of
`run_context`. In the future we may use this object to add more information
about run without changing the Hook API.
"""
def __init__(self, original_args, session):
"""Initializes SessionRunContext."""
self._original_args = original_args
self._session = session
self._stop_requested = False
@property
def original_args(self):
"""A `SessionRunArgs` object holding the original arguments of `run()`.
If user called `MonitoredSession.run(fetches=a, feed_dict=b)`, then this
field is equal to SessionRunArgs(a, b).
Returns:
A `SessionRunArgs` object
"""
return self._original_args
@property
def session(self):
"""A TensorFlow session object which will execute the `run`."""
return self._session
@property
def stop_requested(self):
"""Returns whether a stop is requested or not.
If true, `MonitoredSession` stops iterations.
Returns:
A `bool`
"""
return self._stop_requested
def request_stop(self):
"""Sets stop requested field.
Hooks can use this function to request stop of iterations.
`MonitoredSession` checks whether this is called or not.
"""
self._stop_requested = True
class SessionRunValues(
collections.namedtuple("SessionRunValues",
["results", "options", "run_metadata"])):
"""Contains the results of `Session.run()`.
In the future we may use this object to add more information about result of
run without changing the Hook API.
Args:
results: The return values from `Session.run()` corresponding to the fetches
attribute returned in the RunArgs. Note that this has the same shape as
the RunArgs fetches. For example:
fetches = global_step_tensor
=> results = nparray(int)
fetches = [train_op, summary_op, global_step_tensor]
=> results = [None, nparray(string), nparray(int)]
fetches = {'step': global_step_tensor, 'summ': summary_op}
=> results = {'step': nparray(int), 'summ': nparray(string)}
options: `RunOptions` from the `Session.run()` call.
run_metadata: `RunMetadata` from the `Session.run()` call.
"""
| apache-2.0 |
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/filebrowser/src/filebrowser/forms.py | 22 | 7669 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib
from django import forms
from django.contrib.auth.models import User, Group
from django.forms import FileField, CharField, BooleanField, Textarea
from django.forms.formsets import formset_factory, BaseFormSet, ManagementForm
from desktop.lib import i18n
from filebrowser.lib import rwx
from hadoop.fs import normpath
from django.utils.translation import ugettext_lazy as _
logger = logging.getLogger(__name__)
class FormSet(BaseFormSet):
def __init__(self, data=None, prefix=None, *args, **kwargs):
self.prefix = prefix or self.get_default_prefix()
if data:
self.data = {}
# Add management field info
# This is hard coded given that none of these keys or info is exportable
# This could be a problem point if the management form changes in later releases
self.data['%s-TOTAL_FORMS' % self.prefix] = len(data)
self.data['%s-INITIAL_FORMS' % self.prefix] = len(data)
self.data['%s-MAX_NUM_FORMS' % self.prefix] = 0
# Add correct data
for i in range(0, len(data)):
prefix = self.add_prefix(i)
for field in data[i]:
self.data['%s-%s' % (prefix, field)] = data[i][field]
BaseFormSet.__init__(self, self.data, self.prefix, *args, **kwargs)
class PathField(CharField):
def __init__(self, label, help_text=None, **kwargs):
kwargs.setdefault('required', True)
kwargs.setdefault('min_length', 1)
forms.CharField.__init__(self, label=label, help_text=help_text, **kwargs)
def clean(self, value):
return normpath(CharField.clean(self, value))
class EditorForm(forms.Form):
path = PathField(label=_("File to edit"))
contents = CharField(widget=Textarea, label=_("Contents"), required=False)
encoding = CharField(label=_('Encoding'), required=False)
def clean_path(self):
return urllib.unquote(self.cleaned_data.get('path', ''))
def clean_contents(self):
return self.cleaned_data.get('contents', '').replace('\r\n', '\n')
def clean_encoding(self):
encoding = self.cleaned_data.get('encoding', '').strip()
if not encoding:
return i18n.get_site_encoding()
return encoding
class RenameForm(forms.Form):
op = "rename"
src_path = CharField(label=_("File to rename"), help_text=_("The file to rename."))
dest_path = CharField(label=_("New name"), help_text=_("Rename the file to:"))
class BaseRenameFormSet(FormSet):
op = "rename"
RenameFormSet = formset_factory(RenameForm, formset=BaseRenameFormSet, extra=0)
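# Illustrative sketch (not part of the original module) of feeding the custom
# FormSet above with a list of dicts; the paths are placeholder values:
#
#   formset = RenameFormSet(data=[{'src_path': '/user/demo/old_name',
#                                  'dest_path': 'new_name'}],
#                           prefix='rename')
#   ok = formset.is_valid()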
class CopyForm(forms.Form):
op = "copy"
src_path = CharField(label=_("File to copy"), help_text=_("The file to copy."))
dest_path = CharField(label=_("Destination location"), help_text=_("Copy the file to:"))
class BaseCopyFormSet(FormSet):
op = "copy"
CopyFormSet = formset_factory(CopyForm, formset=BaseCopyFormSet, extra=0)
class UploadFileForm(forms.Form):
op = "upload"
# The "hdfs" prefix in "hdfs_file" triggers the HDFSfileUploadHandler
hdfs_file = FileField(forms.Form, label=_("File to Upload"))
dest = PathField(label=_("Destination Path"), help_text=_("Filename or directory to upload to."))
class UploadArchiveForm(forms.Form):
op = "upload"
archive = FileField(forms.Form, label=_("Archive to Upload"))
dest = PathField(label=_("Destination Path"), help_text=_("Archive to upload to."))
class RemoveForm(forms.Form):
op = "remove"
path = PathField(label=_("File to remove"))
class RmDirForm(forms.Form):
op = "rmdir"
path = PathField(label=_("Directory to remove"))
class RmTreeForm(forms.Form):
op = "rmtree"
path = PathField(label=_("Directory to remove (recursively)"))
class BaseRmTreeFormset(FormSet):
op = "rmtree"
RmTreeFormSet = formset_factory(RmTreeForm, formset=BaseRmTreeFormset, extra=0)
class RestoreForm(forms.Form):
op = "rmtree"
path = PathField(label=_("Path to restore"))
class BaseRestoreFormset(FormSet):
op = "restore"
RestoreFormSet = formset_factory(RestoreForm, formset=BaseRestoreFormset, extra=0)
class TrashPurgeForm(forms.Form):
op = "purge_trash"
class MkDirForm(forms.Form):
op = "mkdir"
path = PathField(label=_("Path in which to create the directory"))
name = PathField(label=_("Directory Name"))
class TouchForm(forms.Form):
op = "touch"
path = PathField(label=_("Path in which to create the file"))
name = PathField(label=_("File Name"))
class ChownForm(forms.Form):
op = "chown"
path = PathField(label=_("Path to change user/group ownership"))
# These could be "ChoiceFields", listing only users and groups
# that the current user has permissions for.
user = CharField(label=_("User"), min_length=1)
user_other = CharField(label=_("OtherUser"), min_length=1, required=False)
group = CharField(label=_("Group"), min_length=1)
group_other = CharField(label=_("OtherGroup"), min_length=1, required=False)
recursive = BooleanField(label=_("Recursive"), required=False)
def __init__(self, *args, **kwargs):
super(ChownForm, self).__init__(*args, **kwargs)
self.all_groups = [ group.name for group in Group.objects.all() ]
self.all_users = [ user.username for user in User.objects.all() ]
class BaseChownFormSet(FormSet):
op = "chown"
ChownFormSet = formset_factory(ChownForm, formset=BaseChownFormSet, extra=0)
class ChmodForm(forms.Form):
op = "chmod"
path = PathField(label=_("Path to change permissions"))
# By default, BooleanField only validates when
# it's checked.
user_read = BooleanField(required=False)
user_write = BooleanField(required=False)
user_execute = BooleanField(required=False)
group_read = BooleanField(required=False)
group_write = BooleanField(required=False)
group_execute = BooleanField(required=False)
other_read = BooleanField(required=False)
other_write = BooleanField(required=False)
other_execute = BooleanField(required=False)
sticky = BooleanField(required=False)
recursive = BooleanField(required=False)
names = ("user_read", "user_write", "user_execute",
"group_read", "group_write", "group_execute",
"other_read", "other_write", "other_execute",
"sticky")
def __init__(self, initial, *args, **kwargs):
logging.info(dir(self))
logging.info(dir(type(self)))
# Convert from string representation.
mode = initial.get("mode")
if mode is not None:
mode = int(mode, 8)
bools = rwx.expand_mode(mode)
for name, b in zip(self.names, bools):
initial[name] = b
logging.debug(initial)
kwargs['initial'] = initial
forms.Form.__init__(self, *args, **kwargs)
def full_clean(self):
forms.Form.full_clean(self)
if hasattr(self, "cleaned_data"):
self.cleaned_data["mode"] = rwx.compress_mode(map(lambda name: self.cleaned_data[name], self.names))
class BaseChmodFormSet(FormSet):
op = "chmod"
ChmodFormSet = formset_factory(ChmodForm, formset=BaseChmodFormSet, extra=0)
| gpl-2.0 |
tulir/maubot | maubot/management/api/plugin_upload.py | 1 | 4301 | # maubot - A plugin-based Matrix bot system.
# Copyright (C) 2019 Tulir Asokan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from io import BytesIO
from time import time
import traceback
import os.path
import re
from aiohttp import web
from packaging.version import Version
from ...loader import PluginLoader, ZippedPluginLoader, MaubotZipImportError
from .responses import resp
from .base import routes, get_config
@routes.put("/plugin/{id}")
async def put_plugin(request: web.Request) -> web.Response:
plugin_id = request.match_info.get("id", None)
content = await request.read()
file = BytesIO(content)
try:
pid, version = ZippedPluginLoader.verify_meta(file)
except MaubotZipImportError as e:
return resp.plugin_import_error(str(e), traceback.format_exc())
if pid != plugin_id:
return resp.pid_mismatch
plugin = PluginLoader.id_cache.get(plugin_id, None)
if not plugin:
return await upload_new_plugin(content, pid, version)
elif isinstance(plugin, ZippedPluginLoader):
return await upload_replacement_plugin(plugin, content, version)
else:
return resp.unsupported_plugin_loader
@routes.post("/plugins/upload")
async def upload_plugin(request: web.Request) -> web.Response:
content = await request.read()
file = BytesIO(content)
try:
pid, version = ZippedPluginLoader.verify_meta(file)
except MaubotZipImportError as e:
return resp.plugin_import_error(str(e), traceback.format_exc())
plugin = PluginLoader.id_cache.get(pid, None)
if not plugin:
return await upload_new_plugin(content, pid, version)
elif not request.query.get("allow_override"):
return resp.plugin_exists
elif isinstance(plugin, ZippedPluginLoader):
return await upload_replacement_plugin(plugin, content, version)
else:
return resp.unsupported_plugin_loader
async def upload_new_plugin(content: bytes, pid: str, version: Version) -> web.Response:
path = os.path.join(get_config()["plugin_directories.upload"], f"{pid}-v{version}.mbp")
with open(path, "wb") as p:
p.write(content)
try:
plugin = ZippedPluginLoader.get(path)
except MaubotZipImportError as e:
ZippedPluginLoader.trash(path)
return resp.plugin_import_error(str(e), traceback.format_exc())
return resp.created(plugin.to_dict())
async def upload_replacement_plugin(plugin: ZippedPluginLoader, content: bytes,
new_version: Version) -> web.Response:
dirname = os.path.dirname(plugin.path)
old_filename = os.path.basename(plugin.path)
if str(plugin.meta.version) in old_filename:
replacement = (str(new_version) if plugin.meta.version != new_version
else f"{new_version}-ts{int(time())}")
filename = re.sub(f"{re.escape(str(plugin.meta.version))}(-ts[0-9]+)?",
replacement, old_filename)
else:
filename = old_filename.rstrip(".mbp")
filename = f"{filename}-v{new_version}.mbp"
path = os.path.join(dirname, filename)
with open(path, "wb") as p:
p.write(content)
old_path = plugin.path
await plugin.stop_instances()
try:
await plugin.reload(new_path=path)
except MaubotZipImportError as e:
try:
await plugin.reload(new_path=old_path)
await plugin.start_instances()
except MaubotZipImportError:
pass
return resp.plugin_import_error(str(e), traceback.format_exc())
await plugin.start_instances()
ZippedPluginLoader.trash(old_path, reason="update")
return resp.updated(plugin.to_dict())
| agpl-3.0 |
cuboxi/android_external_chromium_org | gpu/gles2_conform_support/generate_gles2_embedded_data.py | 138 | 3451 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""generates files to embed the gles2 conformance test data in executable."""
import os
import sys
class GenerateEmbeddedFiles(object):
"""generates files to embed the gles2 conform test data in executable"""
paths_to_ignore = set([
".",
"..",
".svn",
".git",
".hg",
])
extensions_to_include = set([
".vert",
".frag",
".test",
".run",
])
def __init__(self, scan_dir, base_dir):
self.scan_dir = scan_dir
self.base_dir = base_dir
self.count = 0;
if self.base_dir != None:
self.files_data_h = open(os.path.join(base_dir, "FilesDATA.h"), "wb")
self.files_data_c = open(os.path.join(base_dir, "FilesDATA.c"), "wb")
self.files_toc_c = open(os.path.join(base_dir, "FilesTOC.c"), "wb")
self.files_data_h.write("#ifndef FilesDATA_h\n\n")
self.files_data_h.write("#define FilesDATA_h\n\n");
self.files_data_c.write("#include \"FilesDATA.h\"\n\n")
self.files_toc_c.write("#include \"FilesTOC.h\"\n\n");
self.files_toc_c.write("struct GTFVectorFileEntry tempFiles;\n\n");
self.files_toc_c.write("struct FileEntry files[] = {\n");
self.AddFiles(scan_dir)
if self.base_dir != None:
self.files_toc_c.write("\n};\n\n");
self.files_toc_c.write(
"int numFileEntrys = sizeof(files) / sizeof(struct FileEntry);\n");
self.files_data_h.write("\n\n#endif // FilesDATA_h\n");
self.files_data_c.close()
self.files_data_h.close()
self.files_toc_c.close()
def AddFiles(self, scan_dir):
"""Scan a folder and embed the contents of files."""
files = os.listdir(scan_dir)
sub_dirs = []
for file in files:
full_path = os.path.join(scan_dir, file)
ext = os.path.splitext(file)[1]
base_path = full_path[len(self.scan_dir) + 1:]
if os.path.isdir(full_path):
if not file in GenerateEmbeddedFiles.paths_to_ignore:
sub_dirs.append(full_path)
elif ext in GenerateEmbeddedFiles.extensions_to_include:
if self.base_dir == None:
print full_path.replace("\\", "/")
else:
self.count += 1
name = "_FILE_%s_%d" % (ext.upper(), self.count)
name = name.replace(".", "_")
self.files_data_h.write("extern const char %s[];\n" % name)
self.files_data_c.write("const char %s[] = \n" % name)
data = open(full_path, "r")
lines = data.readlines();
data.close()
for line in lines:
line = line.replace("\n", "")
line = line.replace("\r", "")
line = line.replace("\\", "\\\\")
line = line.replace("\"", "\\\"")
self.files_data_c.write('"%s\\n"\n' % line)
self.files_data_c.write(";\n")
self.files_toc_c.write("\t{ \"%s\", %s, 0 },\n" % (
base_path.replace("\\", "/"), name))
for sub_dir in sub_dirs:
self.AddFiles(sub_dir)
def main(argv):
"""This is the main function."""
if len(argv) >= 1:
scan_dir = argv[0]
else:
scan_dir = '.'
if len(argv) >= 2:
base_dir = argv[1]
else:
base_dir = None
GenerateEmbeddedFiles(scan_dir, base_dir)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
AMOboxTV/AMOBox.LegoBuild | plugin.video.titan/resources/lib/resolvers/hdcastorg.py | 3 | 1464 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.libraries import client
def resolve(url):
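    # Rebuild the hdcast.org embed page URL from the 'u' query parameter, fetch it
    # with the caller's referer, extract the stream address from the player config,
    # and return an rtmp-style URL carrying the swf, page and token parameters.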
try:
page = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
page = 'http://www.hdcast.org/embedlive2.php?u=%s&vw=670&vh=390' % page
try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except: referer = page
result = client.request(page, referer=referer)
streamer = re.compile('file\s*:\s*\'(.+?)\'').findall(result)[0]
token = 'SECURET0KEN#yw%.?()@W!'
url = '%s swfUrl=http://player.hdcast.org/jws/jwplayer.flash.swf pageUrl=%s token=%s swfVfy=1 live=1 timeout=15' % (streamer, page, token)
return url
except:
return
| gpl-2.0 |
vikatory/kbengine | kbe/src/lib/python/Lib/test/test_io.py | 60 | 128793 | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
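# A minimal sketch of that pattern (the names below are illustrative only):
#
#   class ExampleBufferTest(unittest.TestCase):
#       def test_roundtrip(self):
#           buf = self.tp(b"data")          # self.tp is supplied by the subclasses
#           self.assertEqual(buf.read(), b"data")
#
#   class CExampleBufferTest(ExampleBufferTest):
#       tp = io.BytesIO                     # C implementation
#
#   class PyExampleBufferTest(ExampleBufferTest):
#       tp = pyio.BytesIO                   # pure-Python implementation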
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.script_helper import assert_python_ok
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(TypeError, self.open, fn_with_NUL, 'w')
self.assertRaises(TypeError, self.open, bytes(fn_with_NUL, 'ascii'), 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2GB file and takes >2GB of disk space,
        # therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
pass
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self) :
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs so
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
            for enc in "ascii", "latin-1", "utf-8":  # , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
#Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
#shutdown_error = "LookupError: unknown encoding: ascii"
shutdown_error = "TypeError: 'NoneType' object is not iterable"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings.catch_warnings(record=True) as recorded:
open(r, *args, closefd=False, **kwargs)
support.gc_collect()
self.assertEqual(recorded, [])
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError,
wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
# Issue #22331: The test hangs on FreeBSD 7.2
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
LinTeX9527/linux | tools/perf/scripts/python/check-perf-trace.py | 1997 | 2539 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
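#
# A hedged usage sketch (the tracepoint names are ordinary kernel events and
# the exact perf invocation may differ on your system):
#
#   perf record -e irq:softirq_entry -e kmem:kmalloc -a -- sleep 1
#   perf script -s check-perf-trace.py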
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
aferr/LatticeMemCtl | src/arch/x86/isa/insts/general_purpose/control_transfer/xreturn.py | 20 | 3641 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop RET_NEAR
{
# Make the default data size of rets 64 bits in 64 bit mode
.adjust_env oszIn64Override
ld t1, ss, [1, t0, rsp]
# Check address of return
addi rsp, rsp, dsz
wripi t1, 0
};
def macroop RET_NEAR_I
{
# Make the default data size of rets 64 bits in 64 bit mode
.adjust_env oszIn64Override
limm t2, imm
ld t1, ss, [1, t0, rsp]
# Check address of return
addi rsp, rsp, dsz
add rsp, rsp, t2
wripi t1, 0
};
def macroop RET_FAR {
.adjust_env oszIn64Override
# Get the return RIP
ld t1, ss, [1, t0, rsp]
# Get the return CS
ld t2, ss, [1, t0, rsp], ssz
# Get the rpl
andi t3, t2, 0x3
# Get the cpl
    # Here we'd check if we're changing privilege levels. We'll just hope
# that doesn't happen yet.
# Do stuff if they're equal
andi t0, t2, 0xFC, flags=(EZF,), dataSize=2
br label("processDescriptor"), flags=(CEZF,)
andi t3, t2, 0xF8, dataSize=8
andi t0, t2, 0x4, flags=(EZF,), dataSize=2
br label("globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t3], dataSize=8
br label("processDescriptor")
globalDescriptor:
ld t3, tsg, [1, t0, t3], dataSize=8
processDescriptor:
chks t2, t3, IretCheck, dataSize=8
    # There should be validity checks on the return RIP here, but I'll do
# that later.
wrdl cs, t3, t2
wrsel cs, t2
wrip t0, t1
br label("end")
# Do other stuff if they're not.
end:
fault "NoFault"
};
'''
| bsd-3-clause |
mickele77/FreeCAD | src/Mod/Test/TestApp.py | 1 | 4140 | #***************************************************************************
#* (c) Juergen Riegel ([email protected]) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
import FreeCAD
import sys
import unittest
#---------------------------------------------------------------------------
# define the functions to test the FreeCAD base code
#---------------------------------------------------------------------------
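# Typical interactive use from the FreeCAD Python console (illustrative only,
# using the helpers defined below):
#   import TestApp
#   TestApp.Test("BaseTests")   # run a single named suite
#   TestApp.testAll()           # run every registered suite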
def All():
suite = unittest.TestSuite()
# Base system tests
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("UnicodeTests"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("Document"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("UnitTests"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("BaseTests"))
# Base system gui test
if (FreeCAD.GuiUp == 1):
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("Workbench"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("Menu"))
# add the module tests
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestFem"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("MeshTestsApp"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestSketcherApp"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestPartApp"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestPartDesignApp"))
# gui tests of modules
if (FreeCAD.GuiUp == 1):
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestSketcherGui"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestPartGui"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestPartDesignGui"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestDraft"))
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("TestArch"))
return suite
def TestText(s):
s = unittest.defaultTestLoader.loadTestsFromName(s)
r = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
r.run(s)
def Test(s):
TestText(s)
def testAll():
r = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
r.run(All())
def testUnit():
TestText(unittest.TestLoader().loadTestsFromName('UnitTests'))
def testDocument():
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromName("Document"))
TestText(suite)
| lgpl-2.1 |
inasafe/inasafe | safe/gui/tools/shake_grid/shakemap_converter_dialog.py | 3 | 14355 | # coding=utf-8
"""A dialog for converting grid.xml file."""
import logging
import os
from qgis.core import (QgsApplication, QgsProject, QgsRasterLayer,
QgsVectorLayer)
# noinspection PyPackageRequirements
from qgis.PyQt import QtCore, QtGui
# noinspection PyPackageRequirements
from qgis.PyQt.QtCore import QFileInfo, pyqtSlot
# noinspection PyPackageRequirements
from qgis.PyQt.QtWidgets import (
QDialog,
QDialogButtonBox,
QFileDialog,
QMessageBox
)
from qgis.utils import iface
from safe import messaging as m
from safe.common.version import get_version
from safe.definitions.constants import (
NONE_SMOOTHING,
NUMPY_SMOOTHING,
SCIPY_SMOOTHING
)
from safe.definitions.extra_keywords import (
extra_keyword_earthquake_event_id,
extra_keyword_earthquake_source
)
from safe.gui.tools.help.shakemap_converter_help import shakemap_converter_help
from safe.gui.tools.shake_grid.shake_grid import convert_mmi_data
from safe.gui.tools.wizard.wizard_dialog import WizardDialog
from safe.messaging import styles
from safe.utilities.i18n import tr
from safe.utilities.resources import (
get_ui_class,
html_footer,
html_header,
resources_path
)
from safe.utilities.settings import setting
from safe.utilities.styling import mmi_ramp_roman
try:
import scipy # NOQA
from scipy.ndimage.filters import gaussian_filter # NOQA
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
INFO_STYLE = styles.BLUE_LEVEL_4_STYLE
LOGGER = logging.getLogger('InaSAFE')
FORM_CLASS = get_ui_class('shakemap_importer_dialog_base.ui')
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
class ShakemapConverterDialog(QDialog, FORM_CLASS):
"""Importer for shakemap grid.xml files."""
def __init__(self, parent=None, iface=None, dock_widget=None):
"""Constructor for the dialog.
Show the grid converter dialog.
:param parent: parent - widget to use as parent.
:type parent: QWidget
:param iface: QGIS QgisAppInterface instance.
:type iface: QgisAppInterface
:param dock_widget: Dock widget instance.
:type dock_widget: Dock
"""
QDialog.__init__(self, parent)
self.parent = parent
self.iface = iface
self.dock_widget = dock_widget
self.setupUi(self)
self.setWindowTitle(
tr('InaSAFE %s Shakemap Converter' % get_version()))
icon = resources_path('img', 'icons', 'show-converter-tool.svg')
self.setWindowIcon(QtGui.QIcon(icon))
self.warning_text = set()
self.on_input_path_textChanged()
self.on_output_path_textChanged()
self.update_warning()
self.output_layer = None
# Event register
# noinspection PyUnresolvedReferences
self.use_output_default.toggled.connect(
self.get_output_from_input)
# noinspection PyUnresolvedReferences
self.input_path.textChanged.connect(self.on_input_path_textChanged)
# noinspection PyUnresolvedReferences
self.output_path.textChanged.connect(self.on_output_path_textChanged)
self.load_result.clicked.connect(self.load_result_toggled)
# Set up things for context help
self.help_button = self.button_box.button(QDialogButtonBox.Help)
# Allow toggling the help button
self.help_button.setCheckable(True)
self.help_button.toggled.connect(self.help_toggled)
self.main_stacked_widget.setCurrentIndex(1)
self.check_box_custom_shakemap_id.toggled.connect(
self.line_edit_shakemap_id.setEnabled)
# Set value for EQ source type combo box
self.combo_box_source_type.addItem(tr('N/A'), '')
for source_type in extra_keyword_earthquake_source['options']:
self.combo_box_source_type.addItem(
source_type['name'], source_type['key'])
self.combo_box_source_type.setCurrentIndex(0)
self.update_warning()
if not setting('developer_mode', expected_type=bool):
self.smoothing_group_box.hide()
self.use_ascii_mode.setToolTip(tr(
            'This algorithm will convert the grid xml to an ascii raster file. '
            'If the cell width and height are different, it will use the width '
            '(cell length in the x axis).'))
if not HAS_SCIPY:
            if self.scipy_smoothing.isChecked():
self.none_smoothing.setChecked(True)
self.scipy_smoothing.setToolTip(tr(
                'You cannot select this option since you do not have '
                'scipy installed on your system.'))
self.scipy_smoothing.setEnabled(False)
else:
self.scipy_smoothing.setEnabled(True)
self.scipy_smoothing.setToolTip('')
# noinspection PyPep8Naming
def on_output_path_textChanged(self):
"""Action when output file name is changed."""
output_path = self.output_path.text()
        output_not_tif_msg = tr('output file is not .tif')
        if output_path and not output_path.endswith('.tif'):
            self.warning_text.add(output_not_tif_msg)
        elif output_path and output_not_tif_msg in self.warning_text:
            self.warning_text.remove(output_not_tif_msg)
self.update_warning()
# noinspection PyPep8Naming
def on_input_path_textChanged(self):
"""Action when input file name is changed."""
input_path = self.input_path.text()
input_not_grid_msg = tr('input file is not .xml')
if input_path and not input_path.endswith('.xml'):
self.warning_text.add(input_not_grid_msg)
elif input_path and input_not_grid_msg in self.warning_text:
self.warning_text.remove(input_not_grid_msg)
if self.use_output_default.isChecked():
self.get_output_from_input()
self.update_warning()
# noinspection PyPep8Naming
def prepare_place_layer(self):
"""Action when input place layer name is changed."""
if os.path.exists(self.input_place.text()):
self.place_layer = QgsVectorLayer(
self.input_place.text(),
tr('Nearby Cities'),
'ogr'
)
if self.place_layer.isValid():
LOGGER.debug('Get field information')
self.name_field.setLayer(self.place_layer)
self.population_field.setLayer(self.place_layer)
else:
LOGGER.debug('failed to set name field')
def update_warning(self):
"""Update warning message and enable/disable Ok button."""
if len(self.warning_text) == 0:
self.button_box.button(QDialogButtonBox.Ok).setEnabled(True)
else:
self.button_box.button(QDialogButtonBox.Ok).setEnabled(False)
header = html_header()
footer = html_footer()
string = header
heading = m.Heading(tr('Shakemap Grid Importer'), **INFO_STYLE)
tips = m.BulletedList()
message = m.Message()
message.add(heading)
for warning in self.warning_text:
tips.add(warning)
message.add(tips)
string += message.to_html()
string += footer
self.info_web_view.setHtml(string)
def get_output_from_input(self):
"""Create default output location based on input location."""
input_path = self.input_path.text()
if input_path.endswith('.xml'):
output_path = input_path[:-3] + 'tif'
elif input_path == '':
output_path = ''
else:
last_dot = input_path.rfind('.')
if last_dot == -1:
output_path = ''
else:
output_path = input_path[:last_dot + 1] + 'tif'
self.output_path.setText(output_path)
def accept(self):
"""Handler for when OK is clicked."""
input_path = self.input_path.text()
input_title = self.line_edit_title.text()
input_source = self.line_edit_source.text()
output_path = self.output_path.text()
if not output_path.endswith('.tif'):
# noinspection PyArgumentList,PyCallByClass,PyTypeChecker
QMessageBox.warning(
self,
tr('InaSAFE'),
                tr('Output file name must be a tif file'))
            return
if not os.path.exists(input_path):
# noinspection PyArgumentList,PyCallByClass,PyTypeChecker
QMessageBox.warning(
self,
tr('InaSAFE'),
tr('Input file does not exist'))
return
algorithm = 'nearest'
if self.nearest_mode.isChecked():
algorithm = 'nearest'
elif self.inverse_distance_mode.isChecked():
algorithm = 'invdist'
elif self.use_ascii_mode.isChecked():
algorithm = 'use_ascii'
# Smoothing
smoothing_method = NONE_SMOOTHING
if self.numpy_smoothing.isChecked():
smoothing_method = NUMPY_SMOOTHING
if self.scipy_smoothing.isChecked():
smoothing_method = SCIPY_SMOOTHING
# noinspection PyUnresolvedReferences
QgsApplication.instance().setOverrideCursor(
QtGui.QCursor(QtCore.Qt.WaitCursor)
)
extra_keywords = {}
if self.check_box_custom_shakemap_id.isChecked():
event_id = self.line_edit_shakemap_id.text()
extra_keywords[extra_keyword_earthquake_event_id['key']] = event_id
current_index = self.combo_box_source_type.currentIndex()
source_type = self.combo_box_source_type.itemData(current_index)
if source_type:
extra_keywords[
extra_keyword_earthquake_source['key']] = source_type
file_name = convert_mmi_data(
input_path,
input_title,
input_source,
output_path,
algorithm=algorithm,
algorithm_filename_flag=True,
smoothing_method=smoothing_method,
extra_keywords=extra_keywords
)
file_info = QFileInfo(file_name)
base_name = file_info.baseName()
self.output_layer = QgsRasterLayer(file_name, base_name)
# noinspection PyUnresolvedReferences
QgsApplication.instance().restoreOverrideCursor()
if self.load_result.isChecked():
# noinspection PyTypeChecker
mmi_ramp_roman(self.output_layer)
self.output_layer.saveDefaultStyle()
if not self.output_layer.isValid():
LOGGER.debug("Failed to load")
else:
# noinspection PyArgumentList
QgsProject.instance().addMapLayer(self.output_layer)
iface.zoomToActiveLayer()
if (self.keyword_wizard_checkbox.isChecked()
and self.keyword_wizard_checkbox.isEnabled()):
self.launch_keyword_wizard()
self.done(self.Accepted)
@pyqtSlot() # prevents actions being handled twice
def on_open_input_tool_clicked(self):
"""Autoconnect slot activated when open input tool button is clicked.
"""
input_path = self.input_path.text()
if not input_path:
input_path = os.path.expanduser('~')
# noinspection PyCallByClass,PyTypeChecker
filename, __ = QFileDialog.getOpenFileName(
self, tr('Input file'), input_path, tr('Raw grid file (*.xml)'))
if filename:
self.input_path.setText(filename)
@pyqtSlot() # prevents actions being handled twice
def on_open_output_tool_clicked(self):
"""Autoconnect slot activated when open output tool button is clicked.
"""
output_path = self.output_path.text()
if not output_path:
output_path = os.path.expanduser('~')
# noinspection PyCallByClass,PyTypeChecker
filename, __ = QFileDialog.getSaveFileName(
self, tr('Output file'), output_path, tr('Raster file (*.tif)'))
if filename:
self.output_path.setText(filename)
@pyqtSlot()
def on_open_place_tool_clicked(self):
input_place = self.input_place.text()
if not input_place:
input_place = os.path.expanduser('~')
filename, __ = QFileDialog.getOpenFileName(
self, tr('Input place layer'), input_place, tr('All Files (*.*)'))
if filename:
self.input_place.setText(filename)
def load_result_toggled(self):
"""Function that perform action when load_result checkbox is clicked.
"""
self.keyword_wizard_checkbox.setEnabled(self.load_result.isChecked())
@pyqtSlot(bool) # prevents actions being handled twice
def help_toggled(self, flag):
"""Show or hide the help tab in the stacked widget.
.. versionadded: 3.2.1
:param flag: Flag indicating whether help should be shown or hidden.
:type flag: bool
"""
if flag:
self.help_button.setText(tr('Hide Help'))
self.show_help()
else:
self.help_button.setText(tr('Show Help'))
self.hide_help()
def hide_help(self):
"""Hide the usage info from the user.
.. versionadded: 3.2.1
"""
self.main_stacked_widget.setCurrentIndex(1)
def show_help(self):
"""Show usage info to the user."""
# Read the header and footer html snippets
self.main_stacked_widget.setCurrentIndex(0)
header = html_header()
footer = html_footer()
string = header
message = shakemap_converter_help()
string += message.to_html()
string += footer
self.help_web_view.setHtml(string)
def launch_keyword_wizard(self):
"""Launch keyword creation wizard."""
# make sure selected layer is the output layer
if self.iface.activeLayer() != self.output_layer:
return
# launch wizard dialog
keyword_wizard = WizardDialog(
self.iface.mainWindow(), self.iface, self.dock_widget)
keyword_wizard.set_keywords_creation_mode(self.output_layer)
keyword_wizard.exec_() # modal
| gpl-3.0 |
CapOM/ChromiumGStreamerBackend | tools/telemetry/telemetry/story/story_unittest.py | 24 | 1797 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import story
from telemetry.story import shared_state
# pylint: disable=abstract-method
class SharedStateBar(shared_state.SharedState):
pass
class StoryFoo(story.Story):
def __init__(self, name='', labels=None):
super(StoryFoo, self).__init__(
SharedStateBar, name, labels)
class StoryTest(unittest.TestCase):
def testStoriesHaveDifferentIds(self):
s0 = story.Story(SharedStateBar, 'foo')
s1 = story.Story(SharedStateBar, 'bar')
self.assertNotEqual(s0.id, s1.id)
def testNamelessStoryDisplayName(self):
s = StoryFoo()
self.assertEquals('StoryFoo', s.display_name)
def testNamedStoryDisplayName(self):
s = StoryFoo('Bar')
self.assertEquals('Bar', s.display_name)
def testStoryFileSafeName(self):
s = StoryFoo('Foo Bar:Baz~0')
self.assertEquals('Foo_Bar_Baz_0', s.file_safe_name)
def testNamelessStoryAsDict(self):
s = story.Story(SharedStateBar)
s_dict = s.AsDict()
self.assertEquals(s_dict['id'], s.id)
self.assertNotIn('name', s_dict)
def testNamedStoryAsDict(self):
s = story.Story(SharedStateBar, 'Foo')
s_dict = s.AsDict()
self.assertEquals(s_dict['id'], s.id)
self.assertEquals('Foo', s_dict['name'])
def testMakeJavaScriptDeterministic(self):
s = story.Story(SharedStateBar)
self.assertTrue(s.make_javascript_deterministic)
s = story.Story(SharedStateBar, make_javascript_deterministic=False)
self.assertFalse(s.make_javascript_deterministic)
s = story.Story(SharedStateBar, make_javascript_deterministic=True)
self.assertTrue(s.make_javascript_deterministic)
| bsd-3-clause |
tinkerinestudio/Tinkerine-Suite | TinkerineSuite/Cura/util/util3d.py | 1 | 1819 | from __future__ import absolute_import
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"
import math
class Vector3(object):
def __init__(self, x=0.0, y=0.0, z=0.0):
self.x = x
self.y = y
self.z = z
def __copy__(self):
return Vector3(self.x, self.y, self.z)
def copy(self):
return Vector3(self.x, self.y, self.z)
def __repr__(self):
return 'V[%s, %s, %s]' % ( self.x, self.y, self.z )
def __add__(self, v):
return Vector3( self.x + v.x, self.y + v.y, self.z + v.z )
def __sub__(self, v):
return Vector3( self.x - v.x, self.y - v.y, self.z - v.z )
def __mul__(self, v):
return Vector3( self.x * v, self.y * v, self.z * v )
def __div__(self, v):
return Vector3( self.x / v, self.y / v, self.z / v )
__truediv__ = __div__
def __neg__(self):
return Vector3( - self.x, - self.y, - self.z )
def __iadd__(self, v):
self.x += v.x
self.y += v.y
self.z += v.z
return self
def __isub__(self, v):
		self.x -= v.x
		self.y -= v.y
		self.z -= v.z
return self
def __imul__(self, v):
self.x *= v
self.y *= v
self.z *= v
return self
def __idiv__(self, v):
self.x /= v
self.y /= v
self.z /= v
return self
def almostEqual(self, v):
return (abs(self.x - v.x) + abs(self.y - v.y) + abs(self.z - v.z)) < 0.00001
def cross(self, v):
return Vector3(self.y * v.z - self.z * v.y, -self.x * v.z + self.z * v.x, self.x * v.y - self.y * v.x)
def vsize(self):
return math.sqrt( self.x * self.x + self.y * self.y + self.z * self.z )
def normalize(self):
f = self.vsize()
if f != 0.0:
self.x /= f
self.y /= f
self.z /= f
def min(self, v):
return Vector3(min(self.x, v.x), min(self.y, v.y), min(self.z, v.z))
def max(self, v):
return Vector3(max(self.x, v.x), max(self.y, v.y), max(self.z, v.z))
| agpl-3.0 |
kubat/drqueue | etc/vray_sg.py | 4 | 2629 | #
# THIS IS A PYTHON SCRIPT FILE
#
# Default configuration for V-Ray script generator
#
# Python variables
# SCENE
#
# shell variables
# DRQUEUE_BLOCKSIZE, DRQUEUE_COMPID, DRQUEUE_ENDFRAME, DRQUEUE_ETC, DRQUEUE_FRAME,
# DRQUEUE_JOBID, DRQUEUE_JOBNAME, DRQUEUE_OS, DRQUEUE_OWNER, DRQUEUE_PADFRAME,
# DRQUEUE_PADFRAMES, DRQUEUE_STARTFRAME, DRQUEUE_STEPFRAME
#
#
# For platform-dependent environment settings a form like this
# can be used :
#
# if DRQUEUE_OS == "LINUX":
#     # Environment for Linux
# elif DRQUEUE_OS == "IRIX":
#     # Environment for Irix
# else:
#     # Some error messages
#
import os,signal,subprocess,sys
os.umask(0)
# fetch DrQueue environment
DRQUEUE_BLOCKSIZE = int(os.getenv("DRQUEUE_BLOCKSIZE"))
DRQUEUE_COMPID = int(os.getenv("DRQUEUE_COMPID"))
DRQUEUE_ENDFRAME = int(os.getenv("DRQUEUE_ENDFRAME"))
DRQUEUE_ETC = os.getenv("DRQUEUE_ETC")
DRQUEUE_FRAME = int(os.getenv("DRQUEUE_FRAME"))
DRQUEUE_JOBID = int(os.getenv("DRQUEUE_JOBID"))
DRQUEUE_JOBNAME = os.getenv("DRQUEUE_JOBNAME")
DRQUEUE_OS = os.getenv("DRQUEUE_OS")
DRQUEUE_OWNER = os.getenv("DRQUEUE_OWNER")
DRQUEUE_PADFRAME = int(os.getenv("DRQUEUE_PADFRAME"))
DRQUEUE_PADFRAMES = int(os.getenv("DRQUEUE_PADFRAMES"))
DRQUEUE_STARTFRAME = int(os.getenv("DRQUEUE_STARTFRAME"))
DRQUEUE_STEPFRAME = int(os.getenv("DRQUEUE_STEPFRAME"))
if DRQUEUE_OS == "WINDOWS":
# convert to windows path with drive letter
SCENE = subprocess.Popen(["cygpath.exe", "-w "+SCENE], stdout=subprocess.PIPE).communicate()[0]
RENDERDIR = subprocess.Popen(["cygpath.exe", "-w "+RENDERDIR], stdout=subprocess.PIPE).communicate()[0]
PROJECTDIR = subprocess.Popen(["cygpath.exe", "-w "+PROJECTDIR], stdout=subprocess.PIPE).communicate()[0]
BLOCK = DRQUEUE_FRAME + DRQUEUE_BLOCKSIZE - 1
if BLOCK > DRQUEUE_ENDFRAME:
BLOCK = DRQUEUE_ENDFRAME
ENGINE_PATH="vray"
# number of threads to use (0 means automatic detection)
THREADS="0"
command = ENGINE_PATH+" -scenefile="+SCENE+ " -numThreads="+THREADS+" -autoClose=1 -frames="+str(DRQUEUE_FRAME)+" -display=0"
print(command)
sys.stdout.flush()
p = subprocess.Popen(command, shell=True)
sts = os.waitpid(p.pid, 0)
# This should requeue the frame if failed
if sts[1] != 0:
print("Requeueing frame...")
os.kill(os.getppid(), signal.SIGINT)
exit(1)
else:
#if DRQUEUE_OS != "WINDOWS" then:
# The frame was rendered properly
# We don't know the output image name. If we knew we could set this correctly
# chown_block RF_OWNER RD/IMAGE DRQUEUE_FRAME BLOCK
# change userid and groupid
#chown 1002:1004 $SCENE:h/*
print("Finished.")
#
# Notice that the exit code of the last command is received by DrQueue
#
| gpl-3.0 |
luzpaz/QGIS | tests/src/python/test_qgsrelationeditwidget.py | 21 | 17425 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for edit widgets.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Matthias Kuhn'
__date__ = '28/11/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
import qgis # NOQA
import os
from qgis.core import (
QgsFeature,
QgsVectorLayer,
QgsProject,
QgsRelation,
QgsTransaction,
QgsFeatureRequest,
QgsVectorLayerTools,
QgsGeometry
)
from qgis.gui import (
QgsGui,
QgsRelationWidgetWrapper,
QgsAttributeEditorContext,
QgsMapCanvas,
QgsAdvancedDigitizingDockWidget
)
from qgis.PyQt.QtCore import QTimer
from qgis.PyQt.QtWidgets import (
QToolButton,
QMessageBox,
QDialogButtonBox,
QTableView,
QDialog
)
from qgis.testing import start_app, unittest
start_app()
class TestQgsRelationEditWidget(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Setup the involved layers and relations for a n:m relation
:return:
"""
cls.mapCanvas = QgsMapCanvas()
QgsGui.editorWidgetRegistry().initEditors(cls.mapCanvas)
cls.dbconn = 'service=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_PGTEST_DB']
# Create test layer
cls.vl_books = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books" sql=', 'books', 'postgres')
cls.vl_authors = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."authors" sql=', 'authors', 'postgres')
cls.vl_editors = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'fk_book,fk_author\' table="qgis_test"."editors" sql=', 'editors', 'postgres')
cls.vl_link_books_authors = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."books_authors" sql=', 'books_authors', 'postgres')
QgsProject.instance().addMapLayer(cls.vl_books)
QgsProject.instance().addMapLayer(cls.vl_authors)
QgsProject.instance().addMapLayer(cls.vl_editors)
QgsProject.instance().addMapLayer(cls.vl_link_books_authors)
cls.relMgr = QgsProject.instance().relationManager()
cls.rel_a = QgsRelation()
cls.rel_a.setReferencingLayer(cls.vl_link_books_authors.id())
cls.rel_a.setReferencedLayer(cls.vl_authors.id())
cls.rel_a.addFieldPair('fk_author', 'pk')
cls.rel_a.setId('rel_a')
assert(cls.rel_a.isValid())
cls.relMgr.addRelation(cls.rel_a)
cls.rel_b = QgsRelation()
cls.rel_b.setReferencingLayer(cls.vl_link_books_authors.id())
cls.rel_b.setReferencedLayer(cls.vl_books.id())
cls.rel_b.addFieldPair('fk_book', 'pk')
cls.rel_b.setId('rel_b')
assert(cls.rel_b.isValid())
cls.relMgr.addRelation(cls.rel_b)
# Our mock QgsVectorLayerTools, that allow injecting data where user input is expected
cls.vltools = VlTools()
assert(cls.vl_authors.isValid())
assert(cls.vl_books.isValid())
assert(cls.vl_editors.isValid())
assert(cls.vl_link_books_authors.isValid())
def setUp(self):
self.startTransaction()
def tearDown(self):
self.rollbackTransaction()
del self.transaction
def test_delete_feature(self):
"""
Check if a feature can be deleted properly
"""
self.createWrapper(self.vl_authors, '"name"=\'Erich Gamma\'')
self.assertEqual(self.table_view.model().rowCount(), 1)
self.assertEqual(1, len([f for f in self.vl_books.getFeatures()]))
fid = next(self.vl_books.getFeatures(QgsFeatureRequest().setFilterExpression('"name"=\'Design Patterns. Elements of Reusable Object-Oriented Software\''))).id()
self.widget.featureSelectionManager().select([fid])
btn = self.widget.findChild(QToolButton, 'mDeleteFeatureButton')
def clickOk():
# Click the "Delete features" button on the confirmation message
# box
widget = self.widget.findChild(QMessageBox)
buttonBox = widget.findChild(QDialogButtonBox)
deleteButton = next((b for b in buttonBox.buttons() if buttonBox.buttonRole(b) == QDialogButtonBox.AcceptRole))
deleteButton.click()
QTimer.singleShot(1, clickOk)
btn.click()
# This is the important check that the feature is deleted
self.assertEqual(0, len([f for f in self.vl_books.getFeatures()]))
# This is actually more checking that the database on delete action is properly set on the relation
self.assertEqual(0, len([f for f in self.vl_link_books_authors.getFeatures()]))
self.assertEqual(self.table_view.model().rowCount(), 0)
def test_list(self):
"""
Simple check if several related items are shown
"""
wrapper = self.createWrapper(self.vl_books) # NOQA
self.assertEqual(self.table_view.model().rowCount(), 4)
def test_add_feature(self):
"""
Check if a new related feature is added
"""
self.createWrapper(self.vl_authors, '"name"=\'Douglas Adams\'')
self.assertEqual(self.table_view.model().rowCount(), 0)
self.vltools.setValues([None, 'The Hitchhiker\'s Guide to the Galaxy', 'Sputnik Editions', 1961])
btn = self.widget.findChild(QToolButton, 'mAddFeatureButton')
btn.click()
# Book entry has been created
self.assertEqual(2, len([f for f in self.vl_books.getFeatures()]))
# Link entry has been created
self.assertEqual(5, len([f for f in self.vl_link_books_authors.getFeatures()]))
self.assertEqual(self.table_view.model().rowCount(), 1)
def test_link_feature(self):
"""
Check if an existing feature can be linked
"""
wrapper = self.createWrapper(self.vl_authors, '"name"=\'Douglas Adams\'') # NOQA
f = QgsFeature(self.vl_books.fields())
f.setAttributes([self.vl_books.dataProvider().defaultValueClause(0), 'The Hitchhiker\'s Guide to the Galaxy', 'Sputnik Editions', 1961])
self.vl_books.addFeature(f)
btn = self.widget.findChild(QToolButton, 'mLinkFeatureButton')
btn.click()
dlg = self.widget.findChild(QDialog)
dlg.setSelectedFeatures([f.id()])
dlg.accept()
# magically the above code selects the feature here...
link_feature = next(self.vl_link_books_authors.getFeatures(QgsFeatureRequest().setFilterExpression('"fk_book"={}'.format(f[0]))))
self.assertIsNotNone(link_feature[0])
self.assertEqual(self.table_view.model().rowCount(), 1)
def test_unlink_feature(self):
"""
Check if a linked feature can be unlinked
"""
wrapper = self.createWrapper(self.vl_books) # NOQA
# All authors are listed
self.assertEqual(self.table_view.model().rowCount(), 4)
it = self.vl_authors.getFeatures(
QgsFeatureRequest().setFilterExpression('"name" IN (\'Richard Helm\', \'Ralph Johnson\')'))
self.widget.featureSelectionManager().select([f.id() for f in it])
self.assertEqual(2, self.widget.featureSelectionManager().selectedFeatureCount())
btn = self.widget.findChild(QToolButton, 'mUnlinkFeatureButton')
btn.click()
# This is actually more checking that the database on delete action is properly set on the relation
self.assertEqual(2, len([f for f in self.vl_link_books_authors.getFeatures()]))
self.assertEqual(2, self.table_view.model().rowCount())
def test_discover_relations(self):
"""
Test the automatic discovery of relations
"""
relations = self.relMgr.discoverRelations([], [self.vl_authors, self.vl_books, self.vl_link_books_authors])
relations = {r.name(): r for r in relations}
self.assertEqual({'books_authors_fk_book_fkey', 'books_authors_fk_author_fkey'}, set(relations.keys()))
ba2b = relations['books_authors_fk_book_fkey']
self.assertTrue(ba2b.isValid())
self.assertEqual('books_authors', ba2b.referencingLayer().name())
self.assertEqual('books', ba2b.referencedLayer().name())
self.assertEqual([0], ba2b.referencingFields())
self.assertEqual([0], ba2b.referencedFields())
ba2a = relations['books_authors_fk_author_fkey']
self.assertTrue(ba2a.isValid())
self.assertEqual('books_authors', ba2a.referencingLayer().name())
self.assertEqual('authors', ba2a.referencedLayer().name())
self.assertEqual([1], ba2a.referencingFields())
self.assertEqual([0], ba2a.referencedFields())
self.assertEqual([], self.relMgr.discoverRelations([self.rel_a, self.rel_b], [self.vl_authors, self.vl_books, self.vl_link_books_authors]))
self.assertEqual(1, len(self.relMgr.discoverRelations([], [self.vl_authors, self.vl_link_books_authors])))
# composite keys relation
relations = self.relMgr.discoverRelations([], [self.vl_books, self.vl_editors])
self.assertEqual(len(relations), 1)
relation = relations[0]
self.assertEqual('books_fk_editor_fkey', relation.name())
self.assertTrue(relation.isValid())
self.assertEqual('books', relation.referencingLayer().name())
self.assertEqual('editors', relation.referencedLayer().name())
self.assertEqual([2, 3], relation.referencingFields())
self.assertEqual([0, 1], relation.referencedFields())
def test_selection(self):
fbook = QgsFeature(self.vl_books.fields())
fbook.setAttributes([self.vl_books.dataProvider().defaultValueClause(0), 'The Hitchhiker\'s Guide to the Galaxy', 'Sputnik Editions', 1961])
self.vl_books.addFeature(fbook)
flink = QgsFeature(self.vl_link_books_authors.fields())
flink.setAttributes([fbook.id(), 5])
self.vl_link_books_authors.addFeature(flink)
self.createWrapper(self.vl_authors, '"name"=\'Douglas Adams\'')
self.zoomToButton = self.widget.findChild(QToolButton, "mDeleteFeatureButton")
self.assertTrue(self.zoomToButton)
self.assertTrue(not self.zoomToButton.isEnabled())
selectionMgr = self.widget.featureSelectionManager()
self.assertTrue(selectionMgr)
self.vl_books.select(fbook.id())
self.assertEqual([fbook.id()], selectionMgr.selectedFeatureIds())
self.assertTrue(self.zoomToButton.isEnabled())
selectionMgr.deselect([fbook.id()])
self.assertEqual([], selectionMgr.selectedFeatureIds())
self.assertTrue(not self.zoomToButton.isEnabled())
self.vl_books.select([1, fbook.id()])
self.assertEqual([fbook.id()], selectionMgr.selectedFeatureIds())
self.assertTrue(self.zoomToButton.isEnabled())
def test_add_feature_geometry(self):
"""
Test to add a feature with a geometry
"""
vl_pipes = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."pipes" (geom) sql=', 'pipes', 'postgres')
vl_leaks = QgsVectorLayer(self.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."leaks" (geom) sql=', 'leaks', 'postgres')
vl_leaks.startEditing()
QgsProject.instance().addMapLayer(vl_pipes)
QgsProject.instance().addMapLayer(vl_leaks)
self.assertEqual(vl_pipes.featureCount(), 2)
self.assertEqual(vl_leaks.featureCount(), 3)
rel = QgsRelation()
rel.setReferencingLayer(vl_leaks.id())
rel.setReferencedLayer(vl_pipes.id())
rel.addFieldPair('pipe', 'id')
rel.setId('rel_pipe_leak')
self.assertTrue(rel.isValid())
self.relMgr.addRelation(rel)
# Mock vector layer tool to just set default value on created feature
class DummyVlTools(QgsVectorLayerTools):
def addFeature(self, layer, defaultValues, defaultGeometry):
f = QgsFeature(layer.fields())
for idx, value in defaultValues.items():
f.setAttribute(idx, value)
f.setGeometry(defaultGeometry)
ok = layer.addFeature(f)
return ok, f
wrapper = QgsRelationWidgetWrapper(vl_leaks, rel)
context = QgsAttributeEditorContext()
vltool = DummyVlTools()
context.setVectorLayerTools(vltool)
context.setMapCanvas(self.mapCanvas)
cadDockWidget = QgsAdvancedDigitizingDockWidget(self.mapCanvas)
context.setCadDockWidget(cadDockWidget)
wrapper.setContext(context)
widget = wrapper.widget()
widget.show()
pipe = next(vl_pipes.getFeatures())
self.assertEqual(pipe.id(), 1)
wrapper.setFeature(pipe)
table_view = widget.findChild(QTableView)
self.assertEqual(table_view.model().rowCount(), 1)
btn = widget.findChild(QToolButton, 'mAddFeatureGeometryButton')
self.assertTrue(btn.isVisible())
self.assertTrue(btn.isEnabled())
btn.click()
self.assertTrue(self.mapCanvas.mapTool())
feature = QgsFeature(vl_leaks.fields())
feature.setGeometry(QgsGeometry.fromWkt('POINT(0 0.8)'))
self.mapCanvas.mapTool().digitizingCompleted.emit(feature)
self.assertEqual(table_view.model().rowCount(), 2)
self.assertEqual(vl_leaks.featureCount(), 4)
request = QgsFeatureRequest()
request.addOrderBy("id", False)
# get new created feature
feat = next(vl_leaks.getFeatures('"id" is NULL'))
self.assertTrue(feat.isValid())
self.assertTrue(feat.geometry().equals(QgsGeometry.fromWkt('POINT(0 0.8)')))
vl_leaks.rollBack()
def startTransaction(self):
"""
Start a new transaction and set all layers into transaction mode.
:return: None
"""
lyrs = [self.vl_authors, self.vl_books, self.vl_link_books_authors]
self.transaction = QgsTransaction.create(lyrs)
self.transaction.begin()
for l in lyrs:
l.startEditing()
def rollbackTransaction(self):
"""
Rollback all changes done in this transaction.
We always rollback and never commit to have the database in a pristine
state at the end of each test.
:return: None
"""
lyrs = [self.vl_authors, self.vl_books, self.vl_link_books_authors]
for l in lyrs:
l.commitChanges()
self.transaction.rollback()
def createWrapper(self, layer, filter=None):
"""
Basic setup of a relation widget wrapper.
Will create a new wrapper and set its feature to the one and only book
in the table.
It will also assign some instance variables to help
* self.widget The created widget
* self.table_view The table view of the widget
:return: The created wrapper
"""
if layer == self.vl_books:
relation = self.rel_b
nmrel = self.rel_a
else:
relation = self.rel_a
nmrel = self.rel_b
self.wrapper = QgsRelationWidgetWrapper(layer, relation)
self.wrapper.setConfig({'nm-rel': nmrel.id()})
context = QgsAttributeEditorContext()
context.setMapCanvas(self.mapCanvas)
context.setVectorLayerTools(self.vltools)
self.wrapper.setContext(context)
self.widget = self.wrapper.widget()
self.widget.show()
request = QgsFeatureRequest()
if filter:
request.setFilterExpression(filter)
book = next(layer.getFeatures(request))
self.wrapper.setFeature(book)
self.table_view = self.widget.findChild(QTableView)
return self.wrapper
class VlTools(QgsVectorLayerTools):
"""
Mock the QgsVectorLayerTools
Since we don't have a user on the test server to input this data for us, we can just use this.
"""
def setValues(self, values):
"""
Set the values for the next feature to insert
:param values: An array of values that shall be used for the next inserted record
:return: None
"""
self.values = values
def addFeature(self, layer, defaultValues, defaultGeometry):
"""
Overrides the addFeature method
:param layer: vector layer
:param defaultValues: some default values that may be provided by QGIS
:param defaultGeometry: a default geometry that may be provided by QGIS
:return: tuple(ok, f) where ok is if the layer added the feature and f is the added feature
"""
values = list()
for i, v in enumerate(self.values):
if v:
values.append(v)
else:
values.append(layer.dataProvider().defaultValueClause(i))
f = QgsFeature(layer.fields())
f.setAttributes(self.values)
f.setGeometry(defaultGeometry)
ok = layer.addFeature(f)
return ok, f
def startEditing(self, layer):
pass
def stopEditing(self, layer, allowCancel):
pass
def saveEdits(self, layer):
pass
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
Craig-Macomber/Panda3D-Shader-Generator | shaderBuilder.py | 1 | 22044 | import itertools
import collections
import os
import renderState
import param
import nodes
import inspect
from panda3d.core import Shader
from direct.showbase.AppRunnerGlobal import appRunner
isP3d=bool(appRunner)
"""
A system for generating shader generators based on the generator specifications (the graph files).
IMPORTANT:
The script files do NOT describe specific shaders.
They describe a shader generator (ShaderBuilder), which takes input (render states) and outputs shaders.
To enable this, the nodes in the shader graph files (defined by the libraries) are not all simply shader functions.
They are all code generators, which may or may not produce the same code in all cases.
The graph files specify how to instantiate and connect the code generator nodes together.
Thus, this shaderBuilder system is an implementation of a Shader Meta-Language. It is NOT a Shader Language.
Specifically, a script file and library files, together with any NodeType subclasses,
are used as source code (in the Shader Meta-Language) to
essentially compile a function (ShaderBuilder instance) that accepts renderStates and returns CG Shader code.
Usage:
- Load up a Library instance from a list of folders on disc
- Use Library.loadScript to load up a shader generator specification graph file (perhaps made with the editor)
that uses the NodeTypes from the Library. Returns a ShaderBuilder
- Use one or more ShaderBuilders (from one or more Libraries and scripts) to generate shaders for
your scene
TODO :
Loaders really need to associate file names and line numbers with all the loaded items
so error reporting can be far more useful!
TODO :
Deployment system could concatenate libraries down to 1 file if desired,
or one could pregenerate shaders and store them with their models in a cache
if they don't need dynamic generation
TODO :
auto generate semantics?
TODO :
fix generator graph node text for first line (line 1)
"""
from direct.stdpy import file
def join(*paths):
""" an join function for file paths that works in p3d, and regular file systems"""
if len(paths)>1 and paths[0]=='': return join(*paths[1:])
if len(paths)==1: return paths[0]
return reduce(file.join,paths)
# if true, adds comments to generated source to aid debugging
debugText=True
def _parseFile(path):
majorSections=collections.defaultdict(list)
f = open(path, 'r')
majorSection=None
section=None
lineNum=0
def warnText(): return "Warning: "+path+" line "+str(lineNum)+": "
for t in f.readlines():
lineNum+=1
# Remove comments
i=t.find('#')
if i!=-1: t=t[:i]
# Strip excess whitespace
t=t.strip()
if len(t)>0:
# Process line
if len(t)>1 and t[0:2]=='::':
section=None
majorSection=t[2:].lower().strip()
majorSectionList=majorSections[majorSection]
d={}
majorSectionList.append(d)
elif t[0]==':':
# if section header, prefixed with :
if majorSection is None:
print warnText()+"throwing away invalid section occuring before first majorSection in: "+path
else:
currentList=[]
section=t[1:].lower().strip()
d[section]=currentList
else:
if section is None:
print warnText()+"throwing away invalid line occuring before first section in: "+path+" section: "+str(section)
elif currentList!=None:
currentList.append(t)
f.close()
return majorSections
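# An illustrative sketch of the layout _parseFile expects (the section and field
# names for "node" entries come from Library.loadPath below; the exact syntax of
# parameter and link definition lines is handled by the param module and is only
# hinted at here):
#
#   ::node              <- '::' starts a major section ("node" or "lib")
#   :info               <- ':' starts a section inside it
#   name MyNode         <- info lines are "key value" pairs
#   output False
#   :shaderinputs       <- one shader input definition per line
#   :inlinks
#   :outlinks
#   :code               <- code lines are kept verbatim and joined
#   # '#' starts a comment anywhere on a line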
def _parseInfoLines(lines,currentFile):
info={}
for line in lines:
s=line.split(None, 1)
if len(s)!=2:
print "invalid info entry '"+line+"' in: "+currentFile
else:
info[s[0]]=s[1]
return info
class NodeWrapper(object):
"""
A wrapper around a node,
intended to be returned as a node in script files
sourceTracker is an optional debug dict for link to source scriptNode tracking
"""
def __init__(self,scriptNode,sourceTracker=None):
self._scriptNode=scriptNode
self.sourceTracker=sourceTracker
def __getattr__(self,name):
link=self._scriptNode.getLink(name)
if self.sourceTracker is not None: self.sourceTracker[link]=self._scriptNode
return link
def _preprocessParam(param,sourceTracker=None):
if isinstance(param,NodeWrapper):
link=param._scriptNode.getDefaultLink()
if sourceTracker is not None: sourceTracker[link]=param._scriptNode
return link
else:
return param
class Library(object):
def __init__(self,paths,nodeTypeClassMap={}):
"""
        paths should be a list of library folder paths
        builds an instance made from the contents of the passed folders
nodeTypeClassMap should be a dict mapping strings to NodeType subclasses.
The strings should correspond to the "class" info field used in the nodes in the library.
no "class" info (a None in the dictionary) maps to NodeType, not a subclass.
"""
self.nodeTypeClassMap=dict(nodes.defaultNodeClasses)
self.nodeTypeClassMap.update(nodeTypeClassMap)
self.loadPath(paths)
def loadPath(self,paths):
"""
called by init, but can be called again if you wish to reload the same paths, or a different one
"""
libs=[]
for root, dirs, files in itertools.chain.from_iterable(os.walk(path) for path in paths):
for name in files:
ext=os.path.splitext(name)[1]
if ext==".txt":
currentFile=join(root, name)
for key,xitems in _parseFile(currentFile).iteritems():
if key=="node":
for items in xitems:
if "info" not in items:
print "node missing info section in: "+currentFile
else:
info=_parseInfoLines(items["info"],currentFile)
if "name" not in info:
print "invalid info entry missing name in: "+currentFile
else:
name=info["name"]
shaderInputs=[]
if "shaderinputs" in items:
for s in items["shaderinputs"]:
shaderInputs.append(param.shaderParamFromDefCode(s))
if "output" in info:
o=info["output"]
assert o in ["True","False"]
isOutPut=o=="True"
assert "stage" in info
stage=info["stage"]
else:
isOutPut=False
stage=None
inLinks=[]
if "inlinks" in items:
for s in items["inlinks"]:
inLinks.append(param.linkEndFromDefCode(s))
outLinks=[]
if "outlinks" in items:
for s in items["outlinks"]:
outLinks.append(param.linkEndFromDefCode(s))
code=""
if "code" in items:
code="\n".join(items["code"])
node=nodes.metaCodeNode(name,code,shaderInputs,inLinks,outLinks,isOutPut=isOutPut,stage=stage)
if name in self.nodeTypeClassMap:
print "Warning: overwriting node "+repr(self.nodeTypeClassMap[name])+" with "+repr(node)+" from "+currentFile
self.nodeTypeClassMap[name]=node
elif key=="lib":
libs.append(xitems)
else:
print "Warning: throwing away invalid majorSection with unrecognized name: "+key+" in file: "+currentFile
libSource="\n".join(itertools.chain.from_iterable(lib["code"] for lib in itertools.chain.from_iterable(libs) if "code" in lib))
self.libSource=libSource
def loadScript(self,path,viewGraph=False):
"""
        loads a generator script at path, returns a ShaderBuilder
"""
return ShaderBuilder(self._parseScript(path,viewGraph),self.libSource)
def _parseScript(self,path,viewGraph=False):
# setup some globals with the names of the Node classes in self.nodeTypeClassMap
scriptGlobals={}
if viewGraph:
nodeInfoDict={}
sourceTracker={}
else:
sourceTracker=None
for name,nodeType in self.nodeTypeClassMap.iteritems():
            # this closure is the actual item put into the scriptGlobals for the script
# it poses as a Node class, but produces NodeWrappers instead of Nodes,
# and also runs _preprocessParam on all passed arguments
def wrapperMaker(name,nodeType):
def scriptNodeWrapper(*args,**kargs):
pargs=[_preprocessParam(param,sourceTracker) for param in args]
for name,param in kargs.iteritems():
kargs[name]=_preprocessParam(param)
node=nodeType(*pargs,**kargs)
nodeList.append(node)
if viewGraph:
stack=inspect.stack()
frame, filename, lineNum, functionName, contextLines, contextIndex=stack[1]
debugInfo=(filename, lineNum, contextLines[contextIndex].rstrip(), pargs, kargs)
nodeInfoDict[node]=debugInfo
return NodeWrapper(node,sourceTracker)
return scriptNodeWrapper
scriptGlobals[name]=wrapperMaker(name,nodeType)
# run the script with the newly made scriptGlobals
nodeList=[]
if isP3d:
# don't use execfile since this works easier within p3d files
exec open(path).read() in scriptGlobals
else:
execfile(path,scriptGlobals,{})
if viewGraph:
import pydot
graph = pydot.Dot(graph_type='digraph')
for node,info in nodeInfoDict.iteritems():
filename, lineNum, line, pargs, kargs=info
graph.add_node(pydot.Node(strId(node), label=str(lineNum)+": "+line, shape="rectangle"))
for a in pargs:
if isinstance(a,nodes.Link):
e = pydot.Edge( strId(sourceTracker[a]),strId(node), label=str(a) )
graph.add_edge(e)
for name,a in kargs.iteritems():
if isinstance(a,nodes.Link):
e = pydot.Edge(strId(sourceTracker[a]),strId(node), label=name+"="+str(a))
graph.add_edge(e)
writeGraph(graph,path)
return nodeList
# a little helper for naming stuff in viewGraph's graphs
def strId(obj): return str(id(obj))
# writes out a graph in the chosen format (svg for now)
def writeGraph(graph,path):
format="svg"
finalPath=path+"."+format
print 'Making Graph: '+finalPath
graph.write(finalPath,format=format)
class ShaderBuilder(object):
"""
A factory for shaders based off a set of Nodes. Make one instance for each distinct set of stages.
"""
def __init__(self,nodes,libSource=""):
"""
        Takes a list of Nodes, and sets this instance up to produce shaders based on them.
"""
self.nodes=nodes
# a cache of finished shaders. Maps RenderState to Shader
self.cache={}
# a cache of finished shaders. Maps set of stage source strings to Shader
self.casheByStages={}
self.header="//Cg\n//AUTO-GENERATED-SHADER//\n\n"+libSource+"\n\n"
self.footer="\n\n//END-AUTO-GENERATED-SHADER//\n"
def setupRenderStateFactory(self,factory=None):
"""
configures and returns a RenderStateFactory (see renderState.RenderStateFactory)
for this ShaderBuilder. Use it to get renderstates for getShader
"""
if factory is None: factory=renderState.RenderStateFactory()
for n in self.nodes:
n.setupRenderStateFactory(factory)
return factory
def getShader(self,renderState,debugFile=None,noChache=False,debugGraphPath=None):
"""
returns a shader appropriate for the passed RenderState
will generate or fetch from cache as needed
        noChache forces the generation of the shader (but it will still get cached).
        Useful with debugFile if you need to see the source, as the shader may otherwise be cached.
        The caching system isn't very good in the case where the render state is different, but the resulting shader is the same.
It will find the shader in the cache, but it will take a while.
"""
shader=self.cache.get(renderState)
if shader and not noChache:
#if debugFile: print "Shader is cached (renderState cache). Skipping generating shader to: "+debugFile
return shader
if debugGraphPath:
debugGraphPath+=str(len(self.casheByStages))
stages=makeStages(self.nodes,renderState,debugGraphPath)
stages=frozenset(stages)
shader=self.casheByStages.get(stages)
if shader and not noChache:
self.cache[renderState]=shader
#if debugFile: print "Shader is cached (renderState cache). Skipping generating shader to: "+debugFile
return shader
# TODO : Auto generate/match unspecified semantics here
stageCode="\n\n".join(stages)
source = self.header+"\n\n"+stageCode+self.footer
if debugFile:
debugFile+=str(len(self.casheByStages))+".sha"
print 'Making Shader: '+debugFile
if debugFile:
fOut=open(debugFile, 'w')
fOut.write(source)
fOut.close()
shader=Shader.make(source, Shader.SLCg)
self.cache[renderState]=shader
self.casheByStages[stages]=shader
return shader
def makeStages(nodes,renderState,debugGraphPath=None):
# process from top down (topological sorted order) to see what part of graph is active, and produce active graph
# nodes are only processed when all nodes above them have been processed.
# set of activeNodes that are needed because they produce output values
# maps stage to its set of outputs
activeOutputs=collections.defaultdict(set)
    # linkStatus defaults to false for all links.
# a linkStatus for links (edges) in the active graph may be associated with the link
# by the node that outputs it when generated.
# generally false means inactive/not available, and true means available/active
# though some nodes may use the status differently
linkStatus=collections.defaultdict(lambda:False)
# dict mapping links to the activeNode that outputs them
linkToSource={}
# list of active nodes, in the same order as source nodes, which should be topologically sorted
sortedActive=[]
# traverse nodes, filling in data-structures inited above.
for n in nodes:
aa=n.getActiveNodes(renderState,linkStatus)
for a in aa:
sortedActive.append(a)
for link in a.getOutLinks():
linkToSource[link]=a
if a.isOutPut():
activeOutputs[a.stage].add(a)
# yield the resulting stages.
path=None
for name,outputs in activeOutputs.iteritems():
if debugGraphPath: path=debugGraphPath+name
yield makeStage(name,sortedActive,outputs,linkToSource,path)
def makeStage(name,sortedActive,activeOutputs,linkToSource,debugGraphPath=None):
    # walk upward from outputs to find nodes the current stage requires recursively (aka needed nodes)
neededSet=set(activeOutputs)
neededNodes=[]
for n in reversed(sortedActive):
if n in neededSet:
neededNodes.append(n)
for link in n.getInLinks():
neededSet.add(linkToSource[link])
if debugGraphPath:
import pydot
graph = pydot.Dot(graph_type='digraph')
for node in neededSet:
if isinstance(node,nodes.ActiveOutput):
n=pydot.Node(strId(node), label=node.stage+" Output: "+str(node.shaderOutput), shape="rectangle")
else:
n=pydot.Node(strId(node), label=node.getComment(), shape="rectangle")
graph.add_node(n)
for link in node.getInLinks():
e = pydot.Edge(strId(linkToSource[link]),strId(node), label=str(link) )
graph.add_edge(e)
writeGraph(graph,debugGraphPath)
return makeStageFromActiveNodes(name,tuple(neededNodes))
stageCache={}
def makeStageFromActiveNodes(name,activeNodes):
key=(name,activeNodes)
s=stageCache.get(key)
if s is None:
b=StageBuilder()
namer=AutoNamer("__"+name+"_")
for node in activeNodes: b.addNode(node,namer)
s=b.generateSource(name)
s="\n\n".join(namer.getItems())+"\n\n"+s
stageCache[key]=s
return s
class AutoNamer(object):
"""
A simple class for associating unique names with hashables
"""
def __init__(self,prefix):
self.items={}
self.prefix=prefix
def addItem(self,item):
if item not in self.items:
self.items[item]=self.nextName()
def getItems(self): return self.items
def nextName(self): return self.prefix+str(len(self.items))
class StageBuilder(object):
"""
Used by ShaderBuilder to build the different stages in the shaders
All nodes used in here are ActiveNodes
built bottom up
"""
def __init__(self):
self.links=AutoNamer("__x")
self.inputs=set()
self.outputs=set()
self.sourceLines=[]
def _addLink(self,link):
self.links.addItem(link)
def addNode(self,node,functionNamer):
"""
links=list of links passed to Node's function. Contains in and out ones.
"""
if isinstance(node,nodes.ActiveOutput):
self._addLink(node.inLink)
o=node.shaderOutput
self.outputs.add(o)
code=o.getName()+"="+self.links.getItems()[node.inLink]+";"
self.sourceLines.append(code)
else:
inputs=node.getShaderInputs()
self.inputs.update(inputs)
inLinks=node.getInLinks()
outLinks=node.getOutLinks()
for link in itertools.chain(inLinks,outLinks):
self._addLink(link)
ld=self.links.getItems()
paramChain=itertools.chain(
(s.getName() for s in inputs),
(ld[s] for s in itertools.chain(inLinks,outLinks)),
)
fname=functionNamer.nextName()
callSource=fname+"("+",".join(paramChain)+");"
self.sourceLines.append(callSource)
# make the function
f="void "+fname+node.getCode()
if debugText:
comment="//"+node.getComment()
f=comment+'\n'+f
self.sourceLines.append('\n'+comment)
functionNamer.addItem(f)
def generateSource(self,name):
paramChain=itertools.chain(
("in "+s.getDefCode() for s in self.inputs),
("out "+s.getDefCode() for s in self.outputs)
)
header="void "+name+"(\n "+",\n ".join(paramChain)+")\n{\n\n"
footer="}"
linkDeclarations='\n'.join(link.getType()+" "+name+";//"+link.name for link,name in self.links.getItems().iteritems())
source='\n'.join(reversed(self.sourceLines))
return header+linkDeclarations+'\n\n'+source+'\n'+footer
| bsd-2-clause |
sag-enorman/selenium | py/selenium/webdriver/firefox/firefox_profile.py | 3 | 14701 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import with_statement
import base64
import copy
import json
import os
import re
import shutil
import sys
import tempfile
import zipfile
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from xml.dom import minidom
from selenium.webdriver.common.proxy import ProxyType
from selenium.common.exceptions import WebDriverException
WEBDRIVER_EXT = "webdriver.xpi"
WEBDRIVER_PREFERENCES = "webdriver_prefs.json"
EXTENSION_NAME = "[email protected]"
class AddonFormatError(Exception):
"""Exception for not well-formed add-on manifest files"""
class FirefoxProfile(object):
ANONYMOUS_PROFILE_NAME = "WEBDRIVER_ANONYMOUS_PROFILE"
DEFAULT_PREFERENCES = None
def __init__(self, profile_directory=None):
"""
Initialises a new instance of a Firefox Profile
:args:
- profile_directory: Directory of profile that you want to use.
This defaults to None and will create a new
directory when object is created.
"""
if not FirefoxProfile.DEFAULT_PREFERENCES:
with open(os.path.join(os.path.dirname(__file__),
WEBDRIVER_PREFERENCES)) as default_prefs:
FirefoxProfile.DEFAULT_PREFERENCES = json.load(default_prefs)
self.default_preferences = copy.deepcopy(
FirefoxProfile.DEFAULT_PREFERENCES['mutable'])
self.native_events_enabled = True
self.profile_dir = profile_directory
self.tempfolder = None
if self.profile_dir is None:
self.profile_dir = self._create_tempfolder()
else:
self.tempfolder = tempfile.mkdtemp()
newprof = os.path.join(self.tempfolder, "webdriver-py-profilecopy")
shutil.copytree(self.profile_dir, newprof,
ignore=shutil.ignore_patterns("parent.lock", "lock", ".parentlock"))
self.profile_dir = newprof
self._read_existing_userjs(os.path.join(self.profile_dir, "user.js"))
self.extensionsDir = os.path.join(self.profile_dir, "extensions")
self.userPrefs = os.path.join(self.profile_dir, "user.js")
# Public Methods
def set_preference(self, key, value):
"""
sets the preference that we want in the profile.
"""
self.default_preferences[key] = value
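    # A minimal usage sketch (the preference name, value and import are just
    # examples); the profile is typically handed to the Firefox driver:
    #
    #   from selenium import webdriver
    #   profile = webdriver.FirefoxProfile()
    #   profile.set_preference("browser.download.dir", "/tmp/downloads")
    #   driver = webdriver.Firefox(firefox_profile=profile)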
def add_extension(self, extension=WEBDRIVER_EXT):
self._install_extension(extension)
def update_preferences(self):
for key, value in FirefoxProfile.DEFAULT_PREFERENCES['frozen'].items():
self.default_preferences[key] = value
self._write_user_prefs(self.default_preferences)
# Properties
@property
def path(self):
"""
Gets the profile directory that is currently being used
"""
return self.profile_dir
@property
def port(self):
"""
Gets the port that WebDriver is working on
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port that WebDriver will be running on
"""
if not isinstance(port, int):
raise WebDriverException("Port needs to be an integer")
try:
port = int(port)
if port < 1 or port > 65535:
raise WebDriverException("Port number must be in the range 1..65535")
except (ValueError, TypeError):
raise WebDriverException("Port needs to be an integer")
self._port = port
self.set_preference("webdriver_firefox_port", self._port)
@property
def accept_untrusted_certs(self):
return self.default_preferences["webdriver_accept_untrusted_certs"]
@accept_untrusted_certs.setter
def accept_untrusted_certs(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_accept_untrusted_certs", value)
@property
def assume_untrusted_cert_issuer(self):
return self.default_preferences["webdriver_assume_untrusted_issuer"]
@assume_untrusted_cert_issuer.setter
def assume_untrusted_cert_issuer(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_assume_untrusted_issuer", value)
@property
def native_events_enabled(self):
return self.default_preferences['webdriver_enable_native_events']
@native_events_enabled.setter
def native_events_enabled(self, value):
if value not in [True, False]:
raise WebDriverException("Please pass in a Boolean to this call")
self.set_preference("webdriver_enable_native_events", value)
@property
def encoded(self):
"""
A zipped, base64 encoded string of profile directory
for use with remote WebDriver JSON wire protocol
"""
self.update_preferences()
fp = BytesIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
path_root = len(self.path) + 1 # account for trailing slash
for base, dirs, files in os.walk(self.path):
for fyle in files:
filename = os.path.join(base, fyle)
zipped.write(filename, filename[path_root:])
zipped.close()
return base64.b64encode(fp.getvalue()).decode('UTF-8')
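    # For context (an illustrative sketch, not the only path): a remote session
    # picks up this encoded profile when one is passed as browser_profile, e.g.
    #
    #   driver = webdriver.Remote(command_executor=server_url,
    #                             desired_capabilities=caps,
    #                             browser_profile=profile)
    #
    # where server_url and caps are assumed to be defined elsewhere.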
def set_proxy(self, proxy):
import warnings
warnings.warn(
"This method has been deprecated. Please pass in the proxy object to the Driver Object",
DeprecationWarning)
if proxy is None:
raise ValueError("proxy can not be None")
if proxy.proxy_type is ProxyType.UNSPECIFIED:
return
self.set_preference("network.proxy.type", proxy.proxy_type['ff_value'])
if proxy.proxy_type is ProxyType.MANUAL:
self.set_preference("network.proxy.no_proxies_on", proxy.no_proxy)
self._set_manual_proxy_preference("ftp", proxy.ftp_proxy)
self._set_manual_proxy_preference("http", proxy.http_proxy)
self._set_manual_proxy_preference("ssl", proxy.ssl_proxy)
self._set_manual_proxy_preference("socks", proxy.socks_proxy)
elif proxy.proxy_type is ProxyType.PAC:
self.set_preference("network.proxy.autoconfig_url", proxy.proxy_autoconfig_url)
def _set_manual_proxy_preference(self, key, setting):
        if setting is None or setting == '':
return
host_details = setting.split(":")
self.set_preference("network.proxy.%s" % key, host_details[0])
if len(host_details) > 1:
self.set_preference("network.proxy.%s_port" % key, int(host_details[1]))
def _create_tempfolder(self):
"""
Creates a temp folder to store User.js and the extension
"""
return tempfile.mkdtemp()
def _write_user_prefs(self, user_prefs):
"""
writes the current user prefs dictionary to disk
"""
with open(self.userPrefs, "w") as f:
for key, value in user_prefs.items():
f.write('user_pref("%s", %s);\n' % (key, json.dumps(value)))
def _read_existing_userjs(self, userjs):
import warnings
PREF_RE = re.compile(r'user_pref\("(.*)",\s(.*)\)')
try:
with open(userjs) as f:
for usr in f:
matches = re.search(PREF_RE, usr)
try:
self.default_preferences[matches.group(1)] = json.loads(matches.group(2))
except:
warnings.warn("(skipping) failed to json.loads existing preference: " +
matches.group(1) + matches.group(2))
except:
# The profile given hasn't had any changes made, i.e no users.js
pass
def _install_extension(self, addon, unpack=True):
"""
Installs addon from a filepath, url
or directory of addons in the profile.
- path: url, path to .xpi, or directory of addons
- unpack: whether to unpack unless specified otherwise in the install.rdf
"""
if addon == WEBDRIVER_EXT:
addon = os.path.join(os.path.dirname(__file__), WEBDRIVER_EXT)
tmpdir = None
xpifile = None
if addon.endswith('.xpi'):
tmpdir = tempfile.mkdtemp(suffix='.' + os.path.split(addon)[-1])
compressed_file = zipfile.ZipFile(addon, 'r')
for name in compressed_file.namelist():
if name.endswith('/'):
if not os.path.isdir(os.path.join(tmpdir, name)):
os.makedirs(os.path.join(tmpdir, name))
else:
if not os.path.isdir(os.path.dirname(os.path.join(tmpdir, name))):
os.makedirs(os.path.dirname(os.path.join(tmpdir, name)))
data = compressed_file.read(name)
with open(os.path.join(tmpdir, name), 'wb') as f:
f.write(data)
xpifile = addon
addon = tmpdir
# determine the addon id
addon_details = self._addon_details(addon)
addon_id = addon_details.get('id')
assert addon_id, 'The addon id could not be found: %s' % addon
# copy the addon to the profile
extensions_path = os.path.join(self.profile_dir, 'extensions')
addon_path = os.path.join(extensions_path, addon_id)
if not unpack and not addon_details['unpack'] and xpifile:
if not os.path.exists(extensions_path):
os.makedirs(extensions_path)
shutil.copy(xpifile, addon_path + '.xpi')
else:
if not os.path.exists(addon_path):
shutil.copytree(addon, addon_path, symlinks=True)
# remove the temporary directory, if any
if tmpdir:
shutil.rmtree(tmpdir)
def _addon_details(self, addon_path):
"""
Returns a dictionary of details about the addon.
:param addon_path: path to the add-on directory or XPI
Returns::
{'id': u'[email protected]', # id of the addon
'version': u'1.4', # version of the addon
'name': u'Rainbow', # name of the addon
'unpack': False } # whether to unpack the addon
"""
details = {
'id': None,
'unpack': False,
'name': None,
'version': None
}
def get_namespace_id(doc, url):
attributes = doc.documentElement.attributes
namespace = ""
for i in range(attributes.length):
if attributes.item(i).value == url:
if ":" in attributes.item(i).name:
                        # If the namespace is not the default one remove 'xmlns:'
namespace = attributes.item(i).name.split(':')[1] + ":"
break
return namespace
def get_text(element):
"""Retrieve the text value of a given node"""
rc = []
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc).strip()
if not os.path.exists(addon_path):
raise IOError('Add-on path does not exist: %s' % addon_path)
try:
if zipfile.is_zipfile(addon_path):
# Bug 944361 - We cannot use 'with' together with zipFile because
# it will cause an exception thrown in Python 2.6.
try:
compressed_file = zipfile.ZipFile(addon_path, 'r')
manifest = compressed_file.read('install.rdf')
finally:
compressed_file.close()
elif os.path.isdir(addon_path):
with open(os.path.join(addon_path, 'install.rdf'), 'r') as f:
manifest = f.read()
else:
raise IOError('Add-on path is neither an XPI nor a directory: %s' % addon_path)
except (IOError, KeyError) as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
try:
doc = minidom.parseString(manifest)
# Get the namespaces abbreviations
em = get_namespace_id(doc, 'http://www.mozilla.org/2004/em-rdf#')
rdf = get_namespace_id(doc, 'http://www.w3.org/1999/02/22-rdf-syntax-ns#')
description = doc.getElementsByTagName(rdf + 'Description').item(0)
if description is None:
description = doc.getElementsByTagName('Description').item(0)
for node in description.childNodes:
# Remove the namespace prefix from the tag for comparison
entry = node.nodeName.replace(em, "")
if entry in details.keys():
details.update({entry: get_text(node)})
if details.get('id') is None:
for i in range(description.attributes.length):
attribute = description.attributes.item(i)
if attribute.name == em + 'id':
details.update({'id': attribute.value})
except Exception as e:
raise AddonFormatError(str(e), sys.exc_info()[2])
# turn unpack into a true/false value
if isinstance(details['unpack'], str):
details['unpack'] = details['unpack'].lower() == 'true'
# If no ID is set, the add-on is invalid
if details.get('id') is None:
raise AddonFormatError('Add-on id could not be found.')
return details
| apache-2.0 |
sander76/home-assistant | homeassistant/components/starline/entity.py | 3 | 1861 | """StarLine base entity."""
from __future__ import annotations
from typing import Callable
from homeassistant.helpers.entity import Entity
from .account import StarlineAccount, StarlineDevice
class StarlineEntity(Entity):
"""StarLine base entity class."""
def __init__(
self, account: StarlineAccount, device: StarlineDevice, key: str, name: str
):
"""Initialize StarLine entity."""
self._account = account
self._device = device
self._key = key
self._name = name
self._unsubscribe_api: Callable | None = None
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def available(self):
"""Return True if entity is available."""
return self._account.api.available
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"starline-{self._key}-{self._device.device_id}"
@property
def name(self):
"""Return the name of the entity."""
return f"{self._device.name} {self._name}"
@property
def device_info(self):
"""Return the device info."""
return self._account.device_info(self._device)
def update(self):
"""Read new state data."""
self.schedule_update_ha_state()
async def async_added_to_hass(self):
"""Call when entity about to be added to Home Assistant."""
await super().async_added_to_hass()
self._unsubscribe_api = self._account.api.add_update_listener(self.update)
async def async_will_remove_from_hass(self):
"""Call when entity is being removed from Home Assistant."""
await super().async_will_remove_from_hass()
if self._unsubscribe_api is not None:
self._unsubscribe_api()
self._unsubscribe_api = None
| apache-2.0 |
cathook/PTTArticleRecommander | src/app_server/modules/proxy_client.py | 2 | 2619 | import logging
import socket
import time
from modules.protocol import net
from modules.protocol import types
class ProxyClientError(Exception):
def __init__(self, *args, **kwargs):
super(ProxyClientError, self).__init__(*args, **kwargs)
class ProxyClient(object):
'''A proxy to the server.
Attributes:
_sock: The socket object.
_logger: The logger.
'''
def __init__(self, addr, port, logger):
'''Constructor.
Args:
addr: Address of the server.
port: Port of the server.
logger: The logger.
'''
self._addr = addr
self._port = port
self._logger = logger
self._sock = None
self._init_sock()
def send_pkg(self, typee, buf):
'''Send a buf with specifying package type.
Args:
typee: The package type.
buf: The package content.
'''
for i in range(10):
try:
self._sock.sendall(
net.PackageHeader(typee, len(buf)).dump() + buf)
return
except socket.error as e:
self._logger.warning('It seems that the server is dead.')
try:
self._sock.close()
except Exception as _:
pass
time.sleep(10)
self._init_sock()
raise ProxyClientError('Cannot send message.')
def recv_header(self):
'''Receives a package header.
Returns: An instance of `net.PackageHeader`
'''
buf = self.recv_all(net.PackageHeader.SIZE)
return net.PackageHeader.load(buf)[0]
def recv_all(self, sz):
'''Receives all bytes from the server.
Args:
sz: Number of bytes to receive.
Returns: An array of bytes.
'''
buf = b''
while len(buf) < sz:
try:
a = self._sock.recv(sz - len(buf))
except socket.error as e:
raise ProxyClientError('Cannot recv: %r' % e)
if not a:
raise ProxyClientError('Cannot recv.')
buf += a
return buf
def _init_sock(self):
try:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect((self._addr, self._port))
self._logger.info('Connected to the server (%s, %d)'
% (self._addr, self._port))
except socket.error as e:
raise ProxyClientError('Cannot connect to the miner server %r' % e)
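# Minimal usage sketch, kept outside the class. Everything below is an
# assumption for illustration only: the address 127.0.0.1:5000 and the
# package type value 0 are placeholders, not values defined by this project.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    _demo_logger = logging.getLogger('proxy_client_demo')
    _demo_client = ProxyClient('127.0.0.1', 5000, _demo_logger)
    # Send an empty payload with a placeholder package type, then block until
    # the server answers with a package header.
    _demo_client.send_pkg(0, b'')
    _demo_logger.info('server replied with header %r',
                      _demo_client.recv_header())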
| mit |
zverevalexei/trex-http-proxy | trex_client/external_libs/scapy-2.3.1/python2/scapy/contrib/ppi_cace.py | 7 | 2627 | ## This file is (hopefully) part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## <[email protected]>
## This program is published under a GPLv2 license
# scapy.contrib.description = PPI CACE
# scapy.contrib.status = loads
"""
CACE PPI types
"""
import logging,struct
from scapy.config import conf
from scapy.packet import *
from scapy.fields import *
from scapy.layers.l2 import Ether
from scapy.layers.dot11 import Dot11
from scapy.contrib.ppi import *
PPI_DOT11COMMON = 2
PPI_DOT11NMAC = 3
PPI_DOT11NMACPHY = 4
PPI_SPECTRUMMAP = 5
PPI_PROCESSINFO = 6
PPI_CAPTUREINFO = 7
PPI_AGGREGATION = 8
PPI_DOT3 = 9
# PPI 802.11 Common Field Header Fields
class dBmByteField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "b")
def i2repr(self, pkt, val):
        if val is not None:
val = "%4d dBm" % val
return val
class PPITSFTField(LELongField):
def i2h(self, pkt, val):
flags = 0
if (pkt):
flags = pkt.getfieldval("Pkt_Flags")
if not flags:
flags = 0
if (flags & 0x02):
scale = 1e-3
else:
scale = 1e-6
tout = scale * float(val)
return tout
def h2i(self, pkt, val):
scale = 1e6
if pkt:
flags = pkt.getfieldval("Pkt_Flags")
if flags:
if (flags & 0x02):
scale = 1e3
tout = int((scale * val) + 0.5)
return tout
_PPIDot11CommonChFlags = ['','','','','Turbo','CCK','OFDM','2GHz','5GHz',
                          'PassiveOnly','Dynamic CCK-OFDM','GFSK']
_PPIDot11CommonPktFlags = ['FCS','TSFT_ms','FCS_Invalid','PHY_Error']
# PPI 802.11 Common Field Header
class Dot11Common(Packet):
name = "PPI 802.11-Common"
fields_desc = [ LEShortField('pfh_type',PPI_DOT11COMMON),
LEShortField('pfh_length', 20),
PPITSFTField('TSF_Timer', 0),
FlagsField('Pkt_Flags',0, -16, _PPIDot11CommonPktFlags),
LEShortField('Rate',0),
LEShortField('Ch_Freq',0),
FlagsField('Ch_Flags', 0, -16, _PPIDot11CommonChFlags),
ByteField('FHSS_Hop',0),
ByteField('FHSS_Pat',0),
dBmByteField('Antsignal',-128),
dBmByteField('Antnoise',-128)]
def extract_padding(self, p):
return "",p
#Hopefully other CACE defined types will be added here.
#Add the dot11common layer to the PPI array
addPPIType(PPI_DOT11COMMON, Dot11Common)
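# A small, hedged demonstration of the layer defined above: it only builds a
# packet locally and prints its fields. The field values (rate, channel and
# dBm readings) are arbitrary sample numbers, not anything mandated by PPI.
if __name__ == "__main__":
    _pkt = Dot11Common(Rate=12, Ch_Freq=2437, Antsignal=-42, Antnoise=-95)
    _pkt.show()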
| mit |
eviljeff/olympia | src/olympia/devhub/tests/test_tasks.py | 2 | 46598 | # -*- coding: utf-8 -*-
import json
import os
import shutil
import tempfile
from contextlib import contextmanager
from datetime import datetime, timedelta
from decimal import Decimal
from django.conf import settings
from django.core import mail
from django.core.files.storage import default_storage as storage
from unittest import mock
import pytest
from PIL import Image
from olympia import amo
from olympia.addons.models import Addon, AddonUser, Preview
from olympia.amo.templatetags.jinja_helpers import user_media_path
from olympia.amo.tests import (
TestCase, addon_factory, user_factory, version_factory)
from olympia.amo.tests.test_helpers import get_addon_file, get_image_path
from olympia.amo.utils import image_size, utc_millesecs_from_epoch
from olympia.api.models import SYMMETRIC_JWT_TYPE, APIKey
from olympia.applications.models import AppVersion
from olympia.constants.base import VALIDATOR_SKELETON_RESULTS
from olympia.devhub import tasks
from olympia.files.models import File
from olympia.files.utils import NoManifestFound
from olympia.files.tests.test_models import UploadTest
from olympia.versions.models import Version
pytestmark = pytest.mark.django_db
def test_resize_icon_shrink():
""" Image should be shrunk so that the longest side is 32px. """
resize_size = 32
final_size = (32, 12)
_uploader(resize_size, final_size)
def test_resize_icon_enlarge():
""" Image stays the same, since the new size is bigger than both sides. """
resize_size = 350
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_same():
""" Image stays the same, since the new size is the same. """
resize_size = 339
final_size = (339, 128)
_uploader(resize_size, final_size)
def test_resize_icon_list():
""" Resize multiple images at once. """
resize_size = [32, 339, 350]
final_size = [(32, 12), (339, 128), (339, 128)]
_uploader(resize_size, final_size)
def _uploader(resize_size, final_size):
img = get_image_path('mozilla.png')
original_size = (339, 128)
src = tempfile.NamedTemporaryFile(
mode='r+b', suffix='.png', delete=False, dir=settings.TMP_PATH)
if not isinstance(final_size, list):
final_size = [final_size]
resize_size = [resize_size]
uploadto = user_media_path('addon_icons')
try:
os.makedirs(uploadto)
except OSError:
pass
for rsize, expected_size in zip(resize_size, final_size):
# resize_icon moves the original
shutil.copyfile(img, src.name)
src_image = Image.open(src.name)
assert src_image.size == original_size
dest_name = os.path.join(uploadto, '1234')
with mock.patch('olympia.amo.utils.pngcrush_image') as pngcrush_mock:
return_value = tasks.resize_icon(src.name, dest_name, [rsize])
dest_image = '%s-%s.png' % (dest_name, rsize)
assert pngcrush_mock.call_count == 1
assert pngcrush_mock.call_args_list[0][0][0] == dest_image
assert image_size(dest_image) == expected_size
# original should have been moved to -original
orig_image = '%s-original.png' % dest_name
assert os.path.exists(orig_image)
# Return value of the task should be a dict with an icon_hash key
# containing the 8 first chars of the md5 hash of the source file,
# which is bb362450b00f0461c6bddc6b97b3c30b.
assert return_value == {'icon_hash': 'bb362450'}
os.remove(dest_image)
assert not os.path.exists(dest_image)
os.remove(orig_image)
assert not os.path.exists(orig_image)
shutil.rmtree(uploadto)
assert not os.path.exists(src.name)
@pytest.mark.django_db
@mock.patch('olympia.amo.utils.pngcrush_image')
def test_recreate_previews(pngcrush_image_mock):
addon = addon_factory()
# Set up the preview so it has files in the right places.
preview_no_original = Preview.objects.create(addon=addon)
with storage.open(preview_no_original.image_path, 'wb') as dest:
shutil.copyfileobj(open(get_image_path('preview_landscape.jpg'), 'rb'),
dest)
with storage.open(preview_no_original.thumbnail_path, 'wb') as dest:
shutil.copyfileobj(open(get_image_path('mozilla.png'), 'rb'), dest)
# And again but this time with an "original" image.
preview_has_original = Preview.objects.create(addon=addon)
with storage.open(preview_has_original.image_path, 'wb') as dest:
shutil.copyfileobj(open(get_image_path('preview_landscape.jpg'), 'rb'),
dest)
with storage.open(preview_has_original.thumbnail_path, 'wb') as dest:
shutil.copyfileobj(open(get_image_path('mozilla.png'), 'rb'), dest)
with storage.open(preview_has_original.original_path, 'wb') as dest:
shutil.copyfileobj(open(get_image_path('teamaddons.jpg'), 'rb'), dest)
tasks.recreate_previews([addon.id])
assert preview_no_original.reload().sizes == {
'image': [533, 400], 'thumbnail': [533, 400]}
# Check no resize for full size, but resize happened for thumbnail
assert (storage.size(preview_no_original.image_path) ==
storage.size(get_image_path('preview_landscape.jpg')))
assert (storage.size(preview_no_original.thumbnail_path) !=
storage.size(get_image_path('mozilla.png')))
assert preview_has_original.reload().sizes == {
'image': [2400, 1600], 'thumbnail': [640, 427],
'original': [3000, 2000]}
# Check both full and thumbnail changed, but original didn't.
assert (storage.size(preview_has_original.image_path) !=
storage.size(get_image_path('preview_landscape.jpg')))
assert (storage.size(preview_has_original.thumbnail_path) !=
storage.size(get_image_path('mozilla.png')))
assert (storage.size(preview_has_original.original_path) ==
storage.size(get_image_path('teamaddons.jpg')))
class ValidatorTestCase(TestCase):
def setUp(self):
self.create_appversion('firefox', '38.0a1')
# Required for WebExtensions tests.
self.create_appversion('firefox', '*')
self.create_appversion('firefox', '42.0')
self.create_appversion('firefox', '42.*')
self.create_appversion('firefox', '43.0')
# Required for 57-specific tests.
self.create_appversion('android', '38.0a1')
self.create_appversion('android', '*')
self.create_appversion('firefox', '57.0')
# Required for Android tests.
self.create_appversion('android', '42.0')
self.create_appversion('android', '45.0')
def create_appversion(self, name, version):
return AppVersion.objects.create(
application=amo.APPS[name].id, version=version)
class TestMeasureValidationTime(UploadTest, TestCase):
def setUp(self):
super(TestMeasureValidationTime, self).setUp()
# Set created time back (just for sanity) otherwise the delta
# would be in the microsecond range.
self.upload = self.get_upload(
abspath=get_addon_file('valid_webextension.xpi'),
with_validation=False)
assert not self.upload.valid
self.upload.update(created=datetime.now() - timedelta(days=1))
@contextmanager
def statsd_timing_mock(self):
statsd_calls = {}
def capture_timing_call(metric, value):
statsd_calls[metric] = value
with mock.patch('olympia.devhub.tasks.statsd.timing') as mock_timing:
mock_timing.side_effect = capture_timing_call
yield statsd_calls
def approximate_upload_time(self):
upload_start = utc_millesecs_from_epoch(self.upload.created)
now = utc_millesecs_from_epoch()
return now - upload_start
def assert_milleseconds_are_close(self, actual_ms, calculated_ms,
fuzz=None):
if fuzz is None:
fuzz = Decimal(300)
assert (actual_ms >= (calculated_ms - fuzz) and
actual_ms <= (calculated_ms + fuzz))
def handle_upload_validation_result(self,
channel=amo.RELEASE_CHANNEL_LISTED):
results = amo.VALIDATOR_SKELETON_RESULTS.copy()
tasks.handle_upload_validation_result(results, self.upload.pk,
channel, False)
def test_track_upload_validation_results_time(self):
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls['devhub.validation_results_processed']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_track_upload_validation_results_with_file_size(self):
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
# This test makes sure storage.size() works on a real file.
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_per_mb']
# This value should not be scaled because this package is under 1MB.
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_scale_large_xpi_times_per_megabyte(self):
megabyte = Decimal(1024 * 1024)
file_size_in_mb = Decimal(5)
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = file_size_in_mb * megabyte
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
# Validation times for files larger than 1MB should be scaled.
rough_delta = self.approximate_upload_time()
rough_scaled_delta = Decimal(rough_delta) / file_size_in_mb
actual_scaled_delta = statsd_calls[
'devhub.validation_results_processed_per_mb']
self.assert_milleseconds_are_close(actual_scaled_delta,
rough_scaled_delta)
def test_measure_small_files_in_separate_bucket(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = 500 # less than 1MB
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_under_1mb']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_measure_large_files_in_separate_bucket(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
            mock_size.return_value = (1024 * 1024) * 5  # 5MB
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
rough_delta = self.approximate_upload_time()
actual_delta = statsd_calls[
'devhub.validation_results_processed_over_1mb']
self.assert_milleseconds_are_close(actual_delta, rough_delta)
def test_do_not_calculate_scaled_time_for_empty_files(self):
with mock.patch('olympia.devhub.tasks.storage.size') as mock_size:
mock_size.return_value = 0
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
assert 'devhub.validation_results_processed_per_mb' not in statsd_calls
def test_ignore_missing_upload_paths_for_now(self):
with mock.patch('olympia.devhub.tasks.storage.exists') as mock_exists:
mock_exists.return_value = False
with self.statsd_timing_mock() as statsd_calls:
self.handle_upload_validation_result()
assert 'devhub.validation_results_processed' in statsd_calls
assert 'devhub.validation_results_processed_per_mb' not in statsd_calls
assert ('devhub.validation_results_processed_under_1mb' not in
statsd_calls)
class TestTrackValidatorStats(TestCase):
def setUp(self):
super(TestTrackValidatorStats, self).setUp()
patch = mock.patch('olympia.devhub.tasks.statsd.incr')
self.mock_incr = patch.start()
self.addCleanup(patch.stop)
def result(self, **overrides):
result = VALIDATOR_SKELETON_RESULTS.copy()
result.update(overrides)
return json.dumps(result)
def test_count_all_successes(self):
tasks.track_validation_stats(self.result(errors=0))
self.mock_incr.assert_any_call(
'devhub.linter.results.all.success'
)
def test_count_all_errors(self):
tasks.track_validation_stats(self.result(errors=1))
self.mock_incr.assert_any_call(
'devhub.linter.results.all.failure'
)
def test_count_listed_results(self):
tasks.track_validation_stats(self.result(metadata={'listed': True}))
self.mock_incr.assert_any_call(
'devhub.linter.results.listed.success'
)
def test_count_unlisted_results(self):
tasks.track_validation_stats(self.result(metadata={'listed': False}))
self.mock_incr.assert_any_call(
'devhub.linter.results.unlisted.success'
)
class TestRunAddonsLinter(UploadTest, ValidatorTestCase):
mock_sign_addon_warning = json.dumps({
"warnings": 1,
"errors": 0,
"messages": [
{"context": None,
"editors_only": False,
"description": "Add-ons which are already signed will be "
"re-signed when published on AMO. This will "
"replace any existing signatures on the add-on.",
"column": None,
"type": "warning",
"id": ["testcases_content", "signed_xpi"],
"file": "",
"tier": 2,
"message": "Package already signed",
"uid": "87326f8f699f447e90b3d5a66a78513e",
"line": None,
"compatibility_type": None},
]
})
def setUp(self):
super(TestRunAddonsLinter, self).setUp()
self.valid_path = get_addon_file('valid_webextension.xpi')
self.invalid_path = get_addon_file(
'invalid_webextension_invalid_id.xpi')
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_pass_validation(self, _mock):
_mock.return_value = '{"errors": 0}'
upload = self.get_upload(
abspath=self.valid_path, with_validation=False)
tasks.validate(upload, listed=True)
assert upload.reload().valid
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_fail_validation(self, _mock):
_mock.return_value = '{"errors": 2}'
upload = self.get_upload(
abspath=self.valid_path, with_validation=False)
tasks.validate(upload, listed=True)
assert not upload.reload().valid
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_validation_error(self, _mock):
_mock.side_effect = Exception
upload = self.get_upload(
abspath=self.valid_path, with_validation=False)
tasks.validate(upload, listed=True)
upload.reload()
validation = upload.processed_validation
assert validation
assert validation['errors'] == 1
assert validation['messages'][0]['id'] == ['validator',
'unexpected_exception']
assert not upload.valid
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_validation_signing_warning(self, _mock):
"""If we sign addons, warn on signed addon submission."""
_mock.return_value = self.mock_sign_addon_warning
upload = self.get_upload(
abspath=self.valid_path, with_validation=False)
tasks.validate(upload, listed=True)
upload.reload()
validation = json.loads(upload.validation)
assert validation['warnings'] == 1
assert len(validation['messages']) == 1
@mock.patch('olympia.devhub.tasks.statsd.incr')
def test_track_validation_stats(self, mock_statsd_incr):
upload = self.get_upload(
abspath=self.valid_path, with_validation=False)
tasks.validate(upload, listed=True)
mock_statsd_incr.assert_has_calls((
mock.call('devhub.linter.results.all.success'),
mock.call('devhub.linter.results.listed.success')))
def test_handle_file_validation_result_task_result_is_serializable(self):
addon = addon_factory()
self.file = addon.current_version.all_files[0]
assert not self.file.has_been_validated
file_validation_id = tasks.validate(self.file).get()
assert json.dumps(file_validation_id)
# Not `self.file.reload()`. It won't update the `validation` FK.
self.file = File.objects.get(pk=self.file.pk)
assert self.file.has_been_validated
def test_binary_flag_set_on_addon_for_binary_extensions(self):
results = {
"errors": 0,
"success": True,
"warnings": 0,
"notices": 0,
"message_tree": {},
"messages": [],
"metadata": {
"contains_binary_extension": True,
"version": "1.0",
"name": "gK0Bes Bot",
"id": "gkobes@gkobes"
}
}
self.addon = addon_factory()
self.file = self.addon.current_version.all_files[0]
assert not self.addon.binary
tasks.handle_file_validation_result(results, self.file.pk)
self.addon = Addon.objects.get(pk=self.addon.pk)
assert self.addon.binary
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_calls_run_linter(self, run_addons_linter_mock):
run_addons_linter_mock.return_value = '{"errors": 0}'
upload = self.get_upload(
abspath=self.valid_path, with_validation=False)
assert not upload.valid
tasks.validate(upload, listed=True)
upload.reload()
assert upload.valid, upload.validation
def test_run_linter_fail(self):
upload = self.get_upload(
abspath=self.invalid_path, with_validation=False)
tasks.validate(upload, listed=True)
upload.reload()
assert not upload.valid
def test_run_linter_path_doesnt_exist(self):
with pytest.raises(ValueError) as exc:
tasks.run_addons_linter('doesntexist', amo.RELEASE_CHANNEL_LISTED)
assert str(exc.value) == (
'Path "doesntexist" is not a file or directory or '
'does not exist.')
def test_run_linter_use_temporary_file(self):
TemporaryFile = tempfile.TemporaryFile
with mock.patch('olympia.devhub.tasks.tempfile.TemporaryFile') as tmpf:
tmpf.side_effect = lambda *a, **kw: TemporaryFile(*a, **kw)
# This is a relatively small add-on but we are making sure that
# we're using a temporary file for all our linter output.
result = json.loads(tasks.run_addons_linter(
get_addon_file('webextension_containing_binary_files.xpi'),
amo.RELEASE_CHANNEL_LISTED
))
assert tmpf.call_count == 2
assert result['success']
assert not result['warnings']
assert not result['errors']
class TestValidateFilePath(ValidatorTestCase):
def test_success(self):
result = json.loads(tasks.validate_file_path(
get_addon_file('valid_webextension.xpi'),
channel=amo.RELEASE_CHANNEL_LISTED))
assert result['success']
assert not result['errors']
assert not result['warnings']
def test_fail_warning(self):
result = json.loads(tasks.validate_file_path(
get_addon_file('valid_webextension_warning.xpi'),
channel=amo.RELEASE_CHANNEL_LISTED))
assert result['success']
assert not result['errors']
assert result['warnings']
def test_fail_error(self):
result = json.loads(tasks.validate_file_path(
get_addon_file('invalid_webextension_invalid_id.xpi'),
channel=amo.RELEASE_CHANNEL_LISTED))
assert not result['success']
assert result['errors']
assert not result['warnings']
@mock.patch('olympia.devhub.tasks.parse_addon')
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_manifest_not_found_error(
self, run_addons_linter_mock, parse_addon_mock):
parse_addon_mock.side_effect = NoManifestFound(message=u'Fôo')
# When parse_addon() raises a NoManifestFound error, we should
# still call the linter to let it raise the appropriate error message.
tasks.validate_file_path(
get_addon_file('valid_webextension.xpi'),
channel=amo.RELEASE_CHANNEL_LISTED)
assert run_addons_linter_mock.call_count == 1
@mock.patch('olympia.devhub.tasks.parse_addon')
@mock.patch('olympia.devhub.tasks.run_addons_linter')
def test_invalid_json_manifest_error(
self, run_addons_linter_mock, parse_addon_mock):
parse_addon_mock.side_effect = NoManifestFound(message=u'Fôo')
        # When parse_addon() raises an InvalidManifest error, we should
# still call the linter to let it raise the appropriate error message.
tasks.validate_file_path(
get_addon_file('invalid_manifest_webextension.xpi'),
channel=amo.RELEASE_CHANNEL_LISTED)
assert run_addons_linter_mock.call_count == 1
class TestWebextensionIncompatibilities(UploadTest, ValidatorTestCase):
fixtures = ['base/addon_3615']
def setUp(self):
self.addon = Addon.objects.get(pk=3615)
# valid_webextension.xpi has version 1.0 so mock the original version
self.addon.update(guid='[email protected]')
self.addon.current_version.update(version='0.9')
self.update_files(
version=self.addon.current_version,
filename='delicious_bookmarks-2.1.106-fx.xpi')
def update_files(self, **kw):
for version in self.addon.versions.all():
for file in version.files.all():
file.update(**kw)
def test_webextension_no_webext_no_warning(self):
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = self.get_upload(
abspath=file_, with_validation=False, addon=self.addon,
version='0.1')
tasks.validate(upload, listed=True)
upload.refresh_from_db()
validation = upload.processed_validation
expected = ['validation', 'messages', 'webext_upgrade']
assert not any(msg['id'] == expected for msg in validation['messages'])
def test_webextension_cannot_be_downgraded(self):
self.update_files(is_webextension=True)
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = self.get_upload(
abspath=file_, with_validation=False, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = ['validation', 'messages', 'legacy_addons_unsupported']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
def test_webextension_downgrade_unlisted_error(self):
self.update_files(is_webextension=True)
self.make_addon_unlisted(self.addon)
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
upload = self.get_upload(
abspath=file_, with_validation=False, addon=self.addon)
tasks.validate(upload, listed=False)
upload.refresh_from_db()
expected = ['validation', 'messages', 'legacy_addons_unsupported']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
assert validation['errors'] == 1
def test_webextension_cannot_be_downgraded_ignore_deleted_version(self):
"""Make sure even deleting the previous version does not prevent
the downgrade error."""
file_ = amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi')
self.update_files(is_webextension=True)
deleted_version = version_factory(
addon=self.addon, file_kw={'is_webextension': False})
deleted_version.delete()
upload = self.get_upload(
abspath=file_, with_validation=False, addon=self.addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
expected = ['validation', 'messages', 'legacy_addons_unsupported']
validation = upload.processed_validation
assert validation['messages'][0]['id'] == expected
assert validation['messages'][0]['type'] == 'error'
class TestLegacyAddonRestrictions(UploadTest, ValidatorTestCase):
def test_legacy_submissions_disabled(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
upload = self.get_upload(abspath=file_, with_validation=False)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_unsupported']
assert upload.processed_validation['messages'][0]['id'] == expected
assert upload.processed_validation['messages'][0]['description'] == []
assert not upload.valid
def test_legacy_updates_disabled(self):
file_ = get_addon_file('valid_firefox_addon.xpi')
addon = addon_factory(version_kw={'version': '0.1'})
upload = self.get_upload(
abspath=file_, with_validation=False, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_unsupported']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_legacy_dictionary_disabled(self):
file_ = get_addon_file('dictionary_targeting_57.xpi')
addon = addon_factory(version_kw={'version': '0.1'},
type=amo.ADDON_DICT)
upload = self.get_upload(
abspath=file_, with_validation=False, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_unsupported']
assert upload.processed_validation['messages'][0]['id'] == expected
assert not upload.valid
def test_submit_legacy_thunderbird_specific_message(self):
# We only show thunderbird/seamonkey specific error message
# if the user submits a thunderbird/seamonkey extension.
file_ = get_addon_file('valid_firefox_and_thunderbird_addon.xpi')
addon = addon_factory(version_kw={'version': '0.0.1'})
upload = self.get_upload(
abspath=file_, with_validation=False, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_unsupported']
assert upload.processed_validation['messages'][0]['id'] == expected
assert upload.processed_validation['messages'][0]['description'] == [
u'Add-ons for Thunderbird and SeaMonkey are now listed and '
'maintained on addons.thunderbird.net. You can use the same '
'account to update your add-ons on the new site.']
assert not upload.valid
def test_submit_legacy_seamonkey_specific_message(self):
# We only show thunderbird/seamonkey specific error message
# if the user submits a thunderbird/seamonkey extension.
file_ = get_addon_file('valid_seamonkey_addon.xpi')
addon = addon_factory(version_kw={'version': '0.0.1'})
upload = self.get_upload(
abspath=file_, with_validation=False, addon=addon)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
expected = ['validation', 'messages', 'legacy_addons_unsupported']
assert upload.processed_validation['messages'][0]['id'] == expected
assert upload.processed_validation['messages'][0]['description'] == [
u'Add-ons for Thunderbird and SeaMonkey are now listed and '
'maintained on addons.thunderbird.net. You can use the same '
'account to update your add-ons on the new site.']
assert not upload.valid
def test_submit_webextension(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(abspath=file_, with_validation=False)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_submit_search_plugin(self):
file_ = get_addon_file('searchgeek-20090701.xml')
upload = self.get_upload(abspath=file_, with_validation=False)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert not upload.valid
assert upload.processed_validation['errors'] == 1
assert upload.processed_validation['messages'] == [{
'compatibility_type': None,
'description': [],
'id': ['validation', 'messages', 'opensearch_unsupported'],
'message': (
'Open Search add-ons are <a '
'href="https://blog.mozilla.org/addons/2019/10/15/'
'search-engine-add-ons-to-be-removed-from-addons-mozilla-org/"'
' rel="nofollow">no longer supported on AMO</a>. You can '
'create a <a href="https://developer.mozilla.org/docs/Mozilla'
'/Add-ons/WebExtensions/manifest.json/'
'chrome_settings_overrides" rel="nofollow">search extension '
'instead</a>.'),
'tier': 1,
'type': 'error'}]
@mock.patch('olympia.devhub.tasks.send_html_mail_jinja')
def test_send_welcome_email(send_html_mail_jinja_mock):
tasks.send_welcome_email(3615, ['[email protected]'], {'omg': 'yes'})
send_html_mail_jinja_mock.assert_called_with(
('Mozilla Add-ons: Your add-on has been submitted to'
' addons.mozilla.org!'),
'devhub/emails/submission.html',
'devhub/emails/submission.txt',
{'omg': 'yes'},
recipient_list=['[email protected]'],
from_email=settings.ADDONS_EMAIL,
use_deny_list=False,
perm_setting='individual_contact')
class TestSubmitFile(UploadTest, TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestSubmitFile, self).setUp()
self.addon = Addon.objects.get(pk=3615)
patcher = mock.patch('olympia.devhub.tasks.create_version_for_upload')
self.create_version_for_upload = patcher.start()
self.addCleanup(patcher.stop)
@mock.patch('olympia.devhub.tasks.FileUpload.passed_all_validations', True)
def test_file_passed_all_validations(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, addon=self.addon, version='1.0')
tasks.submit_file(self.addon.pk, upload.pk, amo.RELEASE_CHANNEL_LISTED)
self.create_version_for_upload.assert_called_with(
self.addon, upload, amo.RELEASE_CHANNEL_LISTED)
@mock.patch('olympia.devhub.tasks.FileUpload.passed_all_validations',
False)
def test_file_not_passed_all_validations(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, addon=self.addon, version='1.0')
tasks.submit_file(self.addon.pk, upload.pk, amo.RELEASE_CHANNEL_LISTED)
assert not self.create_version_for_upload.called
class TestCreateVersionForUpload(UploadTest, TestCase):
fixtures = ['base/addon_3615']
def setUp(self):
super(TestCreateVersionForUpload, self).setUp()
self.addon = Addon.objects.get(pk=3615)
self.mocks = {}
for key in ['Version.from_upload', 'parse_addon']:
patcher = mock.patch('olympia.devhub.tasks.%s' % key)
self.mocks[key] = patcher.start()
self.addCleanup(patcher.stop)
self.user = user_factory()
def test_file_passed_all_validations_not_most_recent(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon, version='1.0')
newer_upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon, version='1.0')
newer_upload.update(created=datetime.today() + timedelta(hours=1))
# Check that the older file won't turn into a Version.
tasks.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
# But the newer one will.
tasks.create_version_for_upload(self.addon, newer_upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['Version.from_upload'].assert_called_with(
newer_upload, self.addon, [amo.FIREFOX.id, amo.ANDROID.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_version_exists(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon, version='1.0')
Version.objects.create(addon=upload.addon, version=upload.version)
# Check that the older file won't turn into a Version.
tasks.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
def test_file_passed_all_validations_most_recent_failed(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon, version='1.0')
newer_upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon, version='1.0')
newer_upload.update(created=datetime.today() + timedelta(hours=1),
valid=False,
validation=json.dumps({"errors": 5}))
tasks.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
assert not self.mocks['Version.from_upload'].called
def test_file_passed_all_validations_most_recent(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon, version='1.0')
newer_upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon, version='0.5')
newer_upload.update(created=datetime.today() + timedelta(hours=1))
# The Version is created because the newer upload is for a different
# version_string.
tasks.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.FIREFOX.id, amo.ANDROID.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_beta_string(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon,
version='1.0beta1')
tasks.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.FIREFOX.id, amo.ANDROID.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
def test_file_passed_all_validations_no_version(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, user=self.user, addon=self.addon,
version=None)
tasks.create_version_for_upload(self.addon, upload,
amo.RELEASE_CHANNEL_LISTED)
self.mocks['parse_addon'].assert_called_with(
upload, self.addon, user=self.user)
self.mocks['Version.from_upload'].assert_called_with(
upload, self.addon, [amo.FIREFOX.id, amo.ANDROID.id],
amo.RELEASE_CHANNEL_LISTED,
parsed_data=self.mocks['parse_addon'].return_value)
class TestAPIKeyInSubmission(UploadTest, TestCase):
def setUp(self):
self.user = user_factory()
s = '656b16a8ab71686fcfcd04d574bc28be9a1d8252141f54cfb5041709262b84f4'
self.key = APIKey.objects.create(
user=self.user,
type=SYMMETRIC_JWT_TYPE,
key='user:12345:678',
secret=s)
self.addon = addon_factory(users=[self.user],
version_kw={'version': '0.1'},
file_kw={'is_webextension': True})
self.file = get_addon_file('webextension_containing_api_key.xpi')
def test_api_key_in_new_submission_is_found(self):
upload = self.get_upload(
abspath=self.file, with_validation=False, addon=self.addon,
user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('Your developer API key was found in the submitted '
'file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert mail.outbox[0].to[0] == self.user.email
def test_api_key_in_submission_is_found(self):
upload = self.get_upload(
abspath=self.file, with_validation=False, addon=self.addon,
user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('Your developer API key was found in the submitted '
'file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert ('never share your credentials' in mail.outbox[0].body)
assert mail.outbox[0].to[0] == self.user.email
def test_coauthor_api_key_in_submission_is_found(self):
coauthor = user_factory()
AddonUser.objects.create(addon=self.addon, user_id=coauthor.id)
upload = self.get_upload(
abspath=self.file, with_validation=False, addon=self.addon,
user=coauthor)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 1
messages = upload.processed_validation['messages']
assert len(messages) == 1
assert messages[0]['id'] == [
u'validation', u'messages', u'api_key_detected']
assert ('The developer API key of a coauthor was found in the '
'submitted file.' in messages[0]['message'])
assert not upload.valid
# If the key has been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
assert len(mail.outbox) == 1
assert ('Your AMO API credentials have been revoked'
in mail.outbox[0].subject)
assert ('never share your credentials' in mail.outbox[0].body)
# We submit as the coauthor, the leaked key is the one from 'self.user'
assert mail.outbox[0].to[0] == self.user.email
def test_api_key_already_revoked_by_developer(self):
self.key.update(is_active=None)
tasks.revoke_api_key(self.key.id)
# If the key has already been revoked, there is no active key,
# so `get_jwt_key` raises `DoesNotExist`.
with pytest.raises(APIKey.DoesNotExist):
APIKey.get_jwt_key(user_id=self.user.id)
def test_api_key_already_regenerated_by_developer(self):
self.key.update(is_active=None)
current_key = APIKey.new_jwt_credentials(user=self.user)
tasks.revoke_api_key(self.key.id)
key_from_db = APIKey.get_jwt_key(user_id=self.user.id)
assert current_key.key == key_from_db.key
assert current_key.secret == key_from_db.secret
def test_revoke_task_is_called(self):
mock_str = 'olympia.devhub.tasks.revoke_api_key'
wrapped = tasks.revoke_api_key
with mock.patch(mock_str, wraps=wrapped) as mock_revoke:
upload = self.get_upload(
abspath=self.file, with_validation=False, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
mock_revoke.apply_async.assert_called_with(
kwargs={'key_id': self.key.id}, countdown=120)
assert not upload.valid
def test_does_not_revoke_for_different_author(self):
different_author = user_factory()
upload = self.get_upload(
abspath=self.file, with_validation=False, user=different_author)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.valid
def test_does_not_revoke_safe_webextension(self):
file_ = get_addon_file('valid_webextension.xpi')
upload = self.get_upload(
abspath=file_, with_validation=False, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_validation_finishes_if_containing_binary_content(self):
file_ = get_addon_file('webextension_containing_binary_files.xpi')
upload = self.get_upload(
abspath=file_, with_validation=False, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
assert upload.processed_validation['errors'] == 0
assert upload.processed_validation['messages'] == []
assert upload.valid
def test_validation_finishes_if_containing_invalid_filename(self):
file_ = get_addon_file('invalid_webextension.xpi')
upload = self.get_upload(
abspath=file_, with_validation=False, user=self.user)
tasks.validate(upload, listed=True)
upload.refresh_from_db()
# https://github.com/mozilla/addons-server/issues/8208
# causes this to be 1 (and invalid) instead of 0 (and valid).
# The invalid filename error is caught and raised outside of this
# validation task.
assert upload.processed_validation['errors'] == 1
assert not upload.valid
class TestValidationTask(TestCase):
def setUp(self):
TestValidationTask.fake_task_has_been_called = False
@tasks.validation_task
def fake_task(results, pk):
TestValidationTask.fake_task_has_been_called = True
return {**results, 'fake_task_results': 1}
def test_returns_validator_results_when_received_results_is_none(self):
results = self.fake_task(None, 123)
assert not self.fake_task_has_been_called
assert results == amo.VALIDATOR_SKELETON_EXCEPTION_WEBEXT
def test_returns_results_when_received_results_have_errors(self):
results = {'errors': 1}
returned_results = self.fake_task(results, 123)
assert not self.fake_task_has_been_called
assert results == returned_results
def test_runs_wrapped_task(self):
results = {'errors': 0}
returned_results = self.fake_task(results, 123)
assert TestValidationTask.fake_task_has_been_called
assert results != returned_results
assert 'fake_task_results' in returned_results
class TestForwardLinterResults(TestCase):
def test_returns_received_results(self):
results = {'errors': 1}
returned_results = tasks.forward_linter_results(results, 123)
assert results == returned_results
| bsd-3-clause |
hgl888/web-testing-service | wts/tests/csp/csp_frame-src_cross-origin_multi_allowed_one-manual.py | 30 | 2514 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
_CSP = "frame-src " + url1 + " " + url2
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_frame-src_cross-origin_allowed_one</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#frame-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is a filled green square.</p>
<iframe frameborder="no" border="0" src='""" + url1 + """/tests/csp/support/green-100x100.png'/>
</body>
</html> """
| bsd-3-clause |
dlakata/flask-security | tests/test_registerable.py | 18 | 4132 | # -*- coding: utf-8 -*-
"""
test_registerable
~~~~~~~~~~~~~~~~~
Registerable tests
"""
import pytest
from flask import Flask
from flask_security.core import UserMixin
from flask_security.signals import user_registered
from utils import authenticate, logout
pytestmark = pytest.mark.registerable()
@pytest.mark.settings(post_register_view='/post_register')
def test_registerable_flag(client, app, get_message):
recorded = []
# Test the register view
response = client.get('/register')
assert b"<h1>Register</h1>" in response.data
# Test registering is successful, sends email, and fires signal
@user_registered.connect_via(app)
    def on_user_registered(app, user, confirm_token):
assert isinstance(app, Flask)
assert isinstance(user, UserMixin)
assert confirm_token is None
recorded.append(user)
data = dict(
email='[email protected]', password='password', password_confirm='password',
next=''
)
with app.mail.record_messages() as outbox:
response = client.post('/register', data=data, follow_redirects=True)
assert len(recorded) == 1
assert len(outbox) == 1
assert b'Post Register' in response.data
logout(client)
# Test user can login after registering
response = authenticate(client, email='[email protected]', password='password')
assert response.status_code == 302
logout(client)
# Test registering with an existing email
data = dict(
email='[email protected]', password='password', password_confirm='password',
next=''
)
response = client.post('/register', data=data, follow_redirects=True)
assert get_message('EMAIL_ALREADY_ASSOCIATED', email='[email protected]') in response.data
# Test registering with an existing email but case insensitive
data = dict(
email='[email protected]', password='password', password_confirm='password',
next=''
)
response = client.post('/register', data=data, follow_redirects=True)
assert get_message('EMAIL_ALREADY_ASSOCIATED', email='[email protected]') in response.data
# Test registering with JSON
data = '{ "email": "[email protected]", "password": "password"}'
response = client.post('/register', data=data, headers={'Content-Type': 'application/json'})
assert response.headers['content-type'] == 'application/json'
assert response.jdata['meta']['code'] == 200
logout(client)
# Test registering with invalid JSON
data = '{ "email": "bogus", "password": "password"}'
response = client.post('/register', data=data, headers={'Content-Type': 'application/json'})
assert response.headers['content-type'] == 'application/json'
assert response.jdata['meta']['code'] == 400
logout(client)
# Test ?next param
data = dict(email='[email protected]',
password='password',
password_confirm='password',
next='')
response = client.post('/register?next=/page1', data=data, follow_redirects=True)
assert b'Page 1' in response.data
@pytest.mark.settings(register_url='/custom_register', post_register_view='/post_register')
def test_custom_register_url(client):
response = client.get('/custom_register')
assert b"<h1>Register</h1>" in response.data
data = dict(email='[email protected]',
password='password',
password_confirm='password',
next='')
response = client.post('/custom_register', data=data, follow_redirects=True)
assert b'Post Register' in response.data
@pytest.mark.settings(register_user_template='custom_security/register_user.html')
def test_custom_register_template(client):
response = client.get('/register')
assert b'CUSTOM REGISTER USER' in response.data
@pytest.mark.settings(send_register_email=False)
def test_disable_register_emails(client, app):
data = dict(
email='[email protected]', password='password', password_confirm='password',
next=''
)
with app.mail.record_messages() as outbox:
client.post('/register', data=data, follow_redirects=True)
assert len(outbox) == 0
| mit |
datalogics/scons | test/scons-time/help/all-subcommands.py | 2 | 2017 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that all subcommands show up in the global help.
This makes sure that each do_*() function attached to the SConsTimer
class has a line in the help string.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
# Compile the scons-time script as a module.
c = compile(test.read(test.program, mode='r'), test.program, 'exec')
# Evaluate the module in a global name space so we can get at SConsTimer.
globals = {}
try: eval(c, globals)
except: pass
# Extract all subcommands from the do_*() functions.
functions = globals['SConsTimer'].__dict__.keys()
do_funcs = filter(lambda x: x[:3] == 'do_', functions)
subcommands = map(lambda x: x[3:], do_funcs)
expect = map(lambda x: ' %s ' % x, subcommands)
test.run(arguments = 'help')
test.must_contain_all_lines('Standard output', test.stdout(), expect)
test.pass_test()
| mit |
anrl/gini | frontend/src/gbuilder/UI/SendDirectoryWindow.py | 11 | 2028 | """The window to specify which directory to send a file to"""
from PyQt4 import QtCore, QtGui
from Core.globals import options, mainWidgets
class SendDirectoryWindow(QtGui.QDialog):
def __init__(self, parent = None):
"""
Create a send directory window to send a file to the server.
"""
QtGui.QDialog.__init__(self, parent)
self.filename = ""
self.radio1 = QtGui.QRadioButton("bin")
self.radio2 = QtGui.QRadioButton("tmp")
self.radio3 = QtGui.QRadioButton("data")
self.filenameLabel = QtGui.QLabel("")
self.sendButton = QtGui.QPushButton("Send")
self.cancelButton = QtGui.QPushButton("Cancel")
self.choices = [self.radio1, self.radio2, self.radio3]
buttonLayout = QtGui.QHBoxLayout()
        buttonLayout.addWidget(self.sendButton)
        buttonLayout.addWidget(self.cancelButton)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.filenameLabel)
layout.addWidget(self.radio1)
layout.addWidget(self.radio2)
layout.addWidget(self.radio3)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.resize(250, 150)
self.setWindowTitle("Destination Directory")
self.connect(self.sendButton, QtCore.SIGNAL("clicked()"), self.send)
self.connect(self.cancelButton, QtCore.SIGNAL("clicked()"), self.reject)
def setFilename(self, filename):
"""
Set the filename to send to the server.
"""
self.filename = filename
self.filenameLabel.setText(filename)
def send(self):
"""
Send the file to the server.
"""
self.hide()
client = mainWidgets["client"]
if not client:
return
for radio in self.choices:
if radio.isChecked():
client.process("file " + radio.text() + " " + self.filename)
return
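# Standalone preview of the dialog, useful for quick manual checks. This is a
# sketch only: the real application populates mainWidgets["client"] before the
# dialog is used, which is not done here, so the Send path is not exercised.
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    window = SendDirectoryWindow()
    window.setFilename("example.conf")
    window.show()
    sys.exit(app.exec_())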
| mit |
haniehrajabi/ryu | ryu/controller/ofp_event.py | 33 | 2338 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenFlow event definitions.
"""
import inspect
from ryu.controller import handler
from ryu import ofproto
from ryu import utils
from . import event
class EventOFPMsgBase(event.EventBase):
def __init__(self, msg):
super(EventOFPMsgBase, self).__init__()
self.msg = msg
#
# Create ofp_event type corresponding to OFP Msg
#
_OFP_MSG_EVENTS = {}
def _ofp_msg_name_to_ev_name(msg_name):
return 'Event' + msg_name
def ofp_msg_to_ev(msg):
return ofp_msg_to_ev_cls(msg.__class__)(msg)
def ofp_msg_to_ev_cls(msg_cls):
name = _ofp_msg_name_to_ev_name(msg_cls.__name__)
return _OFP_MSG_EVENTS[name]
def _create_ofp_msg_ev_class(msg_cls):
name = _ofp_msg_name_to_ev_name(msg_cls.__name__)
# print 'creating ofp_event %s' % name
if name in _OFP_MSG_EVENTS:
return
cls = type(name, (EventOFPMsgBase,),
dict(__init__=lambda self, msg:
super(self.__class__, self).__init__(msg)))
globals()[name] = cls
_OFP_MSG_EVENTS[name] = cls
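# Illustrative sketch (the message name below is an assumption about what the
# loaded parser modules provide, not something defined in this file): a parser
# class named OFPPacketIn would yield an event class EventOFPPacketIn, stored
# both in _OFP_MSG_EVENTS and in this module's globals, so an app could roughly
# write:
#
#   @handler.set_ev_cls(EventOFPPacketIn)
#   def packet_in_handler(self, ev):
#       msg = ev.msg  # the original OFPPacketIn message carried by the event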
def _create_ofp_msg_ev_from_module(ofp_parser):
# print mod
for _k, cls in inspect.getmembers(ofp_parser, inspect.isclass):
if not hasattr(cls, 'cls_msg_type'):
continue
_create_ofp_msg_ev_class(cls)
for ofp_mods in ofproto.get_ofp_modules().values():
ofp_parser = ofp_mods[1]
# print 'loading module %s' % ofp_parser
_create_ofp_msg_ev_from_module(ofp_parser)
class EventOFPStateChange(event.EventBase):
def __init__(self, dp):
super(EventOFPStateChange, self).__init__()
self.datapath = dp
handler.register_service('ryu.controller.ofp_handler')
| apache-2.0 |
Outernet-Project/librarian-core | librarian_core/contrib/cache/decorators.py | 2 | 2707 | import functools
from bottle import request
from ...utils import is_string
from .utils import generate_key
def cached(prefix='', timeout=None):
"""Decorator that caches return values of functions that it wraps. The
key is generated from the function's name and the parameters passed to
it. E.g.:
@cached(timeout=300) # expires in 5 minutes
def my_func(a, b, c=4):
return (a + b) / c
    The cache key in this case is an md5 hash, generated from the combined
    values of: the function's name ("my_func"), the values of `a` and `b`, and,
    for keyword arguments, both the argument name "c" and the value of `c`, all
    prefixed with the value of the `prefix` keyword argument.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not request.app.supervisor.exts.is_installed('cache'):
return func(*args, **kwargs)
backend = request.app.supervisor.exts.cache
generated = generate_key(func.__name__, *args, **kwargs)
parsed_prefix = backend.parse_prefix(prefix)
key = '{0}{1}'.format(parsed_prefix, generated)
value = backend.get(key)
if value is None:
# not found in cache, or is expired, recalculate value
value = func(*args, **kwargs)
expires_in = timeout
if expires_in is None:
expires_in = backend.default_timeout
backend.set(key, value, timeout=expires_in)
return value
return wrapper
return decorator
def invalidates(prefix, before=False, after=False):
"""Decorator that invalidates keys matching the specified prefix(es) before
and/or after invoking the wrapped function."""
def invalidate_prefixes(prefixes):
"""Helper function to call invalidate over a list of prefixes."""
for p in prefixes:
request.app.supervisor.exts.cache.invalidate(prefix=p)
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# make sure we're working with a list of prefixes always
prefixes = [prefix] if is_string(prefix) else prefix
if before:
# invalidate cache before invoking wrapped function
invalidate_prefixes(prefixes)
# obtain result of wrapped function
result = func(*args, **kwargs)
if after:
# invalidate cache after invoking wrapped function
invalidate_prefixes(prefixes)
# return result of wrapped function
return result
return wrapper
return decorator
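# A minimal usage sketch (the functions and prefix below are illustrative and
# not part of this module):
#
#   @cached(prefix='articles', timeout=60)
#   def get_article(article_id):
#       return load_article_from_db(article_id)  # hypothetical loader
#
#   @invalidates('articles', after=True)
#   def update_article(article_id, data):
#       return save_article_to_db(article_id, data)  # hypothetical writer
#
# Reads go through the cache backend, while a successful write clears every
# key stored under the 'articles' prefix once the wrapped function returns.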
| gpl-3.0 |
aktech/sympy | sympy/physics/mechanics/tests/test_linearize.py | 11 | 12048 | from __future__ import division
import warnings
from sympy import symbols, Matrix, solve, simplify, cos, sin, atan, sqrt
from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point,\
dot, cross, inertia, KanesMethod, Particle, RigidBody, Lagrangian,\
LagrangesMethod
from sympy.utilities.pytest import slow
from sympy.utilities.exceptions import SymPyDeprecationWarning
@slow
def test_linearize_rolling_disc_kane():
# Symbols for time and constant parameters
t, r, m, g, v = symbols('t r m g v')
# Configuration variables and their time derivatives
q1, q2, q3, q4, q5, q6 = q = dynamicsymbols('q1:7')
q1d, q2d, q3d, q4d, q5d, q6d = qd = [qi.diff(t) for qi in q]
# Generalized speeds and their time derivatives
u = dynamicsymbols('u:6')
u1, u2, u3, u4, u5, u6 = u = dynamicsymbols('u1:7')
u1d, u2d, u3d, u4d, u5d, u6d = [ui.diff(t) for ui in u]
# Reference frames
N = ReferenceFrame('N') # Inertial frame
NO = Point('NO') # Inertial origin
A = N.orientnew('A', 'Axis', [q1, N.z]) # Yaw intermediate frame
B = A.orientnew('B', 'Axis', [q2, A.x]) # Lean intermediate frame
C = B.orientnew('C', 'Axis', [q3, B.y]) # Disc fixed frame
CO = NO.locatenew('CO', q4*N.x + q5*N.y + q6*N.z) # Disc center
# Disc angular velocity in N expressed using time derivatives of coordinates
w_c_n_qd = C.ang_vel_in(N)
w_b_n_qd = B.ang_vel_in(N)
# Inertial angular velocity and angular acceleration of disc fixed frame
C.set_ang_vel(N, u1*B.x + u2*B.y + u3*B.z)
# Disc center velocity in N expressed using time derivatives of coordinates
v_co_n_qd = CO.pos_from(NO).dt(N)
# Disc center velocity in N expressed using generalized speeds
CO.set_vel(N, u4*C.x + u5*C.y + u6*C.z)
# Disc Ground Contact Point
P = CO.locatenew('P', r*B.z)
P.v2pt_theory(CO, N, C)
# Configuration constraint
f_c = Matrix([q6 - dot(CO.pos_from(P), N.z)])
# Velocity level constraints
f_v = Matrix([dot(P.vel(N), uv) for uv in C])
# Kinematic differential equations
kindiffs = Matrix([dot(w_c_n_qd - C.ang_vel_in(N), uv) for uv in B] +
[dot(v_co_n_qd - CO.vel(N), uv) for uv in N])
qdots = solve(kindiffs, qd)
# Set angular velocity of remaining frames
B.set_ang_vel(N, w_b_n_qd.subs(qdots))
C.set_ang_acc(N, C.ang_vel_in(N).dt(B) + cross(B.ang_vel_in(N), C.ang_vel_in(N)))
# Active forces
F_CO = m*g*A.z
# Create inertia dyadic of disc C about point CO
I = (m * r**2) / 4
J = (m * r**2) / 2
I_C_CO = inertia(C, I, J, I)
Disc = RigidBody('Disc', CO, C, m, (I_C_CO, CO))
BL = [Disc]
FL = [(CO, F_CO)]
KM = KanesMethod(N, [q1, q2, q3, q4, q5], [u1, u2, u3], kd_eqs=kindiffs,
q_dependent=[q6], configuration_constraints=f_c,
u_dependent=[u4, u5, u6], velocity_constraints=f_v)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
(fr, fr_star) = KM.kanes_equations(FL, BL)
# Test generalized form equations
linearizer = KM.to_linearizer()
assert linearizer.f_c == f_c
assert linearizer.f_v == f_v
assert linearizer.f_a == f_v.diff(t)
sol = solve(linearizer.f_0 + linearizer.f_1, qd)
for qi in qd:
assert sol[qi] == qdots[qi]
assert simplify(linearizer.f_2 + linearizer.f_3 - fr - fr_star) == Matrix([0, 0, 0])
# Perform the linearization
# Precomputed operating point
q_op = {q6: -r*cos(q2)}
u_op = {u1: 0,
u2: sin(q2)*q1d + q3d,
u3: cos(q2)*q1d,
u4: -r*(sin(q2)*q1d + q3d)*cos(q3),
u5: 0,
u6: -r*(sin(q2)*q1d + q3d)*sin(q3)}
qd_op = {q2d: 0,
q4d: -r*(sin(q2)*q1d + q3d)*cos(q1),
q5d: -r*(sin(q2)*q1d + q3d)*sin(q1),
q6d: 0}
ud_op = {u1d: 4*g*sin(q2)/(5*r) + sin(2*q2)*q1d**2/2 + 6*cos(q2)*q1d*q3d/5,
u2d: 0,
u3d: 0,
u4d: r*(sin(q2)*sin(q3)*q1d*q3d + sin(q3)*q3d**2),
u5d: r*(4*g*sin(q2)/(5*r) + sin(2*q2)*q1d**2/2 + 6*cos(q2)*q1d*q3d/5),
u6d: -r*(sin(q2)*cos(q3)*q1d*q3d + cos(q3)*q3d**2)}
A, B = linearizer.linearize(op_point=[q_op, u_op, qd_op, ud_op], A_and_B=True, simplify=True)
upright_nominal = {q1d: 0, q2: 0, m: 1, r: 1, g: 1}
# Precomputed solution
A_sol = Matrix([[0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[sin(q1)*q3d, 0, 0, 0, 0, -sin(q1), -cos(q1), 0],
[-cos(q1)*q3d, 0, 0, 0, 0, cos(q1), -sin(q1), 0],
[0, 4/5, 0, 0, 0, 0, 0, 6*q3d/5],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -2*q3d, 0, 0]])
B_sol = Matrix([])
# Check that linearization is correct
assert A.subs(upright_nominal) == A_sol
assert B.subs(upright_nominal) == B_sol
# Check eigenvalues at critical speed are all zero:
assert A.subs(upright_nominal).subs(q3d, 1/sqrt(3)).eigenvals() == {0: 8}
def test_linearize_pendulum_kane_minimal():
q1 = dynamicsymbols('q1') # angle of pendulum
u1 = dynamicsymbols('u1') # Angular velocity
q1d = dynamicsymbols('q1', 1) # Angular velocity
L, m, t = symbols('L, m, t')
g = 9.8
# Compose world frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# A.x is along the pendulum
A = N.orientnew('A', 'axis', [q1, N.z])
A.set_ang_vel(N, u1*N.z)
# Locate point P relative to the origin N*
P = pN.locatenew('P', L*A.x)
P.v2pt_theory(pN, N, A)
pP = Particle('pP', P, m)
# Create Kinematic Differential Equations
kde = Matrix([q1d - u1])
# Input the force resultant at P
R = m*g*N.x
# Solve for eom with kanes method
KM = KanesMethod(N, q_ind=[q1], u_ind=[u1], kd_eqs=kde)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
(fr, frstar) = KM.kanes_equations([(P, R)], [pP])
# Linearize
A, B, inp_vec = KM.linearize(A_and_B=True, new_method=True, simplify=True)
assert A == Matrix([[0, 1], [-9.8*cos(q1)/L, 0]])
assert B == Matrix([])
def test_linearize_pendulum_kane_nonminimal():
# Create generalized coordinates and speeds for this non-minimal realization
# q1, q2 = N.x and N.y coordinates of pendulum
# u1, u2 = N.x and N.y velocities of pendulum
q1, q2 = dynamicsymbols('q1:3')
q1d, q2d = dynamicsymbols('q1:3', level=1)
u1, u2 = dynamicsymbols('u1:3')
u1d, u2d = dynamicsymbols('u1:3', level=1)
L, m, t = symbols('L, m, t')
g = 9.8
# Compose world frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# A.x is along the pendulum
theta1 = atan(q2/q1)
A = N.orientnew('A', 'axis', [theta1, N.z])
# Locate the pendulum mass
P = pN.locatenew('P1', q1*N.x + q2*N.y)
pP = Particle('pP', P, m)
# Calculate the kinematic differential equations
kde = Matrix([q1d - u1,
q2d - u2])
dq_dict = solve(kde, [q1d, q2d])
# Set velocity of point P
P.set_vel(N, P.pos_from(pN).dt(N).subs(dq_dict))
# Configuration constraint is length of pendulum
f_c = Matrix([P.pos_from(pN).magnitude() - L])
# Velocity constraint is that the velocity in the A.x direction is
# always zero (the pendulum is never getting longer).
f_v = Matrix([P.vel(N).express(A).dot(A.x)])
f_v.simplify()
    # The acceleration constraint is the time derivative of the velocity constraint
f_a = f_v.diff(t)
f_a.simplify()
# Input the force resultant at P
R = m*g*N.x
# Derive the equations of motion using the KanesMethod class.
KM = KanesMethod(N, q_ind=[q2], u_ind=[u2], q_dependent=[q1],
u_dependent=[u1], configuration_constraints=f_c,
velocity_constraints=f_v, acceleration_constraints=f_a, kd_eqs=kde)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)
(fr, frstar) = KM.kanes_equations([(P, R)], [pP])
# Set the operating point to be straight down, and non-moving
q_op = {q1: L, q2: 0}
u_op = {u1: 0, u2: 0}
ud_op = {u1d: 0, u2d: 0}
A, B, inp_vec = KM.linearize(op_point=[q_op, u_op, ud_op], A_and_B=True,
new_method=True, simplify=True)
assert A == Matrix([[0, 1], [-9.8/L, 0]])
assert B == Matrix([])
def test_linearize_pendulum_lagrange_minimal():
q1 = dynamicsymbols('q1') # angle of pendulum
q1d = dynamicsymbols('q1', 1) # Angular velocity
L, m, t = symbols('L, m, t')
g = 9.8
# Compose world frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# A.x is along the pendulum
A = N.orientnew('A', 'axis', [q1, N.z])
A.set_ang_vel(N, q1d*N.z)
# Locate point P relative to the origin N*
P = pN.locatenew('P', L*A.x)
P.v2pt_theory(pN, N, A)
pP = Particle('pP', P, m)
# Solve for eom with Lagranges method
Lag = Lagrangian(N, pP)
LM = LagrangesMethod(Lag, [q1], forcelist=[(P, m*g*N.x)], frame=N)
LM.form_lagranges_equations()
# Linearize
A, B, inp_vec = LM.linearize([q1], [q1d], A_and_B=True)
assert A == Matrix([[0, 1], [-9.8*cos(q1)/L, 0]])
assert B == Matrix([])
def test_linearize_pendulum_lagrange_nonminimal():
q1, q2 = dynamicsymbols('q1:3')
q1d, q2d = dynamicsymbols('q1:3', level=1)
L, m, t = symbols('L, m, t')
g = 9.8
# Compose World Frame
N = ReferenceFrame('N')
pN = Point('N*')
pN.set_vel(N, 0)
# A.x is along the pendulum
theta1 = atan(q2/q1)
A = N.orientnew('A', 'axis', [theta1, N.z])
# Create point P, the pendulum mass
P = pN.locatenew('P1', q1*N.x + q2*N.y)
P.set_vel(N, P.pos_from(pN).dt(N))
pP = Particle('pP', P, m)
# Constraint Equations
f_c = Matrix([q1**2 + q2**2 - L**2])
# Calculate the lagrangian, and form the equations of motion
Lag = Lagrangian(N, pP)
LM = LagrangesMethod(Lag, [q1, q2], hol_coneqs=f_c, forcelist=[(P, m*g*N.x)], frame=N)
LM.form_lagranges_equations()
# Compose operating point
op_point = {q1: L, q2: 0, q1d: 0, q2d: 0, q1d.diff(t): 0, q2d.diff(t): 0}
# Solve for multiplier operating point
lam_op = LM.solve_multipliers(op_point=op_point)
op_point.update(lam_op)
# Perform the Linearization
A, B, inp_vec = LM.linearize([q2], [q2d], [q1], [q1d],
op_point=op_point, A_and_B=True)
assert A == Matrix([[0, 1], [-9.8/L, 0]])
assert B == Matrix([])
def test_linearize_rolling_disc_lagrange():
q1, q2, q3 = q = dynamicsymbols('q1 q2 q3')
q1d, q2d, q3d = qd = dynamicsymbols('q1 q2 q3', 1)
r, m, g = symbols('r m g')
N = ReferenceFrame('N')
Y = N.orientnew('Y', 'Axis', [q1, N.z])
L = Y.orientnew('L', 'Axis', [q2, Y.x])
R = L.orientnew('R', 'Axis', [q3, L.y])
C = Point('C')
C.set_vel(N, 0)
Dmc = C.locatenew('Dmc', r * L.z)
Dmc.v2pt_theory(C, N, R)
I = inertia(L, m / 4 * r**2, m / 2 * r**2, m / 4 * r**2)
BodyD = RigidBody('BodyD', Dmc, R, m, (I, Dmc))
BodyD.potential_energy = - m * g * r * cos(q2)
Lag = Lagrangian(N, BodyD)
l = LagrangesMethod(Lag, q)
l.form_lagranges_equations()
# Linearize about steady-state upright rolling
op_point = {q1: 0, q2: 0, q3: 0,
q1d: 0, q2d: 0,
q1d.diff(): 0, q2d.diff(): 0, q3d.diff(): 0}
A = l.linearize(q_ind=q, qd_ind=qd, op_point=op_point, A_and_B=True)[0]
sol = Matrix([[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, -6*q3d, 0],
[0, -4*g/(5*r), 0, 6*q3d/5, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert A == sol
| bsd-3-clause |
fernandezcuesta/ansible | lib/ansible/modules/packaging/os/opkg.py | 7 | 5205 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Pelletier <[email protected]>
# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: opkg
author: "Patrick Pelletier (@skinp)"
short_description: Package manager for OpenWrt
description:
- Manages OpenWrt packages
version_added: "1.1"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
force:
description:
- opkg --force parameter used
choices:
- ""
- "depends"
- "maintainer"
- "reinstall"
- "overwrite"
- "downgrade"
- "space"
- "postinstall"
- "remove"
- "checksum"
- "removal-of-dependent-packages"
required: false
        default: ""
version_added: "2.0"
update_cache:
description:
- update the package db first
required: false
default: "no"
choices: [ "yes", "no" ]
notes: []
requirements:
- opkg
- python
'''
EXAMPLES = '''
- opkg:
name: foo
state: present
- opkg:
name: foo
state: present
update_cache: yes
- opkg:
name: foo
state: absent
- opkg:
name: foo,bar
state: absent
- opkg:
name: foo
state: present
force: overwrite
'''
import pipes
def update_package_db(module, opkg_path):
""" Updates packages list. """
rc, out, err = module.run_command("%s update" % opkg_path)
if rc != 0:
module.fail_json(msg="could not update package db")
def query_package(module, opkg_path, name, state="present"):
""" Returns whether a package is installed or not. """
if state == "present":
rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
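# For example (the package name is illustrative), with opkg found at /bin/opkg
# the constructed shell pipeline is roughly:
#   /bin/opkg list-installed | grep -q "^foo "
# which exits 0 only when 'foo' shows up as an installed package.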
def remove_packages(module, opkg_path, packages):
""" Uninstalls one or more packages if installed. """
p = module.params
force = p["force"]
if force:
force = "--force-%s" % force
remove_c = 0
    # Using a for loop so that, in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, opkg_path, package):
continue
rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
if query_package(module, opkg_path, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, opkg_path, packages):
""" Installs one or more packages if not already installed. """
p = module.params
force = p["force"]
if force:
force = "--force-%s" % force
install_c = 0
for package in packages:
if query_package(module, opkg_path, package):
continue
rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
if not query_package(module, opkg_path, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=["pkg"], required=True),
state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
"checksum", "removal-of-dependent-packages"]),
update_cache=dict(default="no", aliases=["update-cache"], type='bool')
)
)
opkg_path = module.get_bin_path('opkg', True, ['/bin'])
p = module.params
if p["update_cache"]:
update_package_db(module, opkg_path)
pkgs = p["name"].split(",")
if p["state"] in ["present", "installed"]:
install_packages(module, opkg_path, pkgs)
elif p["state"] in ["absent", "removed"]:
remove_packages(module, opkg_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
sergiosvieira/ns3-dtn | src/visualizer/visualizer/plugins/show_last_packets.py | 182 | 9460 | import gobject
import gtk
import ns.core
import ns.network
import ns.visualizer
from visualizer.base import InformationWindow
from visualizer.higcontainer import HIGContainer
from kiwi.ui.objectlist import ObjectList, Column
class ShowLastPackets(InformationWindow):
class PacketList(gtk.ScrolledWindow):
(
COLUMN_TIME,
COLUMN_INTERFACE,
COLUMN_SIZE,
COLUMN_CONTENTS,
) = range(4)
def __init__(self):
super(ShowLastPackets.PacketList, self).__init__()
self.set_properties(hscrollbar_policy=gtk.POLICY_AUTOMATIC,
vscrollbar_policy=gtk.POLICY_AUTOMATIC)
self.table_model = gtk.ListStore(*([str]*4))
treeview = gtk.TreeView(self.table_model)
treeview.show()
self.add(treeview)
def add_column(descr, colid):
column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid)
treeview.append_column(column)
add_column("Time", self.COLUMN_TIME)
add_column("Interface", self.COLUMN_INTERFACE)
add_column("Size", self.COLUMN_SIZE)
add_column("Contents", self.COLUMN_CONTENTS)
def update(self, node, packet_list):
self.table_model.clear()
for sample in packet_list:
tree_iter = self.table_model.append()
if sample.device is None:
interface_name = "(unknown)"
else:
interface_name = ns.core.Names.FindName(sample.device)
if not interface_name:
interface_name = "(interface %i)" % sample.device.GetIfIndex()
self.table_model.set(tree_iter,
self.COLUMN_TIME, str(sample.time.GetSeconds()),
self.COLUMN_INTERFACE, interface_name,
self.COLUMN_SIZE, str(sample.packet.GetSize ()),
self.COLUMN_CONTENTS, str(sample.packet)
)
def __init__(self, visualizer, node_index):
InformationWindow.__init__(self)
self.win = gtk.Dialog(parent=visualizer.window,
flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.win.connect("response", self._response_cb)
self.win.set_title("Last packets for node %i" % node_index)
self.visualizer = visualizer
self.viz_node = visualizer.get_node(node_index)
self.node = ns.network.NodeList.GetNode(node_index)
def smart_expand(expander, vbox):
if expander.get_expanded():
vbox.set_child_packing(expander, expand=True, fill=True, padding=0, pack_type=gtk.PACK_START)
else:
vbox.set_child_packing(expander, expand=False, fill=False, padding=0, pack_type=gtk.PACK_START)
main_hbox = gtk.HBox(False, 4)
main_hbox.show()
main_vbox = gtk.VBox(False, 4)
main_vbox.show()
self.win.vbox.add(main_hbox)
main_hbox.add(main_vbox)
self.tx_list = self.PacketList()
self.tx_list.show()
group = gtk.Expander("Last transmitted packets")
group.show()
group.add(self.tx_list)
main_vbox.pack_start(group, expand=False, fill=False)
group.connect_after("activate", smart_expand, main_vbox)
self.rx_list = self.PacketList()
self.rx_list.show()
group = gtk.Expander("Last received packets")
group.show()
group.add(self.rx_list)
main_vbox.pack_start(group, expand=False, fill=False)
group.connect_after("activate", smart_expand, main_vbox)
self.drop_list = self.PacketList()
self.drop_list.show()
group = gtk.Expander("Last dropped packets")
group.show()
group.add(self.drop_list)
main_vbox.pack_start(group, expand=False, fill=False)
group.connect_after("activate", smart_expand, main_vbox)
# Packet Filter
# - options
self.packet_capture_options = ns.visualizer.PyViz.PacketCaptureOptions()
self.packet_capture_options.numLastPackets = 100
packet_filter_vbox = gtk.VBox(False, 4)
packet_filter_vbox.show()
main_hbox.add(packet_filter_vbox)
sel_buttons_box = gtk.HButtonBox()
sel_buttons_box.show()
packet_filter_vbox.pack_start(sel_buttons_box, False, False, 4)
select_all_button = gobject.new(gtk.Button, label="Sel. All", visible=True)
select_none_button = gobject.new(gtk.Button, label="Sel. None", visible=True)
sel_buttons_box.add(select_all_button)
sel_buttons_box.add(select_none_button)
self.packet_filter_widget = ObjectList([
Column('selected', title="Sel.", data_type=bool, editable=True),
Column('name', title="Header"),
], sortable=True)
self.packet_filter_widget.show()
packet_filter_vbox.pack_start(self.packet_filter_widget, True, True, 4)
class TypeIdConfig(object):
__slots__ = ['name', 'selected', 'typeid']
self.packet_filter_list = [] # list of TypeIdConfig instances
Header = ns.core.TypeId.LookupByName("ns3::Header")
Trailer = ns.core.TypeId.LookupByName("ns3::Trailer")
for typeid_i in range(ns.core.TypeId.GetRegisteredN()):
typeid = ns.core.TypeId.GetRegistered(typeid_i)
# check if this is a header or trailer subtype
typeid_tmp = typeid
type_is_good = False
while 1:
if typeid_tmp == Header or typeid_tmp == Trailer:
type_is_good = True
break
if typeid_tmp.HasParent():
typeid_tmp = typeid_tmp.GetParent()
else:
break
if not type_is_good:
continue
if typeid in [Header, Trailer]:
continue
c = TypeIdConfig()
c.selected = True
c.name = typeid.GetName()
c.typeid = typeid
self.packet_filter_list.append(c)
self.packet_filter_widget.add_list(self.packet_filter_list)
def update_capture_options():
if self.op_AND_button.props.active:
self.packet_capture_options.mode = ns.visualizer.PyViz.PACKET_CAPTURE_FILTER_HEADERS_AND
else:
self.packet_capture_options.mode = ns.visualizer.PyViz.PACKET_CAPTURE_FILTER_HEADERS_OR
self.packet_capture_options.numLastPackets = 100
self.packet_capture_options.headers = [c.typeid for c in self.packet_filter_list if c.selected]
self.visualizer.simulation.lock.acquire()
try:
self.visualizer.simulation.sim_helper.SetPacketCaptureOptions(
self.node.GetId(), self.packet_capture_options)
finally:
self.visualizer.simulation.lock.release()
def sel_all_cb(bt):
for c in self.packet_filter_list:
c.selected = True
self.packet_filter_widget.refresh()
update_capture_options()
def sel_none_cb(bt):
for c in self.packet_filter_list:
c.selected = False
self.packet_filter_widget.refresh()
update_capture_options()
select_all_button.connect("clicked", sel_all_cb)
select_none_button.connect("clicked", sel_none_cb)
op_buttons_box = gtk.HButtonBox()
op_buttons_box.show()
packet_filter_vbox.pack_start(op_buttons_box, False, False, 4)
self.op_AND_button = gobject.new(gtk.RadioButton, label="AND", visible=True)
self.op_OR_button = gobject.new(gtk.RadioButton, label="OR", visible=True, group=self.op_AND_button)
op_buttons_box.add(self.op_AND_button)
op_buttons_box.add(self.op_OR_button)
self.op_OR_button.props.active = True
self.op_AND_button.connect("toggled", lambda b: update_capture_options())
def cell_edited(l, obj, attribute):
update_capture_options()
self.packet_filter_widget.connect("cell-edited", cell_edited)
update_capture_options()
self.visualizer.add_information_window(self)
self.win.set_default_size(600, 300)
self.win.show()
def _response_cb(self, win, response):
self.win.destroy()
self.visualizer.remove_information_window(self)
def update(self):
last_packets = self.visualizer.simulation.sim_helper.GetLastPackets(self.node.GetId())
self.tx_list.update(self.node, last_packets.lastTransmittedPackets)
self.rx_list.update(self.node, last_packets.lastReceivedPackets)
self.drop_list.update(self.node, last_packets.lastDroppedPackets)
def populate_node_menu(viz, node, menu):
menu_item = gtk.MenuItem("Show Last Packets")
menu_item.show()
def _show_it(dummy_menu_item):
ShowLastPackets(viz, node.node_index)
menu_item.connect("activate", _show_it)
menu.add(menu_item)
def register(viz):
viz.connect("populate-node-menu", populate_node_menu)
| gpl-2.0 |
daenamkim/ansible | lib/ansible/modules/monitoring/sensu_client.py | 49 | 9532 | #!/usr/bin/python
# (c) 2017, Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sensu_client
author: "David Moreau Simard (@dmsimard)"
short_description: Manages Sensu client configuration
version_added: 2.4
description:
- Manages Sensu client configuration.
- 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)'
options:
state:
description:
- Whether the client should be present or not
choices: [ 'present', 'absent' ]
required: False
default: present
name:
description:
- A unique name for the client. The name cannot contain special characters or spaces.
required: False
default: System hostname as determined by Ruby Socket.gethostname (provided by Sensu)
address:
description:
- An address to help identify and reach the client. This is only informational, usually an IP address or hostname.
required: False
default: Non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu)
subscriptions:
description:
- An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver).
- These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions.
- The subscriptions array items must be strings.
required: True
default: null
safe_mode:
description:
- If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check.
choices: [ 'true', 'false' ]
required: False
default: false
redact:
description:
- Client definition attributes to redact (values) when logging and sending client keepalives.
required: False
default: null
socket:
description:
- The socket definition scope, used to configure the Sensu client socket.
required: False
default: null
keepalives:
description:
- If Sensu should monitor keepalives for this client.
choices: [ 'true', 'false' ]
required: False
default: true
keepalive:
description:
- The keepalive definition scope, used to configure Sensu client keepalives behavior (e.g. keepalive thresholds, etc).
required: False
default: null
registration:
description:
- The registration definition scope, used to configure Sensu registration event handlers.
required: False
default: null
deregister:
description:
- If a deregistration event should be created upon Sensu client process stop.
choices: [ 'true', 'false' ]
required: False
default: false
deregistration:
description:
- The deregistration definition scope, used to configure automated Sensu client de-registration.
required: False
default: null
ec2:
description:
- The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only).
required: False
default: null
chef:
description:
- The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only).
required: False
default: null
puppet:
description:
- The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only).
required: False
default: null
servicenow:
description:
- The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only).
required: False
default: null
notes:
- Check mode is supported
requirements: [ ]
'''
EXAMPLES = '''
# Minimum possible configuration
- name: Configure Sensu client
sensu_client:
subscriptions:
- default
# With customization
- name: Configure Sensu client
sensu_client:
name: "{{ ansible_fqdn }}"
address: "{{ ansible_default_ipv4['address'] }}"
subscriptions:
- default
- webserver
redact:
- password
socket:
bind: 127.0.0.1
port: 3030
keepalive:
thresholds:
warning: 180
critical: 300
handlers:
- email
custom:
- broadcast: irc
occurrences: 3
register: client
notify:
- Restart sensu-client
- name: Secure Sensu client configuration file
file:
path: "{{ client['file'] }}"
owner: "sensu"
group: "sensu"
mode: "0600"
- name: Delete the Sensu client configuration
sensu_client:
state: "absent"
'''
RETURN = '''
config:
description: Effective client configuration, when state is present
returned: success
type: dict
sample: {'name': 'client', 'subscriptions': ['default']}
file:
description: Path to the client configuration file
returned: success
type: string
sample: "/etc/sensu/conf.d/client.json"
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
name=dict(type='str', required=False),
address=dict(type='str', required=False),
subscriptions=dict(type='list', required=False),
safe_mode=dict(type='bool', required=False, default=False),
redact=dict(type='list', required=False),
socket=dict(type='dict', required=False),
keepalives=dict(type='bool', required=False, default=True),
keepalive=dict(type='dict', required=False),
registration=dict(type='dict', required=False),
deregister=dict(type='bool', required=False),
deregistration=dict(type='dict', required=False),
ec2=dict(type='dict', required=False),
chef=dict(type='dict', required=False),
puppet=dict(type='dict', required=False),
servicenow=dict(type='dict', required=False)
),
required_if=[
['state', 'present', ['subscriptions']]
]
)
state = module.params['state']
path = "/etc/sensu/conf.d/client.json"
if state == 'absent':
if os.path.exists(path):
if module.check_mode:
msg = '{path} would have been deleted'.format(path=path)
module.exit_json(msg=msg, changed=True)
else:
try:
os.remove(path)
msg = '{path} deleted successfully'.format(path=path)
module.exit_json(msg=msg, changed=True)
except OSError as e:
msg = 'Exception when trying to delete {path}: {exception}'
module.fail_json(
msg=msg.format(path=path, exception=str(e)))
else:
# Idempotency: it's okay if the file doesn't exist
msg = '{path} already does not exist'.format(path=path)
module.exit_json(msg=msg)
# Build client configuration from module arguments
config = {'client': {}}
args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact',
'socket', 'keepalives', 'keepalive', 'registration', 'deregister',
'deregistration', 'ec2', 'chef', 'puppet', 'servicenow']
for arg in args:
if arg in module.params and module.params[arg] is not None:
config['client'][arg] = module.params[arg]
# Load the current config, if there is one, so we can compare
current_config = None
try:
current_config = json.load(open(path, 'r'))
except (IOError, ValueError):
# File either doesn't exist or it's invalid JSON
pass
if current_config is not None and current_config == config:
# Config is the same, let's not change anything
module.exit_json(msg='Client configuration is already up to date',
config=config['client'],
file=path)
# Validate that directory exists before trying to write to it
if not module.check_mode and not os.path.exists(os.path.dirname(path)):
try:
os.makedirs(os.path.dirname(path))
except OSError as e:
module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
str(e)))
if module.check_mode:
module.exit_json(msg='Client configuration would have been updated',
changed=True,
config=config['client'],
file=path)
try:
with open(path, 'w') as client:
client.write(json.dumps(config, indent=4))
module.exit_json(msg='Client configuration updated',
changed=True,
config=config['client'],
file=path)
except (OSError, IOError) as e:
module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
str(e)))
if __name__ == '__main__':
main()
| gpl-3.0 |
gaddman/ansible | lib/ansible/module_utils/ovirt.py | 7 | 28387 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import inspect
import os
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime
from distutils.version import LooseVersion
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.common._collections_compat import Mapping
try:
from enum import Enum # enum is a ovirtsdk4 requirement
import ovirtsdk4 as sdk
import ovirtsdk4.version as sdk_version
import ovirtsdk4.types as otypes
HAS_SDK = LooseVersion(sdk_version.VERSION) >= LooseVersion('4.2.4')
except ImportError:
HAS_SDK = False
BYTES_MAP = {
'kib': 2**10,
'mib': 2**20,
'gib': 2**30,
'tib': 2**40,
'pib': 2**50,
}
def check_sdk(module):
if not HAS_SDK:
module.fail_json(
msg='ovirtsdk4 version 4.2.4 or higher is required for this module'
)
def get_dict_of_struct(struct, connection=None, fetch_nested=False, attributes=None):
"""
Convert SDK Struct type into dictionary.
"""
res = {}
def remove_underscore(val):
if val.startswith('_'):
val = val[1:]
remove_underscore(val)
return val
def convert_value(value):
nested = False
if isinstance(value, sdk.Struct):
if not fetch_nested or not value.href:
return get_dict_of_struct(value)
# Fetch nested values of struct:
try:
value = connection.follow_link(value)
except sdk.Error:
value = None
nested_obj = dict(
(attr, convert_value(getattr(value, attr)))
for attr in attributes if getattr(value, attr, None)
)
nested_obj['id'] = getattr(value, 'id', None)
nested_obj['href'] = getattr(value, 'href', None)
return nested_obj
elif isinstance(value, Enum) or isinstance(value, datetime):
return str(value)
elif isinstance(value, list) or isinstance(value, sdk.List):
if isinstance(value, sdk.List) and fetch_nested and value.href:
try:
value = connection.follow_link(value)
nested = True
except sdk.Error:
value = []
ret = []
for i in value:
if isinstance(i, sdk.Struct):
if not nested:
ret.append(get_dict_of_struct(i))
else:
nested_obj = dict(
(attr, convert_value(getattr(i, attr)))
for attr in attributes if getattr(i, attr, None)
)
nested_obj['id'] = getattr(i, 'id', None)
ret.append(nested_obj)
elif isinstance(i, Enum):
ret.append(str(i))
else:
ret.append(i)
return ret
else:
return value
if struct is not None:
for key, value in struct.__dict__.items():
if value is None:
continue
key = remove_underscore(key)
res[key] = convert_value(value)
return res
def engine_version(connection):
"""
Return string representation of oVirt engine version.
"""
engine_api = connection.system_service().get()
engine_version = engine_api.product_info.version
return '%s.%s' % (engine_version.major, engine_version.minor)
def create_connection(auth):
"""
    Create a connection to the Python SDK from the task's `auth` parameter.
    If the user doesn't have an SSO token, the `auth` dictionary has the following mandatory parameters:
    url, username, password
    If the user has an SSO token, the `auth` dictionary has the following mandatory parameters:
    url, token
    The `ca_file` parameter is mandatory if the user wants to use a secure connection;
    if the user wants to use an insecure connection, it's mandatory to send insecure=True.
:param auth: dictionary which contains needed values for connection creation
:return: Python SDK connection
"""
url = auth.get('url')
if url is None and auth.get('hostname') is not None:
url = 'https://{0}/ovirt-engine/api'.format(auth.get('hostname'))
return sdk.Connection(
url=url,
username=auth.get('username'),
password=auth.get('password'),
ca_file=auth.get('ca_file', None),
insecure=auth.get('insecure', False),
token=auth.get('token', None),
kerberos=auth.get('kerberos', None),
headers=auth.get('headers', None),
)
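# Illustrative `auth` dictionaries (all values below are placeholders):
#   create_connection({'url': 'https://engine.example.com/ovirt-engine/api',
#                      'username': 'admin@internal', 'password': 'secret',
#                      'ca_file': '/etc/pki/ovirt-engine/ca.pem'})
#   create_connection({'hostname': 'engine.example.com', 'token': 'SSO-TOKEN',
#                      'insecure': True})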
def convert_to_bytes(param):
"""
    This method converts units to bytes, following the IEC standard.
:param param: value to be converted
"""
if param is None:
return None
# Get rid of whitespaces:
param = ''.join(param.split())
# Convert to bytes:
if param[-3].lower() in ['k', 'm', 'g', 't', 'p']:
return int(param[:-3]) * BYTES_MAP.get(param[-3:].lower(), 1)
elif param.isdigit():
return int(param) * 2**10
else:
raise ValueError(
"Unsupported value(IEC supported): '{value}'".format(value=param)
)
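# Expected behaviour sketch (values follow directly from BYTES_MAP above):
#   convert_to_bytes('1 KiB')  -> 1024
#   convert_to_bytes('2MiB')   -> 2097152
#   convert_to_bytes('10')     -> 10240  (a bare number is treated as KiB)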
def follow_link(connection, link):
"""
    This method returns the entity of the element that the link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: entity which link points to
"""
if link:
return connection.follow_link(link)
else:
return None
def get_link_name(connection, link):
"""
    This method returns the name of the element that the link points to.
:param connection: connection to the Python SDK
:param link: link of the entity
:return: name of the entity, which link points to
"""
if link:
return connection.follow_link(link).name
else:
return None
def equal(param1, param2, ignore_case=False):
"""
    Compare two parameters and return whether they are equal.
    This helper doesn't run the comparison if the first parameter is None.
    With this approach we skip the comparison whenever the user doesn't
    specify the parameter in their task.
    :param param1: user-provided parameter
:param param2: value of entity parameter
:return: True if parameters are equal or first parameter is None, otherwise False
"""
if param1 is not None:
if ignore_case:
return param1.lower() == param2.lower()
return param1 == param2
return True
def search_by_attributes(service, list_params=None, **kwargs):
"""
    Search for the entity by attributes. Nested entities don't support search
    via REST, so when searching for a nested entity we return all entities
    and filter them by the specified attributes.
"""
list_params = list_params or {}
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
search=' and '.join('{0}={1}'.format(k, v) for k, v in kwargs.items()),
**list_params
)
else:
res = [
e for e in service.list(**list_params) if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def search_by_name(service, name, **kwargs):
"""
Search for the entity by its name. Nested entities don't support search
    via REST, so when searching for a nested entity we return all entities
    and filter them by name.
:param service: service of the entity
:param name: name of the entity
:return: Entity object returned by Python SDK
"""
# Check if 'list' method support search(look for search parameter):
if 'search' in inspect.getargspec(service.list)[0]:
res = service.list(
search="name={name}".format(name=name)
)
else:
res = [e for e in service.list() if e.name == name]
if kwargs:
res = [
e for e in service.list() if len([
k for k, v in kwargs.items() if getattr(e, k, None) == v
]) == len(kwargs)
]
res = res or [None]
return res[0]
def get_entity(service, get_params=None):
"""
Ignore SDK Error in case of getting an entity from service.
"""
entity = None
try:
if get_params is not None:
entity = service.get(**get_params)
else:
entity = service.get()
except sdk.Error:
# We can get here 404, we should ignore it, in case
# of removing entity for example.
pass
return entity
def get_id_by_name(service, name, raise_error=True, ignore_case=False):
"""
    Search for an entity ID by its name.
"""
entity = search_by_name(service, name)
if entity is not None:
return entity.id
if raise_error:
raise Exception("Entity '%s' was not found." % name)
def wait(
service,
condition,
fail_condition=lambda e: False,
timeout=180,
wait=True,
poll_interval=3,
):
"""
Wait until entity fulfill expected condition.
:param service: service of the entity
:param condition: condition to be fulfilled
:param fail_condition: if this condition is true, raise Exception
:param timeout: max time to wait in seconds
:param wait: if True wait for condition, if False don't wait
:param poll_interval: Number of seconds we should wait until next condition check
"""
# Wait until the desired state of the entity:
if wait:
start = time.time()
while time.time() < start + timeout:
# Exit if the condition of entity is valid:
entity = get_entity(service)
if condition(entity):
return
elif fail_condition(entity):
raise Exception("Error while waiting on result state of the entity.")
# Sleep for `poll_interval` seconds if none of the conditions apply:
time.sleep(float(poll_interval))
raise Exception("Timeout exceed while waiting on result state of the entity.")
def __get_auth_dict():
OVIRT_URL = os.environ.get('OVIRT_URL')
OVIRT_HOSTNAME = os.environ.get('OVIRT_HOSTNAME')
OVIRT_USERNAME = os.environ.get('OVIRT_USERNAME')
OVIRT_PASSWORD = os.environ.get('OVIRT_PASSWORD')
OVIRT_TOKEN = os.environ.get('OVIRT_TOKEN')
OVIRT_CAFILE = os.environ.get('OVIRT_CAFILE')
OVIRT_INSECURE = OVIRT_CAFILE is None
env_vars = None
if OVIRT_URL is None and OVIRT_HOSTNAME is not None:
OVIRT_URL = 'https://{0}/ovirt-engine/api'.format(OVIRT_HOSTNAME)
if OVIRT_URL and ((OVIRT_USERNAME and OVIRT_PASSWORD) or OVIRT_TOKEN):
env_vars = {
'url': OVIRT_URL,
'username': OVIRT_USERNAME,
'password': OVIRT_PASSWORD,
'insecure': OVIRT_INSECURE,
'token': OVIRT_TOKEN,
'ca_file': OVIRT_CAFILE,
}
if env_vars is not None:
auth = dict(default=env_vars, type='dict')
else:
auth = dict(required=True, type='dict')
return auth
def ovirt_facts_full_argument_spec(**kwargs):
"""
Extend parameters of facts module with parameters which are common to all
oVirt facts modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
def ovirt_full_argument_spec(**kwargs):
"""
Extend parameters of module with parameters which are common to all oVirt modules.
:param kwargs: kwargs to be extended
:return: extended dictionary with common parameters
"""
spec = dict(
auth=__get_auth_dict(),
timeout=dict(default=180, type='int'),
wait=dict(default=True, type='bool'),
poll_interval=dict(default=3, type='int'),
fetch_nested=dict(default=False, type='bool'),
nested_attributes=dict(type='list', default=list()),
)
spec.update(kwargs)
return spec
def check_params(module):
"""
Most modules must have either `name` or `id` specified.
"""
if module.params.get('name') is None and module.params.get('id') is None:
module.fail_json(msg='"name" or "id" is required')
def engine_supported(connection, version):
return LooseVersion(engine_version(connection)) >= LooseVersion(version)
def check_support(version, connection, module, params):
"""
Check if parameters used by user are supported by oVirt Python SDK
and oVirt engine.
"""
api_version = LooseVersion(engine_version(connection))
version = LooseVersion(version)
for param in params:
if module.params.get(param) is not None:
return LooseVersion(sdk_version.VERSION) >= version and api_version >= version
return True
class BaseModule(object):
"""
    This is the base class for oVirt modules. oVirt modules should inherit this
    class and override its methods to customize the specific needs of the module.
    The only abstract method of this class is `build_entity`, which must
    be implemented in the child class.
"""
__metaclass__ = ABCMeta
def __init__(self, connection, module, service, changed=False):
self._connection = connection
self._module = module
self._service = service
self._changed = changed
self._diff = {'after': dict(), 'before': dict()}
@property
def changed(self):
return self._changed
@changed.setter
def changed(self, changed):
if not self._changed:
self._changed = changed
@abstractmethod
def build_entity(self):
"""
This method should return oVirt Python SDK type, which we want to
create or update, initialized by values passed by Ansible module.
For example if we want to create VM, we will return following:
types.Vm(name=self._module.params['vm_name'])
:return: Specific instance of sdk.Struct.
"""
pass
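    # A minimal subclass sketch (the tag example below is illustrative, not
    # part of this base class):
    #
    #   class TagsModule(BaseModule):
    #       def build_entity(self):
    #           return otypes.Tag(name=self._module.params['name'])
    #
    #       def update_check(self, entity):
    #           return equal(self._module.params.get('name'), entity.name)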
def param(self, name, default=None):
"""
Return a module parameter specified by it's name.
"""
return self._module.params.get(name, default)
def update_check(self, entity):
"""
        This method checks whether the entity values are the same as the values
        passed to the Ansible module. By default we don't compare any values.
:param entity: Entity we want to compare with Ansible module values.
:return: True if values are same, so we don't need to update the entity.
"""
return True
def pre_create(self, entity):
"""
This method is called right before entity is created.
:param entity: Entity to be created or updated.
"""
pass
def post_create(self, entity):
"""
This method is called right after entity is created.
:param entity: Entity which was created.
"""
pass
def post_update(self, entity):
"""
This method is called right after entity is updated.
:param entity: Entity which was updated.
"""
pass
def diff_update(self, after, update):
for k, v in update.items():
if isinstance(v, Mapping):
after[k] = self.diff_update(after.get(k, dict()), v)
else:
after[k] = update[k]
return after
def create(
self,
entity=None,
result_state=None,
fail_condition=lambda e: False,
search_params=None,
update_params=None,
**kwargs
):
"""
        Method which is called when the state of the entity is 'present'. If the
        user doesn't provide the `entity` parameter, the entity is searched for
        using the `search_params` parameter. If the entity is found it's updated;
        whether the entity should be updated is checked by the `update_check`
        method. The corresponding updated entity is built by the `build_entity`
        method.
Function executed after entity is created can optionally be specified
in `post_create` parameter. Function executed after entity is updated
can optionally be specified in `post_update` parameter.
:param entity: Entity we want to update, if exists.
:param result_state: State which should entity has in order to finish task.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param search_params: Dictionary of parameters to be used for search.
:param update_params: The params which should be passed to update method.
:param kwargs: Additional parameters passed when creating entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
self.pre_create(entity)
if entity:
# Entity exists, so update it:
entity_service = self._service.service(entity.id)
if not self.update_check(entity):
new_entity = self.build_entity()
if not self._module.check_mode:
update_params = update_params or {}
updated_entity = entity_service.update(
new_entity,
**update_params
)
self.post_update(entity)
                # Update diffs only if the user specified the --diff parameter,
                # so we don't needlessly overload the API:
if self._module._diff:
before = get_dict_of_struct(
entity,
self._connection,
fetch_nested=True,
attributes=['name'],
)
after = before.copy()
self.diff_update(after, get_dict_of_struct(new_entity))
self._diff['before'] = before
self._diff['after'] = after
self.changed = True
else:
# Entity don't exists, so create it:
if not self._module.check_mode:
entity = self._service.add(
self.build_entity(),
**kwargs
)
self.post_create(entity)
self.changed = True
if not self._module.check_mode:
# Wait for the entity to be created and to be in the defined state:
entity_service = self._service.service(entity.id)
def state_condition(entity):
return entity
if result_state:
def state_condition(entity):
return entity and entity.status == result_state
wait(
service=entity_service,
condition=state_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': getattr(entity, 'id', None),
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def pre_remove(self, entity):
"""
This method is called right before entity is removed.
:param entity: Entity which we want to remove.
"""
pass
def entity_name(self, entity):
return "{e_type} '{e_name}'".format(
e_type=type(entity).__name__.lower(),
e_name=getattr(entity, 'name', None),
)
def remove(self, entity=None, search_params=None, **kwargs):
"""
        Method which is called when the state of the entity is 'absent'. If the
        user doesn't provide the `entity` parameter, the entity is searched for
        using the `search_params` parameter. If the entity is found it's removed.
Function executed before remove is executed can optionally be specified
in `pre_remove` parameter.
:param entity: Entity we want to remove.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed when removing entity.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
if entity is None:
return {
'changed': self.changed,
'msg': "Entity wasn't found."
}
self.pre_remove(entity)
entity_service = self._service.service(entity.id)
if not self._module.check_mode:
entity_service.remove(**kwargs)
wait(
service=entity_service,
condition=lambda entity: not entity,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
self.changed = True
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
}
def action(
self,
action,
entity=None,
action_condition=lambda e: e,
wait_condition=lambda e: e,
fail_condition=lambda e: False,
pre_action=lambda e: e,
post_action=lambda e: None,
search_params=None,
**kwargs
):
"""
This method is executed when we want to change the state of some oVirt
        entity. The action to be executed on the oVirt service is specified by
        the `action` parameter. Whether the action should be executed can be
        specified by passing the `action_condition` parameter. The state which the
        entity should be in after execution of the action can be specified
        by the `wait_condition` parameter.
Function executed before an action on entity can optionally be specified
in `pre_action` parameter. Function executed after an action on entity can
optionally be specified in `post_action` parameter.
:param action: Action which should be executed by service on entity.
:param entity: Entity we want to run action on.
:param action_condition: Function which is executed when checking if action should be executed.
:param fail_condition: Function which checks incorrect state of entity, if it returns `True` Exception is raised.
:param wait_condition: Function which is executed when waiting on result state.
:param pre_action: Function which is executed before running the action.
:param post_action: Function which is executed after running the action.
:param search_params: Dictionary of parameters to be used for search.
:param kwargs: Additional parameters passed to action.
:return: Dictionary with values returned by Ansible module.
"""
if entity is None:
entity = self.search_entity(search_params)
entity = pre_action(entity)
if entity is None:
self._module.fail_json(
msg="Entity not found, can't run action '{0}'.".format(
action
)
)
entity_service = self._service.service(entity.id)
entity = entity_service.get()
if action_condition(entity):
if not self._module.check_mode:
getattr(entity_service, action)(**kwargs)
self.changed = True
post_action(entity)
wait(
service=self._service.service(entity.id),
condition=wait_condition,
fail_condition=fail_condition,
wait=self._module.params['wait'],
timeout=self._module.params['timeout'],
poll_interval=self._module.params['poll_interval'],
)
return {
'changed': self.changed,
'id': entity.id,
type(entity).__name__.lower(): get_dict_of_struct(
struct=entity,
connection=self._connection,
fetch_nested=self._module.params.get('fetch_nested'),
attributes=self._module.params.get('nested_attributes'),
),
'diff': self._diff,
}
def wait_for_import(self, condition=lambda e: True):
if self._module.params['wait']:
start = time.time()
timeout = self._module.params['timeout']
poll_interval = self._module.params['poll_interval']
while time.time() < start + timeout:
entity = self.search_entity()
if entity and condition(entity):
return entity
time.sleep(poll_interval)
def search_entity(self, search_params=None, list_params=None):
"""
        Always try to search by `ID` first; if the ID isn't specified,
        check whether the user constructed a special search in `search_params`;
        if not, search by `name`.
"""
entity = None
if 'id' in self._module.params and self._module.params['id'] is not None:
entity = get_entity(self._service.service(self._module.params['id']), get_params=list_params)
elif search_params is not None:
entity = search_by_attributes(self._service, list_params=list_params, **search_params)
elif self._module.params.get('name') is not None:
entity = search_by_attributes(self._service, list_params=list_params, name=self._module.params['name'])
return entity
def _get_major(self, full_version):
if full_version is None or full_version == "":
return None
if isinstance(full_version, otypes.Version):
return int(full_version.major)
return int(full_version.split('.')[0])
def _get_minor(self, full_version):
if full_version is None or full_version == "":
return None
if isinstance(full_version, otypes.Version):
return int(full_version.minor)
return int(full_version.split('.')[1])
def _sdk4_error_maybe():
"""
Allow for ovirtsdk4 not being installed.
"""
if HAS_SDK:
return sdk.Error
return type(None)
class OvirtRetry(CloudRetry):
base_class = _sdk4_error_maybe()
@staticmethod
def status_code_from_exception(error):
return error.code
@staticmethod
def found(response_code, catch_extra_error_codes=None):
# This is a list of error codes to retry.
retry_on = [
# HTTP status: Conflict
409,
]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
return response_code in retry_on
| gpl-3.0 |
RENCI/xDCIShare | hs_model_program/migrations/0004_auto_20151012_1656.py | 3 | 4593 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('hs_model_program', '0003_auto_20150813_1730'),
]
operations = [
migrations.RemoveField(
model_name='mpmetadata',
name='date_released',
),
migrations.RemoveField(
model_name='mpmetadata',
name='operating_sys',
),
migrations.RemoveField(
model_name='mpmetadata',
name='program_website',
),
migrations.RemoveField(
model_name='mpmetadata',
name='release_notes',
),
migrations.RemoveField(
model_name='mpmetadata',
name='software_language',
),
migrations.RemoveField(
model_name='mpmetadata',
name='software_repo',
),
migrations.RemoveField(
model_name='mpmetadata',
name='software_version',
),
migrations.RemoveField(
model_name='mpmetadata',
name='source_code',
),
migrations.RemoveField(
model_name='mpmetadata',
name='theoretical_manual',
),
migrations.RemoveField(
model_name='mpmetadata',
name='user_manual',
),
migrations.AddField(
model_name='mpmetadata',
name='modelCodeRepository',
field=models.CharField(default=b'', max_length=255, blank=True, help_text=b'A URL to the source code repository (e.g. git, mecurial, svn)', null=True, verbose_name=b'Software Repository'),
preserve_default=True,
),
migrations.AddField(
model_name='mpmetadata',
name='modelDocumentation',
field=models.CharField(default=b'', choices=[(b'-', b' ')], max_length=400, blank=True, help_text=b'Documentation for the model (e.g. User manuals, theoretical manuals, reports, notes, etc.)', null=True, verbose_name=b'Model Documentation'),
preserve_default=True,
),
migrations.AddField(
model_name='mpmetadata',
name='modelOperatingSystem',
field=models.CharField(default=b'', max_length=255, blank=True, help_text=b'Compatible operating systems to setup and run the model', null=True, verbose_name=b'Operating System'),
preserve_default=True,
),
migrations.AddField(
model_name='mpmetadata',
name='modelProgramLanguage',
field=models.CharField(default=b'', max_length=100, blank=True, help_text=b'The programming language(s) that the model is written in', null=True, verbose_name=b'Program Language'),
preserve_default=True,
),
migrations.AddField(
model_name='mpmetadata',
name='modelReleaseDate',
field=models.DateTimeField(help_text=b'The date that this version of the model was released', null=True, verbose_name=b'Release Date', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='mpmetadata',
name='modelReleaseNotes',
field=models.CharField(default=b'', choices=[(b'-', b' ')], max_length=400, blank=True, help_text=b'Notes regarding the software release (e.g. bug fixes, new functionality, readme)', null=True, verbose_name=b'Release Notes'),
preserve_default=True,
),
migrations.AddField(
model_name='mpmetadata',
name='modelSoftware',
field=models.CharField(default=b'', choices=[(b'-', b' ')], max_length=400, blank=True, help_text=b'Uploaded archive containing model software (source code, executable, etc.)', null=True, verbose_name=b'Model Software'),
preserve_default=True,
),
migrations.AddField(
model_name='mpmetadata',
name='modelVersion',
field=models.CharField(default=b'', max_length=255, blank=True, help_text=b'The software version or build number of the model', null=True, verbose_name=b'Version '),
preserve_default=True,
),
migrations.AddField(
model_name='mpmetadata',
name='modelWebsite',
field=models.CharField(default=b'', max_length=255, blank=True, help_text=b'A URL to the website maintained by the model developers', null=True, verbose_name=b'Website'),
preserve_default=True,
),
]
| bsd-3-clause |
douggeiger/gnuradio | gnuradio-runtime/python/gnuradio/gru/hexint.py | 78 | 1351 | #
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
def hexint(mask):
"""
Convert unsigned masks into signed ints.
This allows us to use hex constants like 0xf0f0f0f2 when talking to
our hardware and not get screwed by them getting treated as python
longs.
"""
if mask >= 2**31:
return int(mask-2**32)
return mask
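# Illustrative examples (not part of the original module):
#   hexint(0xf0f0f0f2) == -252645134   # >= 2**31, wrapped to a negative int
#   hexint(0x7fffffff) == 2147483647   # < 2**31, returned unchanged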
def hexshort(mask):
"""
Convert unsigned masks into signed shorts.
This allows us to use hex constants like 0x8000 when talking to
our hardware and not get screwed by them getting treated as python
longs.
"""
if mask >= 2**15:
return int(mask-2**16)
return mask
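# Illustrative examples (not part of the original module):
#   hexshort(0x8000) == -32768   # >= 2**15, wrapped to a negative short
#   hexshort(0x7fff) == 32767    # < 2**15, returned unchanged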
| gpl-3.0 |
satishgoda/learningqt | basics/qmainwindow/jamming.py | 1 | 1233 | from PySide import QtGui
from PySide import QtCore
self = QtGui.QMainWindow()
self.setWindowTitle("Qt MainWindow")
self.show()
menuBar = self.menuBar()
fileMenu = menuBar.addMenu('File')
exitAction = fileMenu.addAction("Exit")
exitAction.triggered.connect(self.close)
dockWidgetWidget = QtGui.QWidget(parent=self)
dockWidgetWidget.setLayout(QtGui.QVBoxLayout())
dockWidgetWidget.layout().addWidget(QtGui.QPushButton("Dock Widget 1"))
dockWidget = QtGui.QDockWidget('Dock Widget 1', self)
dockWidget.setAllowedAreas(QtCore.Qt.LeftDockWidgetArea|QtCore.Qt.RightDockWidgetArea)
dockWidget.setWidget(dockWidgetWidget)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dockWidget)
centralWidget = QtGui.QWidget(parent=self)
centralWidget.setLayout(QtGui.QVBoxLayout())
centralWidget.layout().addWidget(QtGui.QTextEdit())
self.setCentralWidget(centralWidget)
toolbar1 = self.addToolBar("toolbar1")
toolbar1.setToolTip("File tools")
toolbar1.addAction(exitAction)
"""
In [10]: self.actions()
Out[10]: []
In [11]: menuBar.actions()
Out[11]: [<PySide.QtGui.QAction at 0x4565a80>]
In [12]: exitAction
Out[12]: <PySide.QtGui.QAction at 0x4551b70>
In [13]: fileMenu.actions()
Out[13]: [<PySide.QtGui.QAction at 0x4551b70>]
"""
| mit |
Andrerm124/Snapprefs | docs/conf.py | 3 | 9834 | # -*- coding: utf-8 -*-
#
# Snapprefs documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 16 14:42:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Snapprefs'
copyright = u'2016, MARZ'
author = u'Snapprefsteam'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2'
# The full version, including alpha/beta/rc tags.
release = u'2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Snapprefs v2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Snapprefsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Snapprefs.tex', u'Snapprefs Documentation',
u'MARZ', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'snapprefs', u'Snapprefs Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Snapprefs', u'Snapprefs Documentation',
author, 'Snapprefs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| gpl-3.0 |
sbesson/zeroc-ice | cs/test/IceSSL/certs/makecerts.py | 1 | 2044 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, shutil
for toplevel in [".", "..", "../..", "../../..", "../../../..", "../../../../.."]:
toplevel = os.path.normpath(toplevel)
if os.path.exists(os.path.join(toplevel, "scripts", "TestUtil.py")):
break
else:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(toplevel)
from scripts import *
#
# Show usage information.
#
def usage():
print("Usage: " + sys.argv[0] + " [options]")
print("")
print("Options:")
print("-h Show this message.")
print("-f Force an update to the C# files.")
#
# Check arguments
#
force = 0
for x in sys.argv[1:]:
if x == "-h":
usage()
sys.exit(0)
elif x == "-f":
force = 1
elif x.startswith("-"):
print(sys.argv[0] + ": unknown option `" + x + "'")
print("")
usage()
sys.exit(1)
else:
usage()
sys.exit(1)
cppcerts = os.path.join(TestUtil.getIceDir("cpp"), "test", "IceSSL", "certs")
for x in ("cacert1.pem", "cacert2.pem"):
if force or not os.path.exists(x):
shutil.copyfile(os.path.join(cppcerts, x), x)
certs = [\
"c_rsa_nopass_ca1_exp", \
"c_rsa_nopass_ca1", \
"c_rsa_nopass_ca2", \
"s_rsa_nopass_ca1_exp", \
"s_rsa_nopass_ca1", \
"s_rsa_nopass_ca2", \
"s_rsa_nopass_ca1_cn1", \
"s_rsa_nopass_ca1_cn2", \
]
for x in certs:
if force or not os.path.exists(x + ".pfx"):
cert = os.path.join(cppcerts, x)
os.system("openssl pkcs12 -in " + cert + "_pub.pem -inkey " + cert + "_priv.pem -export -out " + x + \
".pfx -passout pass:password")
print("Created " + x + ".pfx")
#
# Done.
#
print("Done.")
| gpl-2.0 |
DavidBreuer/CytoSeg | tests/test.py | 1 | 1464 | ################################################################################
# Module: test.py
# Description: Test imports and network extraction
# License: GPL3, see full license in LICENSE.txt
# Web: https://github.com/DavidBreuer/CytoSeg
################################################################################
#%%############################################################################# test imports
def test_imports():
import itertools
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
import pandas as pd
import random
import scipy as sp
import scipy.misc
import scipy.ndimage
import scipy.optimize
import scipy.spatial
import scipy.stats
import scipy.cluster
import skimage
import skimage.filters
import skimage.io
import skimage.morphology
import skimage.feature
import skimage.segmentation
import shapely
import shapely.geometry
import sys
import xml
import xml.dom
import xml.dom.minidom
return None
#%%############################################################################# test read tiff
def test_read_tiff():
import skimage
import skimage.io
im=skimage.io.imread('../examples/data/',plugin='tifffile')
return None
#%%############################################################################# under construction
| gpl-3.0 |
JQIamo/artiq | artiq/frontend/artiq_flash.py | 1 | 5247 | #!/usr/bin/env python3
# Copyright (C) 2015 Robert Jordens <[email protected]>
import argparse
import os
import subprocess
import tempfile
import shutil
from artiq import __artiq_dir__ as artiq_dir
from artiq.frontend.bit2bin import bit2bin
def scripts_path():
p = ["share", "openocd", "scripts"]
if os.name == "nt":
p.insert(0, "Library")
p = os.path.abspath(os.path.join(
os.path.dirname(shutil.which("openocd")),
"..", *p))
return p
def get_argparser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="ARTIQ flashing/deployment tool",
epilog="""\
Valid actions:
* proxy: load the flash proxy gateware bitstream
* gateware: write gateware bitstream to flash
* bios: write bios to flash
* runtime: write runtime to flash
* storage: write storage image to flash
* load: load gateware bitstream into device (volatile but fast)
* start: trigger the target to (re)load its gateware bitstream from flash
Prerequisites:
* Connect the board through its/a JTAG adapter.
* Have OpenOCD installed and in your $PATH.
* Have access to the JTAG adapter's devices. Udev rules from OpenOCD:
'sudo cp openocd/contrib/99-openocd.rules /etc/udev/rules.d'
and replug the device. Ensure you are member of the
plugdev group: 'sudo adduser $USER plugdev' and re-login.
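Example invocation (illustrative, using the default target and adapter):
 * artiq_flash -t kc705 -m nist_clock gateware bios runtime start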
""")
parser.add_argument("-t", "--target", default="kc705",
help="target board, default: %(default)s")
parser.add_argument("-m", "--adapter", default="nist_clock",
help="target adapter, default: %(default)s")
parser.add_argument("--target-file", default=None,
help="use alternative OpenOCD target file")
parser.add_argument("-f", "--storage", help="write file to storage area")
parser.add_argument("-d", "--dir", help="look for files in this directory")
parser.add_argument("action", metavar="ACTION", nargs="*",
default="proxy gateware bios runtime start".split(),
help="actions to perform, default: %(default)s")
return parser
def main():
parser = get_argparser()
opts = parser.parse_args()
config = {
"kc705": {
"chip": "xc7k325t",
"start": "xc7_program xc7.tap",
"gateware": 0x000000,
"bios": 0xaf0000,
"runtime": 0xb00000,
"storage": 0xb80000,
},
}[opts.target]
if opts.dir is None:
opts.dir = os.path.join(artiq_dir, "binaries",
"{}-{}".format(opts.target, opts.adapter))
if not os.path.exists(opts.dir) and opts.action != ["start"]:
raise SystemExit("Binaries directory '{}' does not exist"
.format(opts.dir))
conv = False
prog = []
prog.append("init")
for action in opts.action:
if action == "proxy":
proxy_base = "bscan_spi_{}.bit".format(config["chip"])
proxy = None
for p in [opts.dir, os.path.expanduser("~/.migen"),
"/usr/local/share/migen", "/usr/share/migen"]:
proxy_ = os.path.join(p, proxy_base)
if os.access(proxy_, os.R_OK):
proxy = "jtagspi_init 0 {{{}}}".format(proxy_)
break
if not proxy:
raise SystemExit(
"proxy gateware bitstream {} not found".format(proxy_base))
prog.append(proxy)
elif action == "gateware":
bin = os.path.join(opts.dir, "top.bin")
if not os.access(bin, os.R_OK):
bin_handle, bin = tempfile.mkstemp()
bit = os.path.join(opts.dir, "top.bit")
conv = True
prog.append("jtagspi_program {{{}}} 0x{:x}".format(
bin, config["gateware"]))
elif action == "bios":
prog.append("jtagspi_program {{{}}} 0x{:x}".format(
os.path.join(opts.dir, "bios.bin"), config["bios"]))
elif action == "runtime":
prog.append("jtagspi_program {{{}}} 0x{:x}".format(
os.path.join(opts.dir, "runtime.fbi"), config["runtime"]))
elif action == "storage":
prog.append("jtagspi_program {{{}}} 0x{:x}".format(
opts.storage, config["storage"]))
elif action == "load":
prog.append("pld load 0 {{{}}}".format(
os.path.join(opts.dir, "top.bit")))
elif action == "start":
prog.append(config["start"])
else:
raise ValueError("invalid action", action)
prog.append("exit")
try:
if conv:
bit2bin(bit, bin_handle)
if opts.target_file is None:
target_file = os.path.join("board", opts.target + ".cfg")
else:
target_file = opts.target_file
subprocess.check_call([
"openocd",
"-s", scripts_path(),
"-f", target_file,
"-c", "; ".join(prog),
])
finally:
if conv:
os.unlink(bin)
if __name__ == "__main__":
main()
| lgpl-3.0 |
Tictrac/django-push-notifications | tests/test_models.py | 2 | 17633 | import json
from django.test import TestCase
from django.utils import timezone
from push_notifications.gcm import GCMError, send_bulk_message
from push_notifications.models import GCMDevice, APNSDevice
from ._mock import mock
# Mock responses
GCM_PLAIN_RESPONSE = "id=1:08"
GCM_JSON_RESPONSE = '{"multicast_id":108,"success":1,"failure":0,"canonical_ids":0,"results":[{"message_id":"1:08"}]}'
GCM_MULTIPLE_JSON_RESPONSE = (
'{"multicast_id":108,"success":2,"failure":0,"canonical_ids":0,"results":'
'[{"message_id":"1:08"}, {"message_id": "1:09"}]}'
)
GCM_JSON_RESPONSE_ERROR = (
'{"success":1, "failure": 2, "canonical_ids": 0, "cast_id": 6358665107659088804, "results":'
' [{"error": "NotRegistered"}, {"message_id": "0:1433830664381654%3449593ff9fd7ecd"}, '
'{"error": "InvalidRegistration"}]}'
)
GCM_JSON_RESPONSE_ERROR_B = (
'{"success":1, "failure": 2, "canonical_ids": 0, "cast_id": 6358665107659088804, '
'"results": [{"error": "MismatchSenderId"}, {"message_id": '
'"0:1433830664381654%3449593ff9fd7ecd"}, {"error": "InvalidRegistration"}]}'
)
GCM_JSON_CANONICAL_ID_RESPONSE = (
'{"failure":0,"canonical_ids":1,"success":2,"multicast_id":7173139966327257000,"results":'
'[{"registration_id":"NEW_REGISTRATION_ID","message_id":"0:1440068396670935%6868637df9fd7ecd"},'
'{"message_id":"0:1440068396670937%6868637df9fd7ecd"}]}'
)
GCM_JSON_CANONICAL_ID_SAME_DEVICE_RESPONSE = (
'{"failure":0,"canonical_ids":1,"success":2,"multicast_id":7173139966327257000,'
'"results":[{"registration_id":"bar","message_id":"0:1440068396670935%6868637df9fd7ecd"}'
',{"message_id":"0:1440068396670937%6868637df9fd7ecd"}]}'
)
class ModelTestCase(TestCase):
def _create_devices(self, devices):
for device in devices:
GCMDevice.objects.create(registration_id=device)
def _create_fcm_devices(self, devices):
for device in devices:
GCMDevice.objects.create(registration_id=device, cloud_message_type="FCM")
def test_can_save_gcm_device(self):
device = GCMDevice.objects.create(registration_id="a valid registration id")
assert device.id is not None
assert device.date_created is not None
assert device.date_created.date() == timezone.now().date()
def test_can_create_save_device(self):
device = APNSDevice.objects.create(registration_id="a valid registration id")
assert device.id is not None
assert device.date_created is not None
assert device.date_created.date() == timezone.now().date()
def test_gcm_send_message(self):
device = GCMDevice.objects.create(registration_id="abc")
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_PLAIN_RESPONSE
) as p:
device.send_message("Hello world")
p.assert_called_once_with(
b"data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8"
)
def test_gcm_send_message_extra(self):
device = GCMDevice.objects.create(registration_id="abc")
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_PLAIN_RESPONSE
) as p:
device.send_message("Hello world", extra={"foo": "bar"})
p.assert_called_once_with(
b"data.foo=bar&data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8"
)
def test_gcm_send_message_collapse_key(self):
device = GCMDevice.objects.create(registration_id="abc")
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_PLAIN_RESPONSE
) as p:
device.send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
b"collapse_key=test_key&data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8"
)
def test_gcm_send_message_to_multiple_devices(self):
self._create_devices(["abc", "abc1"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE
) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"data": {"message": "Hello world"},
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_gcm_send_message_active_devices(self):
GCMDevice.objects.create(registration_id="abc", active=True)
GCMDevice.objects.create(registration_id="xyz", active=False)
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE
) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"data": {"message": "Hello world"},
"registration_ids": ["abc"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_gcm_send_message_collapse_to_multiple_devices(self):
self._create_devices(["abc", "abc1"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE
) as p:
GCMDevice.objects.all().send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
json.dumps({
"collapse_key": "test_key",
"data": {"message": "Hello world"},
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_gcm_send_message_to_single_device_with_error(self):
# these errors are device specific, device.active will be set false
devices = ["abc", "abc1"]
self._create_devices(devices)
errors = ["Error=NotRegistered", "Error=InvalidRegistration"]
for index, error in enumerate(errors):
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=error):
device = GCMDevice.objects.get(registration_id=devices[index])
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id=devices[index]).active is False
def test_gcm_send_message_to_single_device_with_error_b(self):
device = GCMDevice.objects.create(registration_id="abc")
with mock.patch(
"push_notifications.gcm._gcm_send", return_value="Error=MismatchSenderId"
):
# these errors are not device specific, GCMError should be thrown
with self.assertRaises(GCMError):
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id="abc").active is True
def test_gcm_send_message_to_multiple_devices_with_error(self):
self._create_devices(["abc", "abc1", "abc2"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_JSON_RESPONSE_ERROR
):
devices = GCMDevice.objects.all()
devices.send_message("Hello World")
assert not GCMDevice.objects.get(registration_id="abc").active
assert GCMDevice.objects.get(registration_id="abc1").active
assert not GCMDevice.objects.get(registration_id="abc2").active
def test_gcm_send_message_to_multiple_devices_with_error_b(self):
self._create_devices(["abc", "abc1", "abc2"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_JSON_RESPONSE_ERROR_B
):
devices = GCMDevice.objects.all()
with self.assertRaises(GCMError):
devices.send_message("Hello World")
assert GCMDevice.objects.get(registration_id="abc").active is True
assert GCMDevice.objects.get(registration_id="abc1").active is True
assert GCMDevice.objects.get(registration_id="abc2").active is False
def test_gcm_send_message_to_multiple_devices_with_canonical_id(self):
self._create_devices(["foo", "bar"])
with mock.patch(
"push_notifications.gcm._gcm_send", return_value=GCM_JSON_CANONICAL_ID_RESPONSE
):
GCMDevice.objects.all().send_message("Hello World")
assert not GCMDevice.objects.filter(registration_id="foo").exists()
assert GCMDevice.objects.filter(registration_id="bar").exists()
assert GCMDevice.objects.filter(registration_id="NEW_REGISTRATION_ID").exists() is True
def test_gcm_send_message_to_single_user_with_canonical_id(self):
old_registration_id = "foo"
self._create_devices([old_registration_id])
gcm_reg_blob = "id=1:2342\nregistration_id=NEW_REGISTRATION_ID"
with mock.patch("push_notifications.gcm._gcm_send", return_value=gcm_reg_blob):
GCMDevice.objects.get(registration_id=old_registration_id).send_message("Hello World")
assert not GCMDevice.objects.filter(registration_id=old_registration_id).exists()
assert GCMDevice.objects.filter(registration_id="NEW_REGISTRATION_ID").exists()
def test_gcm_send_message_to_same_devices_with_canonical_id(self):
first_device = GCMDevice.objects.create(registration_id="foo", active=True)
second_device = GCMDevice.objects.create(registration_id="bar", active=False)
with mock.patch(
"push_notifications.gcm._gcm_send",
return_value=GCM_JSON_CANONICAL_ID_SAME_DEVICE_RESPONSE
):
GCMDevice.objects.all().send_message("Hello World")
assert first_device.active is True
assert second_device.active is False
def test_fcm_send_message(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_PLAIN_RESPONSE
) as p:
device.send_message("Hello world")
p.assert_called_once_with(
b"data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8"
)
def test_fcm_send_message_extra(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_PLAIN_RESPONSE
) as p:
device.send_message("Hello world", extra={"foo": "bar"})
p.assert_called_once_with(
b"data.foo=bar&data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8"
)
def test_fcm_send_message_collapse_key(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_PLAIN_RESPONSE
) as p:
device.send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
b"collapse_key=test_key&data.message=Hello+world®istration_id=abc",
"application/x-www-form-urlencoded;charset=UTF-8"
)
def test_fcm_send_message_to_multiple_devices(self):
self._create_fcm_devices(["abc", "abc1"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE
) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"data": {"message": "Hello world"},
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_fcm_send_message_active_devices(self):
GCMDevice.objects.create(registration_id="abc", active=True, cloud_message_type="FCM")
GCMDevice.objects.create(registration_id="xyz", active=False, cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE
) as p:
GCMDevice.objects.all().send_message("Hello world")
p.assert_called_once_with(
json.dumps({
"data": {"message": "Hello world"},
"registration_ids": ["abc"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_fcm_send_message_collapse_to_multiple_devices(self):
self._create_fcm_devices(["abc", "abc1"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_MULTIPLE_JSON_RESPONSE
) as p:
GCMDevice.objects.all().send_message("Hello world", collapse_key="test_key")
p.assert_called_once_with(
json.dumps({
"collapse_key": "test_key",
"data": {"message": "Hello world"},
"registration_ids": ["abc", "abc1"]
}, separators=(",", ":"), sort_keys=True).encode("utf-8"), "application/json")
def test_fcm_send_message_to_single_device_with_error(self):
# these errors are device specific, device.active will be set false
devices = ["abc", "abc1"]
self._create_fcm_devices(devices)
errors = ["Error=NotRegistered", "Error=InvalidRegistration"]
for index, error in enumerate(errors):
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=error):
device = GCMDevice.objects.get(registration_id=devices[index])
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id=devices[index]).active is False
def test_fcm_send_message_to_single_device_with_error_b(self):
device = GCMDevice.objects.create(registration_id="abc", cloud_message_type="FCM")
with mock.patch(
"push_notifications.gcm._fcm_send", return_value="Error=MismatchSenderId"
):
# these errors are not device specific, GCMError should be thrown
with self.assertRaises(GCMError):
device.send_message("Hello World!")
assert GCMDevice.objects.get(registration_id="abc").active is True
def test_fcm_send_message_to_multiple_devices_with_error(self):
self._create_fcm_devices(["abc", "abc1", "abc2"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_JSON_RESPONSE_ERROR
):
devices = GCMDevice.objects.all()
devices.send_message("Hello World")
assert not GCMDevice.objects.get(registration_id="abc").active
assert GCMDevice.objects.get(registration_id="abc1").active
assert not GCMDevice.objects.get(registration_id="abc2").active
def test_fcm_send_message_to_multiple_devices_with_error_b(self):
self._create_fcm_devices(["abc", "abc1", "abc2"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_JSON_RESPONSE_ERROR_B
):
devices = GCMDevice.objects.all()
with self.assertRaises(GCMError):
devices.send_message("Hello World")
assert GCMDevice.objects.get(registration_id="abc").active is True
assert GCMDevice.objects.get(registration_id="abc1").active is True
assert GCMDevice.objects.get(registration_id="abc2").active is False
def test_fcm_send_message_to_multiple_devices_with_canonical_id(self):
self._create_fcm_devices(["foo", "bar"])
with mock.patch(
"push_notifications.gcm._fcm_send", return_value=GCM_JSON_CANONICAL_ID_RESPONSE
):
GCMDevice.objects.all().send_message("Hello World")
assert not GCMDevice.objects.filter(registration_id="foo").exists()
assert GCMDevice.objects.filter(registration_id="bar").exists()
assert GCMDevice.objects.filter(registration_id="NEW_REGISTRATION_ID").exists() is True
def test_fcm_send_message_to_single_user_with_canonical_id(self):
old_registration_id = "foo"
self._create_fcm_devices([old_registration_id])
gcm_reg_blob = "id=1:2342\nregistration_id=NEW_REGISTRATION_ID"
with mock.patch("push_notifications.gcm._fcm_send", return_value=gcm_reg_blob):
GCMDevice.objects.get(registration_id=old_registration_id).send_message("Hello World")
assert not GCMDevice.objects.filter(registration_id=old_registration_id).exists()
assert GCMDevice.objects.filter(registration_id="NEW_REGISTRATION_ID").exists()
def test_fcm_send_message_to_same_devices_with_canonical_id(self):
first_device = GCMDevice.objects.create(
registration_id="foo", active=True, cloud_message_type="FCM"
)
second_device = GCMDevice.objects.create(
registration_id="bar", active=False, cloud_message_type="FCM"
)
with mock.patch(
"push_notifications.gcm._fcm_send",
return_value=GCM_JSON_CANONICAL_ID_SAME_DEVICE_RESPONSE
):
GCMDevice.objects.all().send_message("Hello World")
assert first_device.active is True
assert second_device.active is False
def test_apns_send_message(self):
device = APNSDevice.objects.create(registration_id="abc")
socket = mock.MagicMock()
with mock.patch("push_notifications.apns._apns_pack_frame") as p:
device.send_message("Hello world", socket=socket, expiration=1)
p.assert_called_once_with("abc", b'{"aps":{"alert":"Hello world"}}', 0, 1, 10)
def test_apns_send_message_extra(self):
device = APNSDevice.objects.create(registration_id="abc")
socket = mock.MagicMock()
with mock.patch("push_notifications.apns._apns_pack_frame") as p:
device.send_message(
"Hello world", extra={"foo": "bar"}, socket=socket,
identifier=1, expiration=2, priority=5
)
p.assert_called_once_with("abc", b'{"aps":{"alert":"Hello world"},"foo":"bar"}', 1, 2, 5)
def test_send_message_with_no_reg_ids(self):
self._create_devices(["abc", "abc1"])
with mock.patch("push_notifications.gcm._cm_send_plain", return_value="") as p:
GCMDevice.objects.filter(registration_id="xyz").send_message("Hello World")
p.assert_not_called()
with mock.patch("push_notifications.gcm._cm_send_json", return_value="") as p:
reg_ids = [obj.registration_id for obj in GCMDevice.objects.all()]
send_bulk_message(reg_ids, {"message": "Hello World"}, "GCM")
p.assert_called_once_with(
[u"abc", u"abc1"], {"message": "Hello World"}, cloud_type="GCM"
)
def test_fcm_send_message_with_no_reg_ids(self):
self._create_fcm_devices(["abc", "abc1"])
with mock.patch("push_notifications.gcm._cm_send_plain", return_value="") as p:
GCMDevice.objects.filter(registration_id="xyz").send_message("Hello World")
p.assert_not_called()
with mock.patch("push_notifications.gcm._cm_send_json", return_value="") as p:
reg_ids = [obj.registration_id for obj in GCMDevice.objects.all()]
send_bulk_message(reg_ids, {"message": "Hello World"}, "FCM")
p.assert_called_once_with(
[u"abc", u"abc1"], {"message": "Hello World"}, cloud_type="FCM"
)
def test_can_save_wsn_device(self):
device = GCMDevice.objects.create(registration_id="a valid registration id")
self.assertIsNotNone(device.pk)
self.assertIsNotNone(device.date_created)
self.assertEqual(device.date_created.date(), timezone.now().date())
| mit |
garmann/playground | python/python_mysql_queue/app.py | 1 | 2098 | #!/usr/bin/env python3
from classes.myqueue import MyQueue
import time # time.sleep(0.02)
import random # random.randint(1, 100)
import socket # socket.gethostname()
import sys
import argparse
CONF_DB = {
'server': 'localhost',
'user': 'root',
'pass': 'x',
'db': 'myqueue'
}
def worker_create(q, amount):
# makes objects in state new
hostname = socket.gethostname()
while amount > 0:
amount -= 1
objectname = "{}_{}_{}".format(hostname, int(time.time()), random.randint(1,10000000))
q.object_add(objectname)
def worker_update(q, amount):
# changes objects into status running
while amount > 0:
amount -= 1
try:
objectid = q.object_get_object_bystate('new')[0]['object']
q.object_update_status(name=objectid, status='running')
except IndexError: # happens when there are no new objects
pass
def worker_finish(q, amount):
# changes objects into status done
while amount > 0:
amount -= 1
try:
objectid = q.object_get_object_bystate('running')[0]['object']
q.object_update_status(name=objectid, status='done')
except IndexError: # happens when there are no running objects
pass
def main(args):
q = MyQueue(CONF_DB)
with q:
# using "with" ensures db exit, not worked on my testing with the db library
# see __enter__ & __exit__ in MyQueue Class
if args.type == 'create':
worker_create(q, args.amount)
elif args.type == 'update':
worker_update(q, args.amount)
elif args.type == 'finish':
worker_finish(q, args.amount)
else:
print('shit happens')
sys.exit(1)
# mysql> select status, count(object) as count from queue group by status order by count DESC
# set global general_log = 'ON';
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='its me, the python queue...')
parser.add_argument('type',
default='create',
help='for type: choose between create, update and finish',
choices=['create', 'update', 'finish'],
type=str)
parser.add_argument('--amount',
type=int,
default=1000,
help='amount to create/modify/finish')
args = parser.parse_args()
main(args)
| mit |
danieldresser/cortex | python/IECore/CompoundVectorParameter.py | 12 | 3505 | ##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from IECore import *
## This class is a CompoundParameter that only accepts vector parameters with the same length.
# \ingroup python
class CompoundVectorParameter ( CompoundParameter ):
def __testParameterType( self, parameter ):
data = parameter.getValue()
if not isSequenceDataType( data ):
			raise TypeError, "The parameter %s cannot be added because it does not hold a vector data object." % parameter.name
# overwrites base class definition just to limit the parameter types accepted.
def addParameter( self, parameter ):
self.__testParameterType( parameter )
CompoundParameter.addParameter( self, parameter )
# overwrites base class definition just to limit the parameter types accepted.
def addParameters( self, parameters ):
for parameter in parameters:
self.__testParameterType( parameter )
CompoundParameter.addParameters( self, parameters )
# overwrites base class definition just to limit the parameter types accepted.
def insertParameter( self, parameter, other ):
self.__testParameterType( parameter )
CompoundParameter.insertParameter( self, parameter, other )
## Returns true only if all the vector parameters are of the same length and they also validate ok.
def valueValid( self, value ) :
res = CompoundParameter.valueValid( self, value )
if not res[0]:
return res
size = None
keys = value.keys()
values = value.values()
for i in range( 0, len( keys ) ) :
thisSize = len( values[i] )
if size is None:
size = thisSize
if size != thisSize :
return ( False, ( "Parameter \"%s\" has wrong size ( expected %d but found %d )" % ( keys[i], size, thisSize ) ) )
return ( True, "" )
registerRunTimeTyped( CompoundVectorParameter )
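# Illustrative usage sketch (not part of the original file; the vector parameter
# constructor signatures below are assumed to follow the usual IECore pattern):
#
#     p = CompoundVectorParameter( "samples", "per-sample columns" )
#     p.addParameters( [
#         IntVectorParameter( "ids", "", IntVectorData() ),
#         FloatVectorParameter( "weights", "", FloatVectorData() ),
#     ] )
#     # valueValid() only succeeds when "ids" and "weights" hold the same number of elements.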
| bsd-3-clause |
naver/nbase-arc | integration_test/testrunner.py | 4 | 10208 | # Copyright 2015 Naver Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import sys
import testbase
import config
import util
import getopt
import imp
import sys
import os
import signal
import default_cluster
import zookeeper
def print_list_and_input(list):
while True:
print
last_idx = 0
for name in list:
print "%2d:\t%s" % (last_idx, name)
last_idx += 1
print
selection = 0
sys.stdout.write('select: ')
try:
selection = int(sys.stdin.readline())
if 0 <= selection < last_idx:
return list[selection]
except ValueError:
pass
print "\nInput error : 1~%d are only acceptable\n" % (last_idx - 1)
def get_test_methods_list(testcase, methods):
splits = testcase.split('.')
module = splits[0]
classname = splits[1]
class_attr = getattr(__import__(module), classname)
for t in dir(class_attr):
if callable(getattr(class_attr, t)) and t.startswith('test'):
methods.append(t)
def has_test_method(module_attr):
for member in dir(module_attr):
if member.startswith('test'):
return True
return False
def get_test_cases_list(module, cases):
for t in dir(module):
if callable(getattr(module, t)):
try:
module_attr = getattr(module, t)
if issubclass(module_attr, unittest.TestCase) and has_test_method(module_attr):
cases.append(module.__name__ + '.' + t)
except TypeError:
pass
def run_test_modules(module_list):
alltests = unittest.TestSuite()
for module in module_list:
suite = unittest.TestLoader().loadTestsFromModule(module)
alltests.addTest(suite)
return unittest.TextTestRunner(verbosity=2).run(alltests)
def run_test(module, testcase, method):
suite = None
if testcase == None:
suite = unittest.TestLoader().loadTestsFromModule(module)
elif method == None:
suite = unittest.TestLoader().loadTestsFromTestCase(getattr(module, testcase))
else:
suite = unittest.TestSuite()
suite.addTest(getattr(module, testcase)(method))
return unittest.TextTestRunner(verbosity=2).run(suite)
def prepare_cases(test_module_list, cases):
cases.append('Exit')
for test_module in test_module_list:
get_test_cases_list(test_module, cases)
if len(cases) <= 1:
print "No test cases in module[%s]" % test_module.__name__
return False
cases.append('All')
return True
def reload_all(test_module):
for t in dir(test_module):
attr = getattr(test_module, t)
        if type(attr).__name__ == 'module':
print "reload module '%s'" % t
reload(attr)
reload(test_module)
def console(test_module_list):
cases = []
if not prepare_cases(test_module_list, cases):
return
module_name_list = ', '.join([t.__name__ for t in test_module_list])
while True:
print
print '===== module [%s] =====' % module_name_list
module_testcase = print_list_and_input(cases)
if module_testcase == 'All':
run_test_modules(test_module_list)
elif module_testcase == 'Exit':
return
else:
print '\n===== module.testcase [%s] =====' % module_testcase
methods = ['Up']
get_test_methods_list(module_testcase, methods)
if len(methods) <= 1:
print "No test methods in testcase[%s]" % module_testcase
break
methods.append('All')
test_module = __import__(module_testcase.split('.')[0])
testcase = module_testcase.split('.')[1]
while True:
method = print_list_and_input(methods)
if method == 'All':
run_test(test_module, testcase, None)
elif method == 'Up':
break
else:
run_test(test_module, testcase, method)
def __import__(name, globals=None, locals=None, fromlist=None):
try:
return sys.modules[name]
except KeyError:
pass
fp, pathname, description = imp.find_module(name)
try:
return imp.load_module(name, fp, pathname, description)
finally:
if fp:
fp.close()
def signal_handler( *args ):
exit(-1)
def cleanup_test_env(opt_skip_copy_binaries, opt_32bit_binary_test):
# Kill processes
if testbase.cleanup_processes() != 0:
util.log('failed to cleanup test environment')
return -1
# Cleanup pgs directories
srv_id_dict = {}
for cluster in config.clusters:
for server in cluster['servers']:
id = server['id']
if srv_id_dict.has_key(id):
continue
            if testbase.cleanup_pgs_log_and_ckpt( cluster['cluster_name'], server ) != 0:
util.log( 'failed to cleanup_pgs_data' )
return -1
srv_id_dict[id] = True
# Setup binaries
if testbase.setup_binaries( config.clusters, opt_skip_copy_binaries, opt_32bit_binary_test ) != 0:
util.log('failed to initialize testbase')
return -1
return 0
def load_test_modules(opt_32bit_binary_test):
module_list = []
if 'all' in sys.argv:
for file_name in os.listdir('.'):
if file_name.startswith('test_'):
if opt_32bit_binary_test and file_name.endswith('32.py'):
module_list.append(__import__(file_name[:-3]))
elif opt_32bit_binary_test == False and file_name.endswith('.py') and file_name.endswith('32.py') == False:
module_list.append(__import__(file_name[:-3]))
else:
for i in range(1, len(sys.argv)):
file_name = sys.argv[i]
if file_name[0] != '-' and file_name.endswith('.py'):
module_list.append(__import__(file_name[-3:] == '.py' and file_name[:-3] or file_name))
return module_list
def test_modules(module_list, opt_non_interactive, opt_backup_log_dir):
if opt_non_interactive:
test_results = []
for module in module_list:
ret = run_test(module, None, None)
test_results.append(ret)
# Summary
print "\n\n### SUMMARY ###\n"
errors = 0
failures = 0
for ret in test_results:
errors += len(ret.errors)
failures += len(ret.failures)
for e in ret.errors:
util.log(e[0])
util.log(e[1])
util.log('')
for f in ret.failures:
util.log(f[0])
util.log(f[1])
util.log('')
util.log("Test done. failures:%d, errors:%d" % (failures, errors))
if errors > 0 or failures > 0:
if opt_backup_log_dir is not None:
util.backup_log( opt_backup_log_dir )
return -1
else:
util.log("No Error!")
else:
console(module_list)
return 0
USAGE = """usage:
python testrunner.py [options] [file .. or all]
python testrunner.py test_confmaster.py
python testrunner.py -n all
python testrunner.py -n -b all
option:
-i <init> : Without test, run only nBase-ARC processes.
-l <backup_log_dir> : Backup test-logs to the specified directory.
    -s <skip-copy-binaries> : Skip copying binaries. (Use this when the binaries are already deployed.)
-b <32bit-binary-test> : Test 32bit binary
-n <non-interactive> : Run specified tests without any interaction with a user.
Read README file for more details.
"""
def main():
if len(sys.argv) < 2:
print USAGE
return -1
signal.signal( signal.SIGINT, signal_handler )
# Verify config
config.verify_config()
# Init options
try:
opts, args = getopt.getopt(sys.argv[1:], 'inl:sb', ['init', 'non-interactive', 'backup_log_dir', 'skip-copy_binaries', '32bit-binary-test'])
except getopt.GetoptError as e:
print USAGE
print e
return -1
opt_init = False
opt_backup_log_dir = None
opt_skip_copy_binaries = False
opt_32bit_binary_test = False
opt_non_interactive = False
for opt, arg in opts:
if opt in ("-i", '--init'):
opt_init = True
elif opt in ("-l", '--backup_log_dir'):
opt_backup_log_dir = arg
elif opt in ("-s", '--skip-copy-binareis'):
opt_skip_copy_binaries = True
elif opt in ("-b", '--32bit-binary-test'):
opt_32bit_binary_test = True
elif opt in ("-n", '--non-interactive'):
opt_non_interactive = True
# Clean up test environment
if cleanup_test_env(opt_skip_copy_binaries, opt_32bit_binary_test) != 0:
print 'Clean up test environment fail! Aborting...'
return -1
# When -i flag is on, it exits after setting up a cluster.
if opt_init is True:
if default_cluster.initialize_starting_up_smr_before_redis( config.clusters[0], verbose=2 ) is None:
util.log('failed setting up servers.')
else:
util.log('finished successfully setting up servers.' )
return 0
# Load test modules
module_list = load_test_modules(opt_32bit_binary_test)
print "module list : "
print module_list
# Run test
return test_modules(module_list, opt_non_interactive, opt_backup_log_dir)
if __name__ == '__main__':
try:
zookeeper.ZooKeeperCli.start()
exit(main())
finally:
zookeeper.ZooKeeperCli.stop()
| apache-2.0 |
irvingprog/pilas | pilas/actores/menu.py | 1 | 6200 | # -*- encoding: utf-8 -*-
# Pilas engine - A video game framework.
#
# Copyright 2010 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilas.actores import Actor
import pilas
DEMORA = 14
class Menu(Actor):
"""Un actor que puede mostrar una lista de opciones a seleccionar."""
def __init__(self, opciones, x=0, y=0, fuente=None,
color_normal=pilas.colores.gris,
color_resaltado=pilas.colores.blanco):
"""Inicializa el menú.
:param opciones: Tupla con al menos dos elementos obligatorios (:texto:, :funcion:) y :argumentos: opcionales
:param x: Posicion en el eje x
:param y: Posicion en el eje y
"""
self.opciones_como_actores = []
self.demora_al_responder = 0
Actor.__init__(self, "invisible.png", x=x, y=y)
self._verificar_opciones(opciones)
self.crear_texto_de_las_opciones(opciones, fuente, color_normal, color_resaltado)
self.opciones = opciones
self.seleccionar_primer_opcion()
self.opcion_actual = 0
        # counter to avoid key repetition
self.activar()
        # Map some keys to navigate the menu
teclas = {pilas.simbolos.IZQUIERDA: 'izquierda',
pilas.simbolos.DERECHA: 'derecha',
pilas.simbolos.ARRIBA: 'arriba',
pilas.simbolos.ABAJO: 'abajo',
pilas.simbolos.SELECCION: 'boton'}
        # Create a custom control
self.control_menu = pilas.control.Control(pilas.escena_actual(), teclas)
def activar(self):
"""Se ejecuta para activar el comportamiento del menú."""
self.escena.mueve_mouse.conectar(self.cuando_mueve_el_mouse)
self.escena.click_de_mouse.conectar(self.cuando_hace_click_con_el_mouse)
def desactivar(self):
"""Deshabilita toda la funcionalidad del menú."""
self.escena.mueve_mouse.desconectar(self.cuando_mueve_el_mouse)
self.escena.click_de_mouse.desconectar(self.cuando_hace_click_con_el_mouse)
def crear_texto_de_las_opciones(self, opciones, fuente, color_normal, color_resaltado):
"""Genera un actor por cada opcion del menu.
:param opciones: Una lista con todas las opciones que tendrá el menú.
"""
for indice, opcion in enumerate(opciones):
y = self.y - indice * 50
texto, funcion, argumentos = opcion[0],opcion[1],opcion[2:]
opciones = pilas.actores.Opcion(texto, x=0, y=y, funcion_a_invocar=funcion, argumentos=argumentos, fuente=fuente,
color_normal=color_normal, color_resaltado=color_resaltado)
self.opciones_como_actores.append(opciones)
def seleccionar_primer_opcion(self):
"""Destaca la primer opción del menú."""
if self.opciones_como_actores:
self.opciones_como_actores[0].resaltar()
def _verificar_opciones(self, opciones):
"""Se asegura de que la lista este bien definida.
:param opciones: La lista de opciones a inspeccionar.
"""
for x in opciones:
if not isinstance(x, tuple) or len(x)<2:
raise Exception("Opciones incorrectas, cada opcion tiene que ser una tupla.")
def actualizar(self):
"Se ejecuta de manera periodica."
if self.demora_al_responder < 0:
if self.control_menu.boton:
self.control_menu.limpiar()
self.seleccionar_opcion_actual()
self.demora_al_responder = DEMORA
if self.control_menu.abajo:
self.mover_cursor(1)
self.demora_al_responder = DEMORA
elif self.control_menu.arriba:
self.mover_cursor(-1)
self.demora_al_responder = DEMORA
self.demora_al_responder -= 1
def seleccionar_opcion_actual(self):
"""Se ejecuta para activar y lanzar el item actual."""
opcion = self.opciones_como_actores[self.opcion_actual]
opcion.seleccionar()
def mover_cursor(self, delta):
"""Realiza un movimiento del cursor que selecciona opciones.
:param delta: El movimiento a realizar (+1 es avanzar y -1 retroceder).
"""
        # Leave the current option unselected.
self._deshabilitar_opcion_actual()
        # Make sure the index stays between 0 and 'number of options'.
self.opcion_actual += delta
self.opcion_actual %= len(self.opciones_como_actores)
        # Select the new option.
self.opciones_como_actores[self.opcion_actual].resaltar()
def __setattr__(self, atributo, valor):
        # Try to propagate the action to the group's actors.
try:
for x in self.opciones_como_actores:
setattr(x, atributo, valor)
except AttributeError:
pass
Actor.__setattr__(self, atributo, valor)
def cuando_mueve_el_mouse(self, evento):
"""Permite cambiar la opcion actual moviendo el mouse. Retorna True si el mouse esta sobre alguna opcion.
:param evento: El evento que representa el movimiento del mouse.
"""
for indice, opcion in enumerate(self.opciones_como_actores):
if opcion.colisiona_con_un_punto(evento.x, evento.y):
if indice != self.opcion_actual:
self._deshabilitar_opcion_actual()
self.opcion_actual = indice
self.opciones_como_actores[indice].resaltar()
return True
def _deshabilitar_opcion_actual(self):
"""Le quita el foco o resaltado a la opción del menú actual."""
self.opciones_como_actores[self.opcion_actual].resaltar(False)
def cuando_hace_click_con_el_mouse(self, evento):
"""Se ejecuta cuando se hace click con el mouse.
:param evento: objeto que representa el evento click de mouse.
"""
if self.cuando_mueve_el_mouse(evento):
self.seleccionar_opcion_actual()
| lgpl-3.0 |
devzero2000/RPM5 | python/test/test_rpm.py | 2 | 1590 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009 Per Øyvind Karlsen <[email protected]>
#
import unittest
from test.test_support import rmtree
import rpm
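# As exercised by the tests below, rpm.evrCompare() follows the cmp()
# convention: a negative value when the first epoch:version-release string is
# older, zero when both are equal, and a positive value when it is newer.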
class Test_evrCompare(unittest.TestCase):
def test_e(self):
self.assertEqual(rpm.evrCompare("1", "2"), -1)
self.assertEqual(rpm.evrCompare("3", "2"), 1)
self.assertEqual(rpm.evrCompare("4", "4"), 0)
self.assertEqual(rpm.evrCompare("5", "20"), -1)
def test_ev(self):
self.assertEqual(rpm.evrCompare("1:32", "2:231"), -1)
self.assertEqual(rpm.evrCompare("3:1.1", "2:5"), 1)
self.assertEqual(rpm.evrCompare("2:1.1", "2:0.1"), 1)
self.assertEqual(rpm.evrCompare("4:123", "4:123"), 0)
self.assertEqual(rpm.evrCompare("5:1.3", "20:9.3"), -1)
def test_evr(self):
self.assertEqual(rpm.evrCompare("1:3.2-6", "2:9.4-99"), -1)
self.assertEqual(rpm.evrCompare("3:3-1", "2:9.3"), 1)
self.assertEqual(rpm.evrCompare("4:429-999999", "4:0.1-2"), 1)
self.assertEqual(rpm.evrCompare("5:23-83:23", "20:0.0.1-0.1"), -1)
def test_evrd(self):
self.assertEqual(rpm.evrCompare("10:321.32a-p21:999", "2:99"), 1)
self.assertEqual(rpm.evrCompare("3", "2:531-9:1"), -1)
self.assertEqual(rpm.evrCompare("4:3-2:1", "4:3-2"), 1)
self.assertEqual(rpm.evrCompare("20:9-3:2011.0", "20:9-3:2011.0"), 0)
def test_main():
from test import test_support
test_support.run_unittest(
Test_evrCompare)
test_support.reap_children()
if __name__ == "__main__":
test_main()
| lgpl-2.1 |
Ditmar/plugin.video.pelisalacarta | pelisalacarta/channels/peliculasmx.py | 8 | 15499 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for peliculasmx
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "peliculasmx"
__category__ = "F"
__type__ = "generic"
__title__ = "peliculasmx"
__language__ = "ES"
__creationdate__ = "20130528"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("pelisalacarta.channels.peliculasmx mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, title="Últimas añadidas", action="peliculas" , url="http://www.peliculasmx.net/" , extra="http://www.peliculasmx.net/"))
itemlist.append( Item(channel=__channel__, title="Últimas por género" , action="generos" , url="http://www.peliculasmx.net/"))
itemlist.append( Item(channel=__channel__, title="Últimas por letra" , action="letras" , url="http://www.peliculasmx.net/"))
itemlist.append( Item(channel=__channel__, title="Buscar..." , action="search" , url="http://www.peliculasmx.net/"))
return itemlist
def generos(item):
logger.info("pelisalacarta.channels.peliculasmx generos")
itemlist = []
    # Download the page
data = scrapertools.cachePage(item.url)
patron = '>.*?<li><a title="(.*?)" href="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
scrapedurl = urlparse.urljoin("",match[1])
scrapedurl = scrapedurl.replace(".html","/page/0.html")
extra = scrapedurl.replace ("/page/0.html","/page/")
scrapedtitle = match[0]
#scrapedtitle = scrapedtitle.replace("","")
scrapedthumbnail = ""
scrapedplot = ""
logger.info(scrapedtitle)
if scrapedtitle=="Eroticas +18":
if config.get_setting("enableadultmode") == "true":
itemlist.append( Item(channel=__channel__, action="peliculas", title="Eroticas +18" , url="http://www.myhotamateurvideos.com" , thumbnail=scrapedthumbnail , plot=scrapedplot , extra="" , folder=True) )
else:
if scrapedtitle <> "" and len(scrapedtitle) < 20 and scrapedtitle <> "Iniciar Sesion":
itemlist.append( Item(channel=__channel__, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=extra, folder=True) )
itemlist = sorted(itemlist, key=lambda Item: Item.title)
return itemlist
def letras(item):
logger.info("pelisalacarta.channels.peliculasmx letras")
extra = item.url
itemlist = []
itemlist.append( Item(channel=__channel__, action="peliculas" , title="0-9", url="http://www.peliculasmx.net/letra/09.html", extra="http://www.peliculasmx.net/letra/09.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="A" , url="http://www.peliculasmx.net/letra/a.html", extra="http://www.peliculasmx.net/letra/a.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="B" , url="http://www.peliculasmx.net/letra/b.html", extra="http://www.peliculasmx.net/letra/b.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="C" , url="http://www.peliculasmx.net/letra/c.html", extra="http://www.peliculasmx.net/letra/c.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="D" , url="http://www.peliculasmx.net/letra/d.html", extra="http://www.peliculasmx.net/letra/d.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="E" , url="http://www.peliculasmx.net/letra/e.html", extra="http://www.peliculasmx.net/letra/e.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="F" , url="http://www.peliculasmx.net/letra/f.html", extra="http://www.peliculasmx.net/letra/f.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="G" , url="http://www.peliculasmx.net/letra/g.html", extra="http://www.peliculasmx.net/letra/g.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="H" , url="http://www.peliculasmx.net/letra/h.html", extra="http://www.peliculasmx.net/letra/h.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="I" , url="http://www.peliculasmx.net/letra/i.html", extra="http://www.peliculasmx.net/letra/i.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="J" , url="http://www.peliculasmx.net/letra/j.html", extra="http://www.peliculasmx.net/letra/j.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="K" , url="http://www.peliculasmx.net/letra/k.html", extra="http://www.peliculasmx.net/letra/k.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="L" , url="http://www.peliculasmx.net/letra/l.html", extra="http://www.peliculasmx.net/letra/l.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="M" , url="http://www.peliculasmx.net/letra/m.html", extra="http://www.peliculasmx.net/letra/m.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="N" , url="http://www.peliculasmx.net/letra/n.html", extra="http://www.peliculasmx.net/letra/n.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="O" , url="http://www.peliculasmx.net/letra/o.html", extra="http://www.peliculasmx.net/letra/o.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="P" , url="http://www.peliculasmx.net/letra/p.html", extra="http://www.peliculasmx.net/letra/p.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Q" , url="http://www.peliculasmx.net/letra/q.html", extra="http://www.peliculasmx.net/letra/q.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="R" , url="http://www.peliculasmx.net/letra/r.html", extra="http://www.peliculasmx.net/letra/r.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="S" , url="http://www.peliculasmx.net/letra/s.html", extra="http://www.peliculasmx.net/letra/s.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="T" , url="http://www.peliculasmx.net/letra/t.html", extra="http://www.peliculasmx.net/letra/t.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="U" , url="http://www.peliculasmx.net/letra/u.html", extra="http://www.peliculasmx.net/letra/u.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="V" , url="http://www.peliculasmx.net/letra/v.html", extra="http://www.peliculasmx.net/letra/v.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="W" , url="http://www.peliculasmx.net/letra/w.html", extra="http://www.peliculasmx.net/letra/w.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="X" , url="http://www.peliculasmx.net/letra/x.html", extra="http://www.peliculasmx.net/letra/x.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Y" , url="http://www.peliculasmx.net/letra/y.html", extra="http://www.peliculasmx.net/letra/y.html"))
itemlist.append( Item(channel=__channel__, action="peliculas" , title="Z" , url="http://www.peliculasmx.net/letra/z.html", extra="http://www.peliculasmx.net/letra/z.html"))
return itemlist
def peliculas(item):
logger.info("pelisalacarta.channels.peliculasmx peliculas")
extra = item.extra
itemlist = []
    # Download the page
data = scrapertools.cachePage(item.url)
patron = '<h2 class="titpeli.*?<a href="([^"]+)" title="([^"]+)".*?peli_img_img">.*?<img src="([^"]+)".*?<strong>Idioma</strong>:.*?/>([^"]+)</div>.*?<strong>Calidad</strong>: ([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0] #urlparse.urljoin("",match[0])
scrapedtitle = match[1] + ' ['+ match[4] +']'
scrapedtitle = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8")
scrapedthumbnail = match[2]
#scrapedplot = match[0]
#itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , folder=True) )
#if extra<>"":
    # Extract the next-page marker
#patron = 'page=(.*?)"><span><b>'
patron = '<span><b>(.*?)</b></span>'
matches = re.compile(patron,re.DOTALL).findall(data)
#if DEBUG: scrapertools.printMatches(matches)
for match in matches:
#if len(matches)>0:
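        # The number scraped from <span><b>...</b></span> is the page being
        # shown; add one to build the URL of the following results page.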
        nu = int(match) + 1
scrapedurl = extra + "?page=" + str(nu)
scrapedtitle = "!Pagina Siguiente ->"
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append( Item(channel=__channel__, action="peliculas", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , extra=extra , folder=True) )
return itemlist
def findvideos(item):
logger.info("pelisalacarta.channels.peliculasmx videos")
    # Download the page
data = scrapertools.cachePage(item.url)
title = item.title
scrapedthumbnail = item.thumbnail
itemlist = []
patron = '<li><a href="#ms.*?">([^"]+)</a></li>.*?<iframe src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
#itemlist.append( Item(channel=__channel__, action="play", title=title , fulltitle=item.fulltitle, url=item.url , thumbnail=scrapedthumbnail , folder=False) )
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
url = match[1]
title = "SERVIDOR: " + match[0]
itemlist.append( Item(channel=__channel__, action="play", title=title , fulltitle=item.fulltitle, url=url , thumbnail=scrapedthumbnail , folder=False) )
return itemlist
def play(item):
logger.info("pelisalacarta.channels.peliculasmx play")
itemlist=[]
from servers import servertools
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.channel=__channel__
videoitem.action="play"
videoitem.folder=False
return itemlist
#data2 = scrapertools.cache_page(item.url)
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/mv.php?url=","http://www.megavideo.com/?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videobb.php?url=","http://www.videobb.com/watch_video.php?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidbux.php?url=","http://www.vidbux.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidxden.php?url=","http://www.vidxden.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videozer.php?url=","http://www.videozer.com/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/pl/play.php?url=","http://www.putlocker.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/mv/play.php?url=","http://www.modovideo.com/frame.php?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/ss/play.php?url=","http://www.sockshare.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/vb/play.php?url=","http://vidbull.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/sockshare.php?url=","http://www.sockshare.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/moevide.php?url=","http://moevideo.net/?page=video&uid=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/novamov.php?url=","http://www.novamov.com/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/movshare.php?url=","http://www.movshare.net/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/divxstage.php?url=","http://www.divxstage.net/video/")
#listavideos = servertools.findvideos(data2)
#for video in listavideos:
# invalid = video[1]
# invalid = invalid[0:8]
# if invalid!= "FN3WE43K" and invalid!="9CC3F8&e":
# scrapedtitle = item.title+video[0]
# videourl = item.url
# server = video[2]
# if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]")
#logger.info("url=" + item.url)
    # Add to the XBMC listing
#itemlist.append( Item(channel=__channel__, action="play", title=scrapedtitle , fulltitle=item.fulltitle, url=videourl , server=server , folder=False) )
# itemlist.append( Item(channel=__channel__, action="play" , title=item.title , url=item.url, thumbnail="", plot="", server=item.url))
# return itemlist
def search(item,texto):
logger.info("pelisalacarta.channels.peliculasmx search")
itemlist = []
texto = texto.replace(" ","+")
try:
# Series
item.url="http://www.peliculasmx.net/buscar/?q=%s"
item.url = item.url % texto
item.extra = ""
itemlist.extend(peliculas(item))
itemlist = sorted(itemlist, key=lambda Item: Item.title)
return itemlist
    # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
'''url = "http://www.peliculasaudiolatino.com/series-anime"
data = scrapertools.cachePage(url)
# Extrae las entradas de todas series
patronvideos = '<li>[^<]+'
patronvideos += '<a.+?href="([\D]+)([\d]+)">[^<]+'
patronvideos += '.*?/>(.*?)</a>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
scrapedtitle = match[2].strip()
# Realiza la busqueda
if scrapedtitle.lower()==texto.lower() or texto.lower() in scrapedtitle.lower():
logger.info(scrapedtitle)
scrapedurl = urlparse.urljoin(url,(match[0]+match[1]))
scrapedthumbnail = urlparse.urljoin("http://www.peliculasaudiolatino.com/images/series/",(match[1]+".png"))
scrapedplot = ""
# Añade al listado
itemlist.append( Item(channel=__channel__, action="listacapitulos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist'''
# Automatic channel check: this function must return "True" if the channel is OK.
def test():
from servers import servertools
mainlist_items = mainlist(Item())
novedades_items = peliculas(mainlist_items[0])
for novedades_item in novedades_items:
mirrors = findvideos( item=novedades_item )
if len(mirrors)>0:
return True
return False
| gpl-3.0 |
digitalghost/pycv-gameRobot | ToolUtils.py | 1 | 4528 | # -*- coding: utf-8 -*
import subprocess
import time
import Settings
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
class ToolUtils(object):
"""docstring for ClassName"""
def __init__(self):
        super(ToolUtils, self).__init__()
@staticmethod
def getTouchPoint(region):
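        # Returns the center (x, y) of a rectangular region given as
        # [left, top, right, bottom] screen coordinates.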
centerX = int(int(region[0]) + (int(region[2]) - int(region[0]))/2)
centerY = int(int(region[1]) + (int(region[3]) - int(region[1]))/2)
return centerX,centerY
@staticmethod
def checkTemplateExists(tplPath,snapshot):
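        # Delegates the template match to the external cvTplMatch.py script:
        # it prints "NULL" when the template is not found in the snapshot,
        # otherwise a comma-separated match region that is parsed below.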
tplPath = './templates/' + Settings.DEVICE_RESID + "/" + Settings.SCENE_NAME + '/' + tplPath
process = subprocess.Popen(['python','./cvTplMatch.py',tplPath,snapshot],stdout=subprocess.PIPE)
cmdData = process.communicate()[0]
cmdStr = str(cmdData)[:-1]
cmdRC = process.returncode
#print "!!!!!!!!!!!!"+cmdStr
if str(cmdStr) == "NULL":
return False,[]
else:
arr = cmdStr.split(",")
for idx in range(len(arr)):
arr[idx] = int(arr[idx])
return True,arr
@staticmethod
def fixRotation(snapshotPath):
process = subprocess.Popen(['python','./cvFixRotation.py',snapshotPath],stdout=subprocess.PIPE)
cmdData = process.communicate()[0]
cmdStr = str(cmdData)[:-1]
cmdRC = process.returncode
return cmdStr
@staticmethod
def takeSnapshot(device):
startTick = time.time()
screenShot = device.takeSnapshot()
endTick = time.time()
print "***TAKE SNAPSHOT DONE*** Elapse in secs:" + str(endTick-startTick)
# Writes the screenshot to a file
Settings.LATEST_SCREENSHOT_PATH = "./snapshots/s" + str(Settings.SNAP_COUNT) + ".png"
startTick = time.time()
screenShot.writeToFile(Settings.LATEST_SCREENSHOT_PATH,'png')
if Settings.FIX_ROTATION_NEEDED:
ToolUtils.fixRotation(Settings.LATEST_SCREENSHOT_PATH)
endTick = time.time()
print "***WRITE FILE DONE*** Elapse in secs:" + str(endTick-startTick)
Settings.SNAP_COUNT += 1
if Settings.SNAP_COUNT == 10:
Settings.SNAP_COUNT = 0
@staticmethod
def executeScenePath(scenePath, device):
featuresFounded = False
print "===BEGIN PATH %s LOOP===" %(scenePath.name)
while (not featuresFounded):
if scenePath.needReSnapshots:
ToolUtils.takeSnapshot(device)
# Execute the scene path
for feature in scenePath.features:
tplPath = feature
print "***FEATURE %s MATCHING***........Template Path is %s" %(feature,tplPath)
exists,region = ToolUtils.checkTemplateExists(tplPath,Settings.LATEST_SCREENSHOT_PATH)
if exists:
print "***FEATURE MATCHING SUCCEED***"
featuresFounded = True
else:
print "***FEATURE MATCHING MISSED***"
featuresFounded = False
break
if featuresFounded:
if scenePath.method != "":
print "***EXECUTING METHOD FOR %s MATCHING***........data is %s" %(scenePath.name,scenePath.method)
getCustomMethod = getattr(Settings.GAME_OBJECT, scenePath.method)
getCustomMethod(scenePath)
else:
for touch in scenePath.touches:
touchPath = touch
print "***TOUCH %s MATCHING***........Touch Path is %s" %(touch,touchPath)
exists,region = ToolUtils.checkTemplateExists(touchPath,Settings.LATEST_SCREENSHOT_PATH)
if exists:
centerX,centerY = ToolUtils.getTouchPoint(region)
print "***FEATURE FOUNDED*** Start to touch at %s" %touch
device.touch(centerX,centerY,MonkeyDevice.DOWN_AND_UP)
else:
print "---FEATURE FOUNDED, BUT TOUCH %s NOT FOUNDED, CHECK YOUR TEMPLATE CONFIGURATION---" %touch
if (featuresFounded or (not scenePath.needRepeatWhenNotFound)):
break
if not featuresFounded:
print "!!!!!Not Found"
MonkeyRunner.sleep(5)
print "===END PATH %s LOOP===\n" %(scenePath.name)
return scenePath.nextPathId
| gpl-3.0 |
mhugo/QGIS | python/core/additions/processing.py | 26 | 1417 | # -*- coding: utf-8 -*-
"""
***************************************************************************
processing.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Denis Rouzaud
Email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
# add some __repr__ methods to processing classes
def processing_source_repr(self):
return "<QgsProcessingFeatureSourceDefinition {{'source':{}, 'selectedFeaturesOnly': {}}}>".format(
self.source.staticValue(), self.selectedFeaturesOnly)
def processing_output_layer_repr(self):
return "<QgsProcessingOutputLayerDefinition {{'sink':{}, 'createOptions': {}}}>".format(self.sink.staticValue(),
self.createOptions)
| gpl-2.0 |
lecaoquochung/ddnb.django | tests/indexes/models.py | 46 | 1681 | from django.db import connection
from django.db import models
class CurrentTranslation(models.ForeignObject):
"""
Creates virtual relation to the translation with model cache enabled.
"""
# Avoid validation
requires_unique_target = False
def __init__(self, to, from_fields, to_fields, **kwargs):
# Disable reverse relation
kwargs['related_name'] = '+'
# Set unique to enable model cache.
kwargs['unique'] = True
super(CurrentTranslation, self).__init__(to, from_fields, to_fields, **kwargs)
class ArticleTranslation(models.Model):
article = models.ForeignKey('indexes.Article')
language = models.CharField(max_length=10, unique=True)
content = models.TextField()
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
# Add virtual relation to the ArticleTranslation model.
translation = CurrentTranslation(ArticleTranslation, ['id'], ['article'])
class Meta:
index_together = [
["headline", "pub_date"],
]
# Model for index_together being used only with single list
class IndexTogetherSingleList(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
index_together = ["headline", "pub_date"]
# Indexing a TextField on Oracle or MySQL results in index creation error.
if connection.vendor == 'postgresql':
class IndexedArticle(models.Model):
headline = models.CharField(max_length=100, db_index=True)
body = models.TextField(db_index=True)
slug = models.CharField(max_length=40, unique=True)
| bsd-3-clause |
jdreaver/vispy | examples/basics/visuals/markers.py | 17 | 2332 | # -*- coding: utf-8 -*-
# vispy: gallery 30
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
""" Display markers at different sizes and line thicknessess.
"""
import numpy as np
from vispy import app, visuals
from vispy.visuals.transforms import STTransform
n = 500
pos = np.zeros((n, 2))
colors = np.ones((n, 4), dtype=np.float32)
radius, theta, dtheta = 1.0, 0.0, 5.5 / 180.0 * np.pi
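# Lay the markers out along an inward spiral: each step advances the angle,
# shrinks the radius and shifts the colour from green towards red.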
for i in range(500):
theta += dtheta
x = 256 + radius * np.cos(theta)
y = 256 + radius * np.sin(theta)
r = 10.1 - i * 0.02
radius -= 0.45
pos[i] = x, y
    colors[i] = (i / 500.0, 1.0 - i / 500.0, 0, 1)
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, keys='interactive', size=(512, 512),
title="Marker demo [press space to change marker]")
self.index = 0
self.markers = visuals.MarkersVisual()
self.markers.set_data(pos, face_color=colors)
self.markers.symbol = visuals.marker_types[self.index]
self.markers.transform = STTransform()
self.show()
def on_draw(self, event):
self.context.clear(color='white')
self.markers.draw()
def on_mouse_wheel(self, event):
"""Use the mouse wheel to zoom."""
self.markers.transform.zoom((1.25**event.delta[1],)*2,
center=event.pos)
self.update()
def on_resize(self, event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, self.physical_size[0], self.physical_size[1])
self.context.set_viewport(*vp)
self.markers.transforms.configure(viewport=vp, canvas=self)
def on_key_press(self, event):
if event.text == ' ':
self.index = (self.index + 1) % (len(visuals.marker_types))
self.markers.symbol = visuals.marker_types[self.index]
self.update()
elif event.text == 's':
self.markers.scaling = not self.markers.scaling
self.update()
if __name__ == '__main__':
canvas = Canvas()
app.run()
| bsd-3-clause |
kartikp1995/gnuradio | gr-uhd/examples/python/usrp_wfm_rcv_fmdet.py | 58 | 14253 | #!/usr/bin/env python
#
# Copyright 2005-2007,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, audio, uhd
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import slider, powermate
from gnuradio.wxgui import stdgui2, fftsink2, form, scopesink2
from optparse import OptionParser
import sys
import wx
class wfm_rx_block (stdgui2.std_top_block):
def __init__(self,frame,panel,vbox,argv):
stdgui2.std_top_block.__init__ (self,frame,panel,vbox,argv)
parser=OptionParser(option_class=eng_option)
parser.add_option("-a", "--args", type="string", default="",
help="UHD device address args [default=%default]")
parser.add_option("", "--spec", type="string", default=None,
help="Subdevice of UHD device where appropriate")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("-f", "--freq", type="eng_float", default=100.1e6,
help="set frequency to FREQ", metavar="FREQ")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-s", "--squelch", type="eng_float", default=0,
help="set squelch level (default is 0)")
parser.add_option("-V", "--volume", type="eng_float", default=None,
help="set volume (default is midpoint)")
parser.add_option("-O", "--audio-output", type="string", default="default",
help="pcm device name. E.g., hw:0,0 or surround51 or /dev/dsp")
parser.add_option("", "--freq-min", type="eng_float", default=87.9e6,
help="Set a minimum frequency [default=%default]")
parser.add_option("", "--freq-max", type="eng_float", default=108.1e6,
help="Set a maximum frequency [default=%default]")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
self.frame = frame
self.panel = panel
self.vol = 0
self.state = "FREQ"
self.freq = 0
self.fm_freq_min = options.freq_min
self.fm_freq_max = options.freq_max
# build graph
self.u = uhd.usrp_source(device_addr=options.args, stream_args=uhd.stream_args('fc32'))
# Set the subdevice spec
if(options.spec):
self.u.set_subdev_spec(options.spec, 0)
# Set the antenna
if(options.antenna):
self.u.set_antenna(options.antenna, 0)
usrp_rate = 320e3
demod_rate = 320e3
audio_rate = 48e3
audio_decim = 10
self.u.set_samp_rate(usrp_rate)
dev_rate = self.u.get_samp_rate()
nfilts = 32
chan_coeffs = filter.firdes.low_pass_2(10*nfilts, # gain
nfilts*usrp_rate, # sampling rate
90e3, # passband cutoff
30e3, # transition bw
70) # stopband attenuation
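        # The device may not honor the requested 320 kS/s exactly, so resample
        # by the ratio of requested to delivered rate before demodulation.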
rrate = usrp_rate / dev_rate
self.chan_filt = filter.pfb.arb_resampler_ccf(rrate, chan_coeffs, nfilts)
self.guts = analog.wfm_rcv_fmdet (demod_rate, audio_decim)
chan_rate = audio_rate / (demod_rate/audio_decim)
self.rchan_filt = filter.pfb.arb_resampler_fff(chan_rate)
self.lchan_filt = filter.pfb.arb_resampler_fff(chan_rate)
# FIXME rework {add,multiply}_const_* to handle multiple streams
self.volume_control_l = blocks.multiply_const_ff(self.vol)
self.volume_control_r = blocks.multiply_const_ff(self.vol)
# sound card as final sink
self.audio_sink = audio.sink(int (audio_rate),
options.audio_output,
False) # ok_to_block
# now wire it all together
self.connect(self.u, self.chan_filt, self.guts)
self.connect((self.guts, 0), self.lchan_filt,
self.volume_control_l, (self.audio_sink,0))
self.connect((self.guts, 1), self.rchan_filt,
self.volume_control_r, (self.audio_sink,1))
try:
self.guts.stereo_carrier_pll_recovery.squelch_enable(True)
except:
print "FYI: This implementation of the stereo_carrier_pll_recovery has no squelch implementation yet"
self._build_gui(vbox, usrp_rate, demod_rate, audio_rate)
if options.gain is None:
# if no gain was specified, use the mid-point in dB
g = self.u.get_gain_range()
options.gain = float(g.start()+g.stop())/2.0
if options.volume is None:
g = self.volume_range()
options.volume = float(g[0]+g[1])/2
if abs(options.freq) < 1e6:
options.freq *= 1e6
frange = self.u.get_freq_range()
if(frange.start() > self.fm_freq_max or frange.stop() < self.fm_freq_min):
sys.stderr.write("Radio does not support required frequency range.\n")
sys.exit(1)
if(options.freq < self.fm_freq_min or options.freq > self.fm_freq_max):
sys.stderr.write("Requested frequency is outside of required frequency range.\n")
sys.exit(1)
# set initial values
self.set_gain(options.gain)
self.set_vol(options.volume)
try:
self.guts.stereo_carrier_pll_recovery.set_lock_threshold(options.squelch)
except:
print "FYI: This implementation of the stereo_carrier_pll_recovery has no squelch implementation yet"
if not(self.set_freq(options.freq)):
self._set_status_msg("Failed to set initial frequency")
def _set_status_msg(self, msg, which=0):
self.frame.GetStatusBar().SetStatusText(msg, which)
def _build_gui(self, vbox, usrp_rate, demod_rate, audio_rate):
def _form_set_freq(kv):
return self.set_freq(kv['freq'])
if 1:
self.src_fft = fftsink2.fft_sink_c(self.panel, title="Data from USRP",
fft_size=512, sample_rate=usrp_rate,
ref_scale=32768.0, ref_level=0, y_divs=12)
self.connect (self.u, self.src_fft)
vbox.Add (self.src_fft.win, 4, wx.EXPAND)
if 1:
post_fm_demod_fft = fftsink2.fft_sink_f(self.panel, title="Post FM Demod",
fft_size=512, sample_rate=demod_rate,
y_per_div=10, ref_level=0)
self.connect (self.guts.fm_demod, post_fm_demod_fft)
vbox.Add (post_fm_demod_fft.win, 4, wx.EXPAND)
if 0:
post_stereo_carrier_generator_fft = fftsink2.fft_sink_c (self.panel, title="Post Stereo_carrier_generator",
fft_size=512, sample_rate=audio_rate,
y_per_div=10, ref_level=0)
self.connect (self.guts.stereo_carrier_generator, post_stereo_carrier_generator_fft)
vbox.Add (post_stereo_carrier_generator_fft.win, 4, wx.EXPAND)
if 0:
post_deemphasis_left = fftsink2.fft_sink_f (self.panel, title="Post_Deemphasis_Left",
fft_size=512, sample_rate=audio_rate,
y_per_div=10, ref_level=0)
self.connect (self.guts.deemph_Left, post_deemphasis_left)
vbox.Add (post_deemphasis_left.win, 4, wx.EXPAND)
if 0:
post_deemphasis_right = fftsink2.fft_sink_f(self.panel, title="Post_Deemphasis_Right",
fft_size=512, sample_rate=audio_rate,
y_per_div=10, ref_level=-20)
self.connect (self.guts.deemph_Left, post_deemphasis_right)
vbox.Add (post_deemphasis_right.win, 4, wx.EXPAND)
if 0:
LmR_fft = fftsink2.fft_sink_f(self.panel, title="LmR",
fft_size=512, sample_rate=audio_rate,
y_per_div=10, ref_level=-20)
self.connect (self.guts.LmR_real,LmR_fft)
vbox.Add (LmR_fft.win, 4, wx.EXPAND)
if 0:
self.scope = scopesink2.scope_sink_f(self.panel, sample_rate=demod_rate)
self.connect (self.guts.fm_demod,self.scope)
vbox.Add (self.scope.win,4,wx.EXPAND)
# control area form at bottom
self.myform = myform = form.form()
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0)
myform['freq'] = form.float_field(
parent=self.panel, sizer=hbox, label="Freq", weight=1,
callback=myform.check_input_and_call(_form_set_freq, self._set_status_msg))
hbox.Add((5,0), 0)
myform['freq_slider'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, weight=3,
range=(self.fm_freq_min, self.fm_freq_max, 0.1e6),
callback=self.set_freq)
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.Add((5,0), 0)
myform['volume'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Volume",
weight=3, range=self.volume_range(),
callback=self.set_vol)
hbox.Add((5,0), 1)
g = self.u.get_gain_range()
myform['gain'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Gain",
weight=3, range=(g.start(), g.stop(), g.step()),
callback=self.set_gain)
hbox.Add((5,0), 0)
myform['sqlch_thrsh'] = \
form.quantized_slider_field(parent=self.panel, sizer=hbox, label="Stereo Squelch Threshold",
weight=3, range=(0.0,1.0,0.01),
callback=self.set_squelch)
hbox.Add((5,0), 0)
vbox.Add(hbox, 0, wx.EXPAND)
try:
self.knob = powermate.powermate(self.frame)
self.rot = 0
powermate.EVT_POWERMATE_ROTATE (self.frame, self.on_rotate)
powermate.EVT_POWERMATE_BUTTON (self.frame, self.on_button)
except:
print "FYI: No Powermate or Contour Knob found"
def on_rotate (self, event):
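        # Accumulate knob detents; every third detent steps the tuned
        # frequency by 0.1 MHz or the volume by one step, depending on the
        # mode toggled by the knob button.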
self.rot += event.delta
if (self.state == "FREQ"):
if self.rot >= 3:
self.set_freq(self.freq + .1e6)
self.rot -= 3
elif self.rot <=-3:
self.set_freq(self.freq - .1e6)
self.rot += 3
else:
step = self.volume_range()[2]
if self.rot >= 3:
self.set_vol(self.vol + step)
self.rot -= 3
elif self.rot <=-3:
self.set_vol(self.vol - step)
self.rot += 3
def on_button (self, event):
if event.value == 0: # button up
return
self.rot = 0
if self.state == "FREQ":
self.state = "VOL"
else:
self.state = "FREQ"
self.update_status_bar ()
def set_vol (self, vol):
g = self.volume_range()
self.vol = max(g[0], min(g[1], vol))
self.volume_control_l.set_k(10**(self.vol/10))
self.volume_control_r.set_k(10**(self.vol/10))
self.myform['volume'].set_value(self.vol)
self.update_status_bar ()
def set_squelch(self,squelch_threshold):
try:
self.guts.stereo_carrier_pll_recovery.set_lock_threshold(squelch_threshold);
except:
print "FYI: This implementation of the stereo_carrier_pll_recovery has no squelch implementation yet"
def set_freq(self, target_freq):
"""
Set the center frequency we're interested in.
Args:
target_freq: frequency in Hz
        @rtype: bool
"""
r = self.u.set_center_freq(target_freq)
if r:
self.freq = target_freq
self.myform['freq'].set_value(target_freq) # update displayed value
self.myform['freq_slider'].set_value(target_freq) # update displayed value
self.update_status_bar()
self._set_status_msg("OK", 0)
return True
self._set_status_msg("Failed", 0)
return False
def set_gain(self, gain):
self.myform['gain'].set_value(gain) # update displayed value
self.u.set_gain(gain)
def update_status_bar (self):
msg = "Volume:%r Setting:%s" % (self.vol, self.state)
self._set_status_msg(msg, 1)
self.src_fft.set_baseband_freq(self.freq)
def volume_range(self):
return (-20.0, 0.0, 0.5)
if __name__ == '__main__':
app = stdgui2.stdapp (wfm_rx_block, "USRP WFM RX")
app.MainLoop ()
| gpl-3.0 |
zasdfgbnm/tensorflow | tensorflow/contrib/distributions/python/ops/vector_student_t.py | 15 | 10043 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Vector Student's t distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import student_t
from tensorflow.python.ops.distributions import transformed_distribution
class _VectorStudentT(transformed_distribution.TransformedDistribution):
"""A vector version of Student's t-distribution on `R^k`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + 1)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) ( sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1)) )**k
```
where:
* `loc = mu`; a vector in `R^k`,
* `scale = Sigma`; a lower-triangular matrix in `R^{k x k}`,
* `Z` denotes the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function), and,
* `||y||**2` denotes the [squared Euclidean norm](
https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `y`.
The VectorStudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
Notice that the `scale` matrix has semantics closer to std. deviation than
covariance (but it is not std. deviation).
This distribution is an Affine transformation of iid
[Student's t-distributions](
https://en.wikipedia.org/wiki/Student%27s_t-distribution)
  and should not be confused with the [Multivariate Student's t-distribution](
https://en.wikipedia.org/wiki/Multivariate_t-distribution). The
  traditional Multivariate Student's t-distribution is a type of
[elliptical distribution](
https://en.wikipedia.org/wiki/Elliptical_distribution); it has PDF:
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))
```
Notice that the Multivariate Student's t-distribution uses `k` where the
Vector Student's t-distribution has a `1`. Conversely the Vector version has a
broader application of the power-`k` in the normalization constant.
#### Examples
A single instance of a "Vector Student's t-distribution" is defined by a mean
vector of length `k` and a scale matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
tfd = tf.contrib.distributions
# Initialize a single 3-variate vector Student's t-distribution.
mu = [1., 2, 3]
chol = [[1., 0, 0.],
[1, 3, 0],
[1, 2, 3]]
vt = tfd.VectorStudentT(df=2, loc=mu, scale_tril=chol)
# Evaluate this on an observation in R^3, returning a scalar.
vt.prob([-1., 0, 1])
# Initialize a batch of two 3-variate vector Student's t-distributions.
mu = [[1., 2, 3],
[11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
vt = tfd.VectorStudentT(loc=mu, scale_tril=chol)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1],
[-11, 0, 11]]
vt.prob(x)
```
For more examples of how to construct the `scale` matrix, see the
`tf.contrib.distributions.bijectors.Affine` docstring.
"""
def __init__(self,
df,
loc=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
allow_nan_stats=True,
name="VectorStudentT"):
"""Instantiates the vector Student's t-distributions on `R^k`.
The `batch_shape` is the broadcast between `df.batch_shape` and
`Affine.batch_shape` where `Affine` is constructed from `loc` and
`scale_*` arguments.
The `event_shape` is the event shape of `Affine.event_shape`.
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values. Must be
scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have the
same `batch_shape` implied by `loc`, `scale_*`.
loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is
applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag=scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ..., k], which represents a k x k
diagonal matrix. When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ..., k, k], which represents a
        k x k lower triangular matrix. When `None` no `scale_tril` term is added to
`scale`. The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Floating-point `Tensor` representing factor matrix
with last two dimensions of shape `(k, r)`. When `None`, no rank-r
update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing the diagonal
matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which
represents an r x r Diagonal matrix. When `None` low rank updates will
take the form `scale_perturb_factor * scale_perturb_factor.T`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
graph_parents = [df, loc, scale_identity_multiplier, scale_diag,
scale_tril, scale_perturb_factor, scale_perturb_diag]
with ops.name_scope(name):
with ops.name_scope("init", values=graph_parents):
# The shape of the _VectorStudentT distribution is governed by the
# relationship between df.batch_shape and affine.batch_shape. In
# pseudocode the basic procedure is:
# if df.batch_shape is scalar:
# if affine.batch_shape is not scalar:
# # broadcast distribution.sample so
# # it has affine.batch_shape.
# self.batch_shape = affine.batch_shape
# else:
# if affine.batch_shape is scalar:
# # let affine broadcasting do its thing.
# self.batch_shape = df.batch_shape
# All of the above magic is actually handled by TransformedDistribution.
# Here we really only need to collect the affine.batch_shape and decide
# what we're going to pass in to TransformedDistribution's
# (override) batch_shape arg.
affine = bijectors.Affine(
shift=loc,
scale_identity_multiplier=scale_identity_multiplier,
scale_diag=scale_diag,
scale_tril=scale_tril,
scale_perturb_factor=scale_perturb_factor,
scale_perturb_diag=scale_perturb_diag,
validate_args=validate_args)
distribution = student_t.StudentT(
df=df,
loc=array_ops.zeros([], dtype=affine.dtype),
scale=array_ops.ones([], dtype=affine.dtype))
batch_shape, override_event_shape = (
distribution_util.shapes_from_loc_and_scale(
affine.shift, affine.scale))
override_batch_shape = distribution_util.pick_vector(
distribution.is_scalar_batch(),
batch_shape,
constant_op.constant([], dtype=dtypes.int32))
super(_VectorStudentT, self).__init__(
distribution=distribution,
bijector=affine,
batch_shape=override_batch_shape,
event_shape=override_event_shape,
validate_args=validate_args,
name=name)
self._parameters = parameters
@property
def df(self):
"""Degrees of freedom in these Student's t distribution(s)."""
return self.distribution.df
@property
def loc(self):
"""Locations of these Student's t distribution(s)."""
return self.bijector.shift
@property
def scale(self):
"""Dense (batch) covariance matrix, if available."""
return self.bijector.scale
| apache-2.0 |
pedropena/iteexe | twisted/tap/manhole.py | 20 | 1714 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am the support module for making a manhole server with mktap.
"""
from twisted.manhole import service
from twisted.spread import pb
from twisted.python import usage, util
from twisted.cred import portal, checkers
from twisted.application import strports
import os, sys
class Options(usage.Options):
synopsis = "mktap manhole [options]"
optParameters = [
["user", "u", "admin", "Name of user to allow to log in"],
["port", "p", str(pb.portno), "Port to listen on"],
]
optFlags = [
["tracebacks", "T", "Allow tracebacks to be sent over the network"],
]
zsh_actions = {"user" : "_users"}
def opt_password(self, password):
"""Required. '-' will prompt or read a password from stdin.
"""
# If standard input is a terminal, I prompt for a password and
# confirm it. Otherwise, I use the first line from standard
# input, stripping off a trailing newline if there is one.
if password in ('', '-'):
self['password'] = util.getPassword(confirm=1)
else:
self['password'] = password
opt_w = opt_password
def postOptions(self):
if not self.has_key('password'):
self.opt_password('-')
def makeService(config):
port, user, password = config['port'], config['user'], config['password']
p = portal.Portal(
service.Realm(service.Service(config["tracebacks"], config.get('namespace'))),
[checkers.InMemoryUsernamePasswordDatabaseDontUse(**{user: password})]
)
return strports.service(port, pb.PBServerFactory(p, config["tracebacks"]))
| gpl-2.0 |
gerddie/nipype | nipype/interfaces/cmtk/convert.py | 14 | 10363 | """
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os, os.path as op
import datetime
import string
import warnings
import networkx as nx
from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits,
File, TraitedSpec, InputMultiPath, isdefined)
from nipype.utils.filemanip import split_filename
from nipype.utils.misc import package_check
have_cfflib = True
try:
package_check('cfflib')
except Exception, e:
have_cfflib = False
else:
import cfflib as cf
class CFFConverterInputSpec(BaseInterfaceInputSpec):
graphml_networks = InputMultiPath(File(exists=True), desc='list of graphML networks')
gpickled_networks = InputMultiPath(File(exists=True), desc='list of gpickled Networkx graphs')
gifti_surfaces = InputMultiPath(File(exists=True), desc='list of GIFTI surfaces')
gifti_labels = InputMultiPath(File(exists=True), desc='list of GIFTI labels')
nifti_volumes = InputMultiPath(File(exists=True), desc='list of NIFTI volumes')
tract_files = InputMultiPath(File(exists=True), desc='list of Trackvis fiber files')
timeseries_files = InputMultiPath(File(exists=True), desc='list of HDF5 timeseries files')
script_files = InputMultiPath(File(exists=True), desc='list of script files to include')
data_files = InputMultiPath(File(exists=True), desc='list of external data files (i.e. Numpy, HD5, XML) ')
title = traits.Str(desc='Connectome Title')
creator = traits.Str(desc='Creator')
email = traits.Str(desc='Email address')
publisher = traits.Str(desc='Publisher')
license = traits.Str(desc='License')
rights = traits.Str(desc='Rights')
references = traits.Str(desc='References')
relation = traits.Str(desc='Relation')
species = traits.Str('Homo sapiens',desc='Species',usedefault=True)
description = traits.Str('Created with the Nipype CFF converter', desc='Description', usedefault=True)
out_file = File('connectome.cff', usedefault = True, desc='Output connectome file')
class CFFConverterOutputSpec(TraitedSpec):
connectome_file = File(exists=True, desc='Output connectome file')
class CFFConverter(BaseInterface):
"""
Creates a Connectome File Format (CFF) file from input networks, surfaces, volumes, tracts, etcetera....
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> cvt = cmtk.CFFConverter()
>>> cvt.inputs.title = 'subject 1'
>>> cvt.inputs.gifti_surfaces = ['lh.pial_converted.gii', 'rh.pial_converted.gii']
>>> cvt.inputs.tract_files = ['streamlines.trk']
>>> cvt.inputs.gpickled_networks = ['network0.gpickle']
>>> cvt.run() # doctest: +SKIP
"""
input_spec = CFFConverterInputSpec
output_spec = CFFConverterOutputSpec
def _run_interface(self, runtime):
a = cf.connectome()
if isdefined(self.inputs.title):
a.connectome_meta.set_title(self.inputs.title)
else:
a.connectome_meta.set_title(self.inputs.out_file)
if isdefined(self.inputs.creator):
a.connectome_meta.set_creator(self.inputs.creator)
else:
#Probably only works on some OSes...
a.connectome_meta.set_creator(os.getenv('USER'))
if isdefined(self.inputs.email):
a.connectome_meta.set_email(self.inputs.email)
if isdefined(self.inputs.publisher):
a.connectome_meta.set_publisher(self.inputs.publisher)
if isdefined(self.inputs.license):
a.connectome_meta.set_license(self.inputs.license)
if isdefined(self.inputs.rights):
a.connectome_meta.set_rights(self.inputs.rights)
if isdefined(self.inputs.references):
a.connectome_meta.set_references(self.inputs.references)
if isdefined(self.inputs.relation):
a.connectome_meta.set_relation(self.inputs.relation)
if isdefined(self.inputs.species):
a.connectome_meta.set_species(self.inputs.species)
if isdefined(self.inputs.description):
a.connectome_meta.set_description(self.inputs.description)
a.connectome_meta.set_created(datetime.date.today())
count = 0
if isdefined(self.inputs.graphml_networks):
for ntwk in self.inputs.graphml_networks:
# There must be a better way to deal with the unique name problem
#(i.e. tracks and networks can't use the same name, and previously we were pulling them both from the input files)
ntwk_name = 'Network {cnt}'.format(cnt=count)
a.add_connectome_network_from_graphml(ntwk_name, ntwk)
count += 1
if isdefined(self.inputs.gpickled_networks):
unpickled = []
for ntwk in self.inputs.gpickled_networks:
_, ntwk_name, _ = split_filename(ntwk)
unpickled = nx.read_gpickle(ntwk)
cnet = cf.CNetwork(name = ntwk_name)
cnet.set_with_nxgraph(unpickled)
a.add_connectome_network(cnet)
count += 1
count = 0
if isdefined(self.inputs.tract_files):
for trk in self.inputs.tract_files:
_, trk_name, _ = split_filename(trk)
ctrack = cf.CTrack(trk_name, trk)
a.add_connectome_track(ctrack)
count += 1
count = 0
if isdefined(self.inputs.gifti_surfaces):
for surf in self.inputs.gifti_surfaces:
_, surf_name, _ = split_filename(surf)
csurf = cf.CSurface.create_from_gifti("Surface %d - %s" % (count,surf_name), surf)
csurf.fileformat='Gifti'
csurf.dtype='Surfaceset'
a.add_connectome_surface(csurf)
count += 1
count = 0
if isdefined(self.inputs.gifti_labels):
for label in self.inputs.gifti_labels:
_, label_name, _ = split_filename(label)
csurf = cf.CSurface.create_from_gifti("Surface Label %d - %s" % (count,label_name), label)
csurf.fileformat='Gifti'
csurf.dtype='Labels'
a.add_connectome_surface(csurf)
count += 1
if isdefined(self.inputs.nifti_volumes):
for vol in self.inputs.nifti_volumes:
_, vol_name, _ = split_filename(vol)
cvol = cf.CVolume.create_from_nifti(vol_name,vol)
a.add_connectome_volume(cvol)
if isdefined(self.inputs.script_files):
for script in self.inputs.script_files:
_, script_name, _ = split_filename(script)
cscript = cf.CScript.create_from_file(script_name, script)
a.add_connectome_script(cscript)
if isdefined(self.inputs.data_files):
for data in self.inputs.data_files:
_, data_name, _ = split_filename(data)
cda = cf.CData(name=data_name, src=data, fileformat='NumPy')
if not string.find(data_name,'lengths') == -1:
cda.dtype = 'FinalFiberLengthArray'
if not string.find(data_name,'endpoints') == -1:
cda.dtype = 'FiberEndpoints'
if not string.find(data_name,'labels') == -1:
cda.dtype = 'FinalFiberLabels'
a.add_connectome_data(cda)
a.print_summary()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
cf.save_to_cff(a,op.abspath(name + ext))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
outputs['connectome_file'] = op.abspath(name + ext)
return outputs
class MergeCNetworksInputSpec(BaseInterfaceInputSpec):
in_files = InputMultiPath(File(exists=True), mandatory=True, desc='List of CFF files to extract networks from')
out_file = File('merged_network_connectome.cff', usedefault = True, desc='Output CFF file with all the networks added')
class MergeCNetworksOutputSpec(TraitedSpec):
connectome_file = File(exists=True, desc='Output CFF file with all the networks added')
class MergeCNetworks(BaseInterface):
""" Merges networks from multiple CFF files into one new CFF file.
Example
-------
>>> import nipype.interfaces.cmtk as cmtk
>>> mrg = cmtk.MergeCNetworks()
>>> mrg.inputs.in_files = ['subj1.cff','subj2.cff']
>>> mrg.run() # doctest: +SKIP
"""
input_spec = MergeCNetworksInputSpec
output_spec = MergeCNetworksOutputSpec
def _run_interface(self, runtime):
extracted_networks = []
for i, con in enumerate(self.inputs.in_files):
mycon = cf.load(con)
nets = mycon.get_connectome_network()
for ne in nets:
# here, you might want to skip networks with a given
# metadata information
ne.load()
contitle = mycon.get_connectome_meta().get_title()
ne.set_name( str(i) + ': ' + contitle + ' - ' + ne.get_name() )
ne.set_src(ne.get_name())
extracted_networks.append(ne)
# Add networks to new connectome
newcon = cf.connectome(title = 'All CNetworks', connectome_network = extracted_networks)
# Setting additional metadata
metadata = newcon.get_connectome_meta()
metadata.set_creator('My Name')
metadata.set_email('My Email')
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
cf.save_to_cff(newcon, op.abspath(name + ext))
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
_, name, ext = split_filename(self.inputs.out_file)
if not ext == '.cff':
ext = '.cff'
outputs['connectome_file'] = op.abspath(name + ext)
return outputs
| bsd-3-clause |
cancro7/gem5 | util/pbs/job.py | 77 | 7221 | #!/usr/bin/env python
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Steve Reinhardt
# Ali Saidi
import os, os.path, shutil, signal, socket, sys
from os import environ as env
from os.path import join as joinpath, expanduser
def date():
import time
return time.strftime('%a %b %e %H:%M:%S %Z %Y', time.localtime())
def cleandir(dir):
for root, dirs, files in os.walk(dir, False):
for name in files:
os.remove(joinpath(root, name))
for name in dirs:
os.rmdir(joinpath(root, name))
class rsync:
def __init__(self):
self.sudo = False
self.rsync = 'rsync'
self.compress = False
self.archive = True
self.delete = False
self.options = ''
def do(self, src, dst):
args = []
if self.sudo:
args.append('sudo')
args.append(self.rsync)
if (self.archive):
args.append('-a')
if (self.compress):
args.append('-z')
if (self.delete):
args.append('--delete')
if len(self.options):
args.append(self.options)
args.append(src)
args.append(dst)
return os.spawnvp(os.P_WAIT, args[0], args)
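# Usage sketch for the wrapper above (values illustrative): with compress and delete
# enabled, the call runs "rsync -a -z --delete SRC DST" and returns the exit status.
#   sync = rsync()
#   sync.compress = True
#   sync.delete = True
#   sync.do('poolfs::dist/m5/', '/z/dist/m5/')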
class JobDir(object):
def __init__(self, dir):
self.dir = dir
def file(self, filename):
return joinpath(self.dir, filename)
def create(self):
if os.path.exists(self.dir):
if not os.path.isdir(self.dir):
sys.exit('%s is not a directory. Cannot build job' % self.dir)
else:
os.mkdir(self.dir)
def exists(self):
return os.path.isdir(self.dir)
def clean(self):
cleandir(self.dir)
def hasfile(self, filename):
return os.path.isfile(self.file(filename))
def echofile(self, filename, string):
filename = self.file(filename)
try:
f = file(filename, 'w')
print >>f, string
f.flush()
f.close()
except IOError,e:
sys.exit(e)
def rmfile(self, filename):
filename = self.file(filename)
if os.path.isfile(filename):
os.unlink(filename)
def readval(self, filename):
filename = self.file(filename)
f = file(filename, 'r')
value = f.readline().strip()
f.close()
return value
def setstatus(self, string):
filename = self.file('.status')
try:
f = file(filename, 'a')
print >>f, string
f.flush()
f.close()
except IOError,e:
sys.exit(e)
def getstatus(self):
filename = self.file('.status')
try:
f = file(filename, 'r')
except IOError, e:
return 'none'
# fast forward to the end; an empty file also counts as no status
line = 'none'
for line in f: pass
# the first word on the last line is the status
return line.split(' ')[0]
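# For example, after setstatus('running on host1 on <date>') and a later
# setstatus('success on <date>'), the '.status' file holds two lines and
# getstatus() returns 'success', the first word of its last line.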
def __str__(self):
return self.dir
if __name__ == '__main__':
rootdir = env.setdefault('ROOTDIR', os.getcwd())
pbs_jobid = env['PBS_JOBID']
pbs_jobname = env['PBS_JOBNAME']
basedir = joinpath(rootdir, 'Base')
jobname = env.setdefault('JOBNAME', pbs_jobname)
jobfile = env.setdefault('JOBFILE', joinpath(rootdir, 'Test.py'))
outdir = env.setdefault('OUTPUT_DIR', joinpath(rootdir, jobname))
env['POOLJOB'] = 'True'
if os.path.isdir("/work"):
workbase = "/work"
else:
workbase = "/tmp/"
workdir = joinpath(workbase, '%s.%s' % (env['USER'], pbs_jobid))
host = socket.gethostname()
os.umask(0022)
jobdir = JobDir(outdir)
started = date()
jobdir.echofile('.running', started)
jobdir.rmfile('.queued')
jobdir.echofile('.host', host)
jobdir.setstatus('running on %s on %s' % (host, started))
if os.path.isdir(workdir):
cleandir(workdir)
else:
os.mkdir(workdir)
if False and os.path.isdir('/z/dist'):
sync = rsync()
sync.delete = True
sync.sudo = True
sync.do('poolfs::dist/m5/', '/z/dist/m5/')
try:
os.chdir(workdir)
except OSError,e:
sys.exit(e)
os.symlink(jobdir.file('output'), 'status.out')
args = [ joinpath(basedir, 'm5'), joinpath(basedir, 'run.py') ]
if not len(args):
sys.exit("no arguments")
print 'starting job... %s' % started
print ' '.join(args)
print
sys.stdout.flush()
childpid = os.fork()
if not childpid:
# Execute command
sys.stdin.close()
fd = os.open(jobdir.file("output"),
os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.dup2(fd, sys.stdout.fileno())
os.dup2(fd, sys.stderr.fileno())
os.execvp(args[0], args)
def handler(signum, frame):
if childpid != 0:
os.kill(childpid, signum)
signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGCONT, handler)
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGUSR2, handler)
done = 0
while not done:
try:
thepid,ec = os.waitpid(childpid, 0)
if ec:
print 'Exit code ', ec
status = 'failure'
else:
status = 'success'
done = 1
except OSError:
pass
complete = date()
print '\njob complete... %s' % complete
jobdir.echofile('.%s' % status, complete)
jobdir.rmfile('.running')
jobdir.setstatus('%s on %s' % (status, complete))
| bsd-3-clause |
scizen9/kpy | NPK/Bar.py | 1 | 1338 | """
Create a console bar indicating the amount of time that's passed
"""
import sys
def setup(toolbar_width=40):
""" Initialize console with toolbar_width spaces between [ ]
Args:
toolbar_width: Number of spaces between the brackets
Returns:
toolbar_width [int]"""
# toolbar_width = 40
global n_bar, n_done, upchar
n_bar = toolbar_width
n_done = 0
upchar = '-'
sys.stdout.write("[%s]" % (" " * toolbar_width))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width+1))
return toolbar_width
def update(char='-'):
"""Prints char to indicate an update on the toolbar
Args:
char: The character to print"""
global n_bar, n_done, upchar
upchar = char
if n_done < n_bar:
sys.stdout.write(char)
sys.stdout.flush()
n_done += 1
def done(mapped=False):
"""Carriage return and flush the console"""
global n_bar, n_done, upchar
if mapped:
sys.stdout.write("\r")
sys.stdout.write("[%s]" % (upchar * n_bar))
sys.stdout.write("\n")
sys.stdout.flush()
else:
while n_done < n_bar:
sys.stdout.write(upchar)
sys.stdout.flush()
n_done += 1
sys.stdout.write("]")
sys.stdout.write("\n")
sys.stdout.flush()
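# Minimal demonstration (illustrative only): draw a 20-slot bar, tick it as work
# completes, then close the line.
if __name__ == "__main__":
    import time
    setup(20)
    for _ in range(20):
        time.sleep(0.05)
        update('=')
    done()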
| gpl-2.0 |
plotly/python-api | packages/python/plotly/plotly/graph_objs/histogram2d/colorbar/_title.py | 2 | 6722 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram2d.colorbar"
_path_str = "histogram2d.colorbar.title"
_valid_props = {"font", "side", "text"}
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2d.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.histogram2d.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar. Note that before the existence
of `title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2d.colorbar.Title`
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram2d.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2d.colorbar.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("side", None)
_v = side if side is not None else _v
if _v is not None:
self["side"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
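# Usage sketch (values illustrative): the class accepts keyword arguments or a dict.
#   title = Title(text="Counts", side="right", font=dict(family="Arial", size=14))
#   title.text   # -> "Counts"
# A plain dict with the same keys, e.g. dict(text="Counts", side="right"), may be
# passed wherever a histogram2d colorbar title is expected.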
| mit |
patochectp/navitia | source/jormungandr/jormungandr/interfaces/v1/ResourceUri.py | 2 | 9951 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from flask_restful import abort
from jormungandr.interfaces.v1.converters_collection_type import collections_to_resource_type
from jormungandr.interfaces.v1.converters_collection_type import resource_type_to_collection
from jormungandr.interfaces.v1.StatedResource import StatedResource
from jormungandr.interfaces.v1.make_links import add_id_links, clean_links, add_pagination_links
from functools import wraps
from collections import deque
from flask import url_for
from flask_restful.utils import unpack
from jormungandr.authentication import authentication_required
from six.moves import map
def protect(uri):
"""
we protect the uri so that it can contain special characters
"""
return '"' + uri.replace('"', '\\"') + '"'
class ResourceUri(StatedResource):
def __init__(self, authentication=True, links=True, *args, **kwargs):
StatedResource.__init__(self, *args, **kwargs)
self.region = None
if links:
self.get_decorators.append(add_id_links())
self.get_decorators.append(add_computed_resources(self))
self.get_decorators.append(add_pagination_links())
self.get_decorators.append(clean_links())
if authentication:
# some rare APIs (e.g. journey) must handle authentication themselves, so it is deactivated here;
# by default ALWAYS use authentication=True
self.get_decorators.append(authentication_required)
def get_filter(self, items, args):
# handle headsign
if args.get("headsign"):
f = u"vehicle_journey.has_headsign({})".format(protect(args["headsign"]))
if args.get("filter"):
args["filter"] = '({}) and {}'.format(args["filter"], f)
else:
args["filter"] = f
filter_list = ['({})'.format(args["filter"])] if args.get("filter") else []
type_ = None
for item in items:
if not type_:
if item != "coord":
if item == "calendars":
type_ = 'calendar'
else:
if item not in collections_to_resource_type:
abort(400, message="unknown type: {}".format(item))
type_ = collections_to_resource_type[item]
else:
type_ = "coord"
else:
if type_ == "coord" or type_ == "address":
splitted_coord = item.split(";")
if len(splitted_coord) == 2:
lon, lat = splitted_coord
object_type = "stop_point"
if self.collection == "pois":
object_type = "poi"
filter_ = '{obj}.coord DWITHIN({lon},{lat},{distance})'.format(
obj=object_type, lon=lon, lat=lat, distance=args.get('distance', 200)
)
filter_list.append(filter_)
else:
filter_list.append(type_ + ".uri=" + protect(item))
else:
filter_list.append(type_ + ".uri=" + protect(item))
type_ = None
# handle tags
tags = args.get("tags[]", [])
if tags:
filter_list.append('disruption.tags({})'.format(' ,'.join([protect(t) for t in tags])))
return " and ".join(filter_list)
class add_computed_resources(object):
def __init__(self, resource):
self.resource = resource
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
response = f(*args, **kwargs)
if isinstance(response, tuple):
data, code, header = unpack(response)
else:
data = response
if 'links' not in data:
return response
collection = None
kwargs["_external"] = True
templated = True
for key in data:
if key == 'disruptions' and collection is not None:
# disruption is a special case since it can be present in all responses
continue
if key in collections_to_resource_type:
collection = key
if key in resource_type_to_collection:
collection = resource_type_to_collection[key]
if collection is None:
return response
kwargs["uri"] = collection + '/'
if "id" in kwargs:
kwargs["uri"] += kwargs["id"]
del kwargs["id"]
templated = False
else:
kwargs["uri"] += '{' + collection + ".id}"
if (
collection in ['stop_areas', 'stop_points', 'lines', 'routes', 'addresses']
and "region" in kwargs
):
for api in ['route_schedules', 'stop_schedules', 'arrivals', 'departures', "places_nearby"]:
data['links'].append(
{"href": url_for("v1." + api, **kwargs), "rel": api, "type": api, "templated": templated}
)
if collection in ['stop_areas', 'stop_points', 'addresses']:
data['links'].append(
{
"href": url_for("v1.journeys", **kwargs),
"rel": "journeys",
"type": "journey",
"templated": templated,
}
)
# for lines we add the link to the calendars
if 'region' in kwargs:
if collection == 'lines':
data['links'].append(
{
"href": url_for("v1.calendars", **kwargs),
"rel": "calendars",
"type": "calendar",
"templated": templated,
}
)
if collection in ['stop_areas', 'lines', 'networks']:
data['links'].append(
{
"href": url_for("v1.traffic_reports", **kwargs),
"rel": "disruptions",
"type": "disruption",
"templated": templated,
}
)
if isinstance(response, tuple):
return data, code, header
else:
return data
return wrapper
class complete_links(object):
# This list should not change
EXPECTED_ITEMS = set(['category', 'id', 'internal', 'rel', 'type'])
def __init__(self, resource):
self.resource = resource
def make_and_get_link(self, elem, collect):
if collect == "notes":
return {"id": elem['id'], "category": elem['category'], "value": elem['value'], "type": collect}
type_ = "Add" if elem['except_type'] == 0 else "Remove"
return {"id": elem['id'], "date": elem['date'], "type": type_}
def get_links(self, data):
queue = deque()
result = {"notes": [], "exceptions": []}
queue.extend(list(data.values()))
while queue:
elem = queue.pop()
if isinstance(elem, (list, tuple)):
queue.extend(elem)
elif hasattr(elem, 'keys'):
collect = elem.get('type')
if collect in result:
link = self.make_and_get_link(elem, collect)
if link.get('id') not in [l.get('id') for l in result[collect]]:
result[collect].append(link)
# Delete all items from link not in expected_keys
del_keys = set(elem.keys()).difference(self.EXPECTED_ITEMS)
if len(del_keys):
list(map(elem.pop, del_keys))
else:
queue.extend(list(elem.values()))
return result
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
objects = f(*args, **kwargs)
if isinstance(objects, tuple):
data, code, header = unpack(objects)
else:
data = objects
if self.resource.region:
# Add notes and exceptions
data.update(self.get_links(data))
if isinstance(objects, tuple):
return data, code, header
else:
return data
return wrapper
| agpl-3.0 |
blacklin/kbengine | kbe/src/lib/python/Lib/asyncio/base_subprocess.py | 63 | 6135 | import collections
import subprocess
from . import protocols
from . import transports
from .coroutines import coroutine
from .log import logger
class BaseSubprocessTransport(transports.SubprocessTransport):
def __init__(self, loop, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
super().__init__(extra)
self._protocol = protocol
self._loop = loop
self._pid = None
self._pipes = {}
if stdin == subprocess.PIPE:
self._pipes[0] = None
if stdout == subprocess.PIPE:
self._pipes[1] = None
if stderr == subprocess.PIPE:
self._pipes[2] = None
self._pending_calls = collections.deque()
self._finished = False
self._returncode = None
self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
stderr=stderr, bufsize=bufsize, **kwargs)
self._pid = self._proc.pid
self._extra['subprocess'] = self._proc
if self._loop.get_debug():
if isinstance(args, (bytes, str)):
program = args
else:
program = args[0]
logger.debug('process %r created: pid %s',
program, self._pid)
def __repr__(self):
info = [self.__class__.__name__, 'pid=%s' % self._pid]
if self._returncode is not None:
info.append('returncode=%s' % self._returncode)
stdin = self._pipes.get(0)
if stdin is not None:
info.append('stdin=%s' % stdin.pipe)
stdout = self._pipes.get(1)
stderr = self._pipes.get(2)
if stdout is not None and stderr is stdout:
info.append('stdout=stderr=%s' % stdout.pipe)
else:
if stdout is not None:
info.append('stdout=%s' % stdout.pipe)
if stderr is not None:
info.append('stderr=%s' % stderr.pipe)
return '<%s>' % ' '.join(info)
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
raise NotImplementedError
def _make_write_subprocess_pipe_proto(self, fd):
raise NotImplementedError
def _make_read_subprocess_pipe_proto(self, fd):
raise NotImplementedError
def close(self):
for proto in self._pipes.values():
proto.pipe.close()
if self._returncode is None:
self.terminate()
def get_pid(self):
return self._pid
def get_returncode(self):
return self._returncode
def get_pipe_transport(self, fd):
if fd in self._pipes:
return self._pipes[fd].pipe
else:
return None
def send_signal(self, signal):
self._proc.send_signal(signal)
def terminate(self):
self._proc.terminate()
def kill(self):
self._proc.kill()
@coroutine
def _post_init(self):
proc = self._proc
loop = self._loop
if proc.stdin is not None:
_, pipe = yield from loop.connect_write_pipe(
lambda: WriteSubprocessPipeProto(self, 0),
proc.stdin)
self._pipes[0] = pipe
if proc.stdout is not None:
_, pipe = yield from loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 1),
proc.stdout)
self._pipes[1] = pipe
if proc.stderr is not None:
_, pipe = yield from loop.connect_read_pipe(
lambda: ReadSubprocessPipeProto(self, 2),
proc.stderr)
self._pipes[2] = pipe
assert self._pending_calls is not None
self._loop.call_soon(self._protocol.connection_made, self)
for callback, data in self._pending_calls:
self._loop.call_soon(callback, *data)
self._pending_calls = None
def _call(self, cb, *data):
if self._pending_calls is not None:
self._pending_calls.append((cb, data))
else:
self._loop.call_soon(cb, *data)
def _pipe_connection_lost(self, fd, exc):
self._call(self._protocol.pipe_connection_lost, fd, exc)
self._try_finish()
def _pipe_data_received(self, fd, data):
self._call(self._protocol.pipe_data_received, fd, data)
def _process_exited(self, returncode):
assert returncode is not None, returncode
assert self._returncode is None, self._returncode
if self._loop.get_debug():
logger.info('%r exited with return code %r',
self, returncode)
self._returncode = returncode
self._call(self._protocol.process_exited)
self._try_finish()
def _try_finish(self):
assert not self._finished
if self._returncode is None:
return
if all(p is not None and p.disconnected
for p in self._pipes.values()):
self._finished = True
self._loop.call_soon(self._call_connection_lost, None)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._proc = None
self._protocol = None
self._loop = None
class WriteSubprocessPipeProto(protocols.BaseProtocol):
def __init__(self, proc, fd):
self.proc = proc
self.fd = fd
self.pipe = None
self.disconnected = False
def connection_made(self, transport):
self.pipe = transport
def __repr__(self):
return ('<%s fd=%s pipe=%r>'
% (self.__class__.__name__, self.fd, self.pipe))
def connection_lost(self, exc):
self.disconnected = True
self.proc._pipe_connection_lost(self.fd, exc)
def pause_writing(self):
self.proc._protocol.pause_writing()
def resume_writing(self):
self.proc._protocol.resume_writing()
class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
protocols.Protocol):
def data_received(self, data):
self.proc._pipe_data_received(self.fd, data)
| lgpl-3.0 |
cccfran/sympy | sympy/physics/hep/gamma_matrices.py | 19 | 30764 | from sympy import S
from sympy.tensor.tensor import TensorIndexType, TensorIndex,\
TensMul, TensorHead, tensorsymmetry, TensorType,\
TensAdd, tensor_mul, get_lines
from sympy.core.containers import Tuple
DiracSpinorIndex = TensorIndexType('DiracSpinorIndex', dim=4, dummy_fmt="S")
class _LorentzContainer(object):
"""
Helper to collect LorentzIndex indices in various dimensions.
It collects the LorentzIndex TensorIndexType instances that have already been created,
and stores them in a dict()
"""
lorentz_types = dict()
def __new__(cls, dim=4, eps_dim=None, dummy_fmt="L"):
if (dim, eps_dim) in _LorentzContainer.lorentz_types:
return _LorentzContainer.lorentz_types[(dim, eps_dim)]
new_L = TensorIndexType("LorentzIndex", dim=dim, eps_dim=eps_dim, dummy_fmt=dummy_fmt)
_LorentzContainer.lorentz_types[(dim, eps_dim)] = new_L
return new_L
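# Small illustration of the caching behaviour (sketch):
#   _LorentzContainer(dim=4) is _LorentzContainer(dim=4)   # True: same cached TensorIndexType
#   _LorentzContainer(dim=3, eps_dim=3)                    # a separate 3-dimensional type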
class GammaMatrixHead(TensorHead):
r"""
Class to wrap a ``TensorHead`` for gamma matrices.
``dim`` dimension of the gamma matrix.
``eps_dim`` correction for dimensional regularization, use None if not needed.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrixHead
>>> from sympy.tensor.tensor import tensor_indices
>>> G = GammaMatrixHead()
>>> i = tensor_indices('i', G.LorentzIndex)
>>> G(i)
gamma(i, auto_left, -auto_right)
Note that there is already an instance of GammaMatrixHead in four dimensions:
GammaMatrix, which is simply declared as
``GammaMatrix = GammaMatrixHead()``
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix
>>> from sympy.tensor.tensor import tensor_indices
>>> i = tensor_indices('i', GammaMatrix.LorentzIndex)
>>> GammaMatrix(i)
gamma(i, auto_left, -auto_right)
To access the metric tensor
>>> GammaMatrix.LorentzIndex.metric
metric(LorentzIndex,LorentzIndex)
"""
_gmhd = dict()
def __new__(cls, dim=4, eps_dim=4):
key = (dim, eps_dim)
if key in GammaMatrixHead._gmhd:
return GammaMatrixHead._gmhd[key]
lorentz = _LorentzContainer(*key)
gmh = TensorHead.__new__(cls, "gamma", TensorType(Tuple(lorentz, DiracSpinorIndex, DiracSpinorIndex), tensorsymmetry([1], [1], [1])), comm=2, matrix_behavior=True)
GammaMatrixHead._gmhd[key] = gmh
gmh.LorentzIndex = lorentz
return gmh
@staticmethod
def extract_type_tens(expression):
"""
Extract from a ``TensExpr`` all elements of this type.
Returns two tensor expressions:
* the first contains all ``TensorHead`` of this type.
* the second contains all remaining.
"""
sp = expression.split()
# Collect all gamma matrices of the same dimension
new_expr = S.One
residual_expr = S.One
for i in sp:
if isinstance(i.args[1][0], GammaMatrixHead):
new_expr *= i
else:
residual_expr *= i
return new_expr, residual_expr
@staticmethod
def simplify_this_type(expression):
extracted_expr, residual_expr = GammaMatrixHead.extract_type_tens(expression)
res_expr = GammaMatrixHead._simplify_single_line(extracted_expr)
return res_expr * residual_expr
@staticmethod
def simplify_gpgp(ex, sort=True):
"""
simplify products ``G(i)*p(-i)*G(j)*p(-j) -> p(i)*p(-i)``
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p, q = tensorhead('p, q', [G.LorentzIndex], [[1]])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', G.LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> G.simplify_gpgp(ps*qs*qs)
gamma(-L_0, auto_left, -auto_right)*p(L_0)*q(L_1)*q(-L_1)
"""
def _simplify_gpgp(ex):
tids = ex._tids
components = tids.components
a = []
for i in range(len(components)):
if not isinstance(components[i], GammaMatrixHead):
continue
dum = tids.dum
for dx in dum:
if dx[2] == i:
p_pos1 = dx[3]
elif dx[3] == i:
p_pos1 = dx[2]
else:
continue
comp1 = components[p_pos1]
if comp1.comm == 0 and comp1.rank == 1:
a.append((i, p_pos1))
if not a:
return ex
elim = set()
tv = []
hit = True
coeff = S.One
ta = None
while hit:
hit = False
for i, ai in enumerate(a[:-1]):
if ai[0] in elim:
continue
if ai[0] != a[i + 1][0] - 1:
continue
if components[ai[1]] != components[a[i + 1][1]]:
continue
elim.add(ai[0])
elim.add(ai[1])
elim.add(a[i + 1][0])
elim.add(a[i + 1][1])
if not ta:
ta = ex.split()
mu = TensorIndex('mu', GammaMatrix.LorentzIndex)
ind1 = ta[ai[0]].args[-1][1]
ind2 = ta[ai[0] + 1].args[-1][2]
hit = True
if i == 0:
coeff = ex.coeff
tx = components[ai[1]](mu)*components[ai[1]](-mu)
tv.append(tx*DiracSpinorIndex.delta(ind1, ind2))
break
if tv:
a = [x for j, x in enumerate(ta) if j not in elim]
a.extend(tv)
t = tensor_mul(*a)*coeff
t = t.contract_metric(DiracSpinorIndex.delta)
return t
else:
return ex
if sort:
ex = ex.sorted_components()
while 1:
t = _simplify_gpgp(ex)
if t != ex:
ex = t
else:
return t
@staticmethod
def simplify_lines(ex):
"""
simplify a product of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix, DiracSpinorIndex
>>> from sympy.tensor.tensor import tensor_indices
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', GammaMatrix.LorentzIndex)
>>> s0,s1,s2,s3,s4,s5,s6,s7 = tensor_indices('s0:8', DiracSpinorIndex)
>>> G = GammaMatrix
>>> t = G(i1,s1,-s2)*G(i4,s7,-s6)*G(i2,s2,-s3)*G(i3,s4,-s5)*G(i5,s6,-s7)
>>> G.simplify_lines(t)
4*gamma(i3, s4, -s5)*gamma(i1, s1, -S_0)*gamma(i2, S_0, -s3)*metric(i4, i5)
"""
lines, traces, rest = get_lines(ex, DiracSpinorIndex)
a = ex.split()
trest = tensor_mul(*[x for i, x in enumerate(a) if i in rest])
tlines = []
for line in lines:
first = a[line[0]]
last = a[line[-1]]
first = [x[0] for x in first.free if x[1] == 1][0]
last = [x[0] for x in last.free if x[1] == 2][0]
tx = tensor_mul(*[x for i, x in enumerate(a) if i in line])
tx1 = GammaMatrixHead._simplify_single_line(tx)
tlines.append(tx1)
traces = [GammaMatrix._trace_single_line(tensor_mul(*[x for i, x in enumerate(a) if i in line])) for line in traces]
res = tensor_mul(*([trest] + tlines + traces))
return res
def gamma_trace(self, t):
"""
trace of a single line of gamma matrices
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p, q = tensorhead('p, q', [G.LorentzIndex], [[1]])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', G.LorentzIndex)
>>> ps = p(i0)*G(-i0)
>>> qs = q(i0)*G(-i0)
>>> G.gamma_trace(G(i0)*G(i1))
4*metric(i0, i1)
>>> G.gamma_trace(ps*ps) - 4*p(i0)*p(-i0)
0
>>> G.gamma_trace(ps*qs + ps*ps) - 4*p(i0)*p(-i0) - 4*p(i0)*q(-i0)
0
"""
#assert any(x == DiracSpinorIndex.auto_right for x, p, c, in t._tids.free)
if isinstance(t, TensAdd):
res = TensAdd(*[self._trace_single_line(x) for x in t.args])
return res
t = self._simplify_single_line(t)
res = self._trace_single_line(t)
return res
@staticmethod
def _simplify_single_line(expression):
"""
Simplify single-line product of gamma matrices.
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, DiracSpinorIndex as DS
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p = tensorhead('p', [G.LorentzIndex], [[1]])
>>> i0,i1 = tensor_indices('i0:2', G.LorentzIndex)
>>> G._simplify_single_line(G(i0)*G(i1)*p(-i1)*G(-i0)) + 2*G(i0)*p(-i0)
0
"""
t1, t2 = GammaMatrixHead.extract_type_tens(expression)
if t1 != 1:
t1 = GammaMatrixHead._kahane_simplify(t1.coeff, t1._tids)
res = t1*t2
return res
def _trace_single_line(self, t):
"""
Evaluate the trace of a single gamma matrix line inside a ``TensExpr``.
Notes
=====
If there are ``DiracSpinorIndex.auto_left`` and ``DiracSpinorIndex.auto_right``
indices trace over them; otherwise traces are not implied (explain)
Examples
========
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G
>>> from sympy.tensor.tensor import tensor_indices, tensorhead
>>> p = tensorhead('p', [G.LorentzIndex], [[1]])
>>> i0,i1,i2,i3,i4,i5 = tensor_indices('i0:6', G.LorentzIndex)
>>> G._trace_single_line(G(i0)*G(i1))
4*metric(i0, i1)
>>> G._trace_single_line(G(i0)*p(-i0)*G(i1)*p(-i1)) - 4*p(i0)*p(-i0)
0
"""
def _trace_single_line1(t):
t = t.sorted_components()
components = t.components
ncomps = len(components)
g = self.LorentzIndex.metric
sg = DiracSpinorIndex.delta
# gamma matrices are in a[i:j]
hit = 0
for i in range(ncomps):
if isinstance(components[i], GammaMatrixHead):
hit = 1
break
for j in range(i + hit, ncomps):
if not isinstance(components[j], GammaMatrixHead):
break
else:
j = ncomps
numG = j - i
if numG == 0:
spinor_free = [_[0] for _ in t._tids.free if _[0].tensortype is DiracSpinorIndex]
tcoeff = t.coeff
if spinor_free == [DiracSpinorIndex.auto_left, -DiracSpinorIndex.auto_right]:
t = t*DiracSpinorIndex.delta(-DiracSpinorIndex.auto_left, DiracSpinorIndex.auto_right)
t = t.contract_metric(sg)
return t/tcoeff if tcoeff else t
else:
return t/tcoeff if tcoeff else t
if numG % 2 == 1:
return TensMul.from_data(S.Zero, [], [], [])
elif numG > 4:
t = t.substitute_indices((-DiracSpinorIndex.auto_right, -DiracSpinorIndex.auto_index), (DiracSpinorIndex.auto_left, DiracSpinorIndex.auto_index))
a = t.split()
ind1, lind1, rind1 = a[i].args[-1]
ind2, lind2, rind2 = a[i + 1].args[-1]
aa = a[:i] + a[i + 2:]
t1 = tensor_mul(*aa)*g(ind1, ind2)*sg(lind1, rind1)*sg(lind2, rind2)
t1 = t1.contract_metric(g)
t1 = t1.contract_metric(sg)
args = [t1]
sign = 1
for k in range(i + 2, j):
sign = -sign
ind2, lind2, rind2 = a[k].args[-1]
aa = a[:i] + a[i + 1:k] + a[k + 1:]
t2 = sign*tensor_mul(*aa)*g(ind1, ind2)*sg(lind1, rind1)*sg(lind2, rind2)
t2 = t2.contract_metric(g)
t2 = t2.contract_metric(sg)
t2 = GammaMatrixHead.simplify_gpgp(t2, False)
args.append(t2)
t3 = TensAdd(*args)
#aa = _tensorlist_contract_metric(aa, g(ind1, ind2))
#t3 = t3.canon_bp()
t3 = self._trace_single_line(t3)
return t3
else:
a = t.split()
if len(t.components) == 1:
if t.components[0] is DiracSpinorIndex.delta:
return 4 # FIXME only for D=4
t1 = self._gamma_trace1(*a[i:j])
a2 = a[:i] + a[j:]
t2 = tensor_mul(*a2)
t3 = t1*t2
if not t3:
return t3
t3 = t3.contract_metric(g)
return t3
if isinstance(t, TensAdd):
a = [x.coeff*_trace_single_line1(x) for x in t.args]
return TensAdd(*a)
elif isinstance(t, TensMul):
r = t.coeff*_trace_single_line1(t)
return r
else:
return t
def _gamma_trace1(self, *a):
gctr = 4 # FIXME specific for d=4
g = self.LorentzIndex.metric
if not a:
return gctr
n = len(a)
if n%2 == 1:
#return TensMul.from_data(S.Zero, [], [], [])
return S.Zero
if n == 2:
ind0 = a[0].args[-1][0]
ind1 = a[1].args[-1][0]
return gctr*g(ind0, ind1)
if n == 4:
ind0 = a[0].args[-1][0]
ind1 = a[1].args[-1][0]
ind2 = a[2].args[-1][0]
ind3 = a[3].args[-1][0]
return gctr*(g(ind0, ind1)*g(ind2, ind3) - \
g(ind0, ind2)*g(ind1, ind3) + g(ind0, ind3)*g(ind1, ind2))
@staticmethod
def _kahane_simplify(coeff, tids):
r"""
This function cancels contracted elements in a product of four
dimensional gamma matrices, resulting in an expression equal to the given
one, without the contracted gamma matrices.
Parameters
==========
`coeff` the coefficient of the tensor expression.
`tids` TIDS object representing the gamma matrix expression to simplify.
Notes
=====
If spinor indices are given, the matrices must be given in
the order given in the product.
Algorithm
=========
The idea behind the algorithm is to use some well-known identities,
i.e., for contractions enclosing an even number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N}} \gamma_\mu = 2 (\gamma_{a_{2N}} \gamma_{a_1} \cdots \gamma_{a_{2N-1}} + \gamma_{a_{2N-1}} \cdots \gamma_{a_1} \gamma_{a_{2N}} )`
for an odd number of `\gamma` matrices
`\gamma^\mu \gamma_{a_1} \cdots \gamma_{a_{2N+1}} \gamma_\mu = -2 \gamma_{a_{2N+1}} \gamma_{a_{2N}} \cdots \gamma_{a_{1}}`
Instead of repeatedly applying these identities to cancel out all contracted indices,
it is possible to recognize the links that would result from such an operation,
the problem is thus reduced to a simple rearrangement of free gamma matrices.
Examples
========
When using, always remember that the original expression coefficient
has to be handled separately
>>> from sympy.physics.hep.gamma_matrices import GammaMatrix as G, DiracSpinorIndex as DS
>>> from sympy.tensor.tensor import tensor_indices, tensorhead, TensMul, TensAdd
>>> i0, i1, i2 = tensor_indices('i0:3', G.LorentzIndex)
>>> s0,s1,s2,s3,s4,s5 = tensor_indices('s0:6', DS)
>>> ta = G(i0)*G(-i0)
>>> G._kahane_simplify(ta.coeff, ta._tids) - 4*DS.delta(DS.auto_left, -DS.auto_right)
0
>>> tb = G(i0)*G(i1)*G(-i0)
>>> G._kahane_simplify(tb.coeff, tb._tids)
-2*gamma(i1, auto_left, -auto_right)
>>> t = G(i0, s0, -s1)*G(-i0,s1,-s2)
>>> G._kahane_simplify(t.coeff, t._tids) - 4*DS.delta(s0, -s2)
0
>>> t = G(i0, s0, -s1)*G(-i0,s1,-s0)
>>> G._kahane_simplify(t.coeff, t._tids)
16
If there are no contractions, the same expression is returned
>>> tc = 3*G(i0)*G(i1)
>>> G._kahane_simplify(tc.coeff, tc._tids)
3*gamma(i0, auto_left, -S_0)*gamma(i1, S_0, -auto_right)
References
==========
[1] Algorithm for Reducing Contracted Products of gamma Matrices, Joseph Kahane, Journal of Mathematical Physics, Vol. 9, No. 10, October 1968.
"""
for c in tids.components:
    if not isinstance(c, GammaMatrixHead):
        raise ValueError('use only gamma matrices')
n = len(tids.components)
for p0, p1, c0, c1 in tids.dum:
if p0 == 0:
continue
dc = abs(c0 - c1)
if dc not in (1, n - 1):
raise ValueError('wrong gamma matrix ordering')
free = [_ for _ in tids.free if _[1] == 0]
spinor_free = [_ for _ in tids.free if _[1] != 0]
if len(spinor_free) == 2:
spinor_free.sort(key=lambda x: x[2])
assert spinor_free[0][1] == 1 and spinor_free[-1][1] == 2
assert spinor_free[0][2] == 0
elif spinor_free:
raise ValueError('spinor indices do not match')
dum = sorted([_ for _ in tids.dum if _[0] == 0 and _[1] == 0])
if len(dum) == 0: # or GammaMatrixHead:
# no contractions in `expression`, just return it.
return TensMul.from_TIDS(coeff, tids)
# find the `first_dum_pos`, i.e. the position of the first contracted
# gamma matrix, Kahane's algorithm as described in his paper requires the
# gamma matrix expression to start with a contracted gamma matrix, this is
# a workaround which ignores possible initial free indices, and re-adds
# them later.
dum_zip = list(zip(*dum))[2:]
first_dum_pos = min(min(dum_zip[0]), min(dum_zip[1]))
total_number = len(free) + len(dum)*2
number_of_contractions = len(dum)
free_pos = [None]*total_number
for i in free:
free_pos[i[2]] = i[0]
# `index_is_free` is a list of booleans, to identify index position
# and whether that index is free or dummy.
index_is_free = [False]*total_number
for i, indx in enumerate(free):
if indx[1] != 0:
raise ValueError("indx[1] should be equal to 0")
index_is_free[indx[2]] = True
# `links` is a dictionary containing the graph described in Kahane's paper,
# to every key correspond one or two values, representing the linked indices.
# All values in `links` are integers, negative numbers are used in the case
# where it is necessary to insert gamma matrices between free indices, in
# order to make Kahane's algorithm work (see paper).
links = dict()
for i in range(first_dum_pos, total_number):
links[i] = []
# `cum_sign` is a step variable to mark the sign of every index, see paper.
cum_sign = -1
# `cum_sign_list` keeps storage for all `cum_sign` (every index).
cum_sign_list = [None]*total_number
block_free_count = 0
# multiply `resulting_coeff` by the coefficient parameter, the rest
# of the algorithm ignores a scalar coefficient.
resulting_coeff = S.One * coeff
# initialize a list of lists of indices. The outer list will contain all
# additive tensor expressions, while the inner list will contain the
# free indices (rearranged according to the algorithm).
resulting_indices = [[]]
# start to count the `connected_components`, which together with the number
# of contractions, determines a -1 or +1 factor to be multiplied.
connected_components = 1
# First loop: here we fill `cum_sign_list`, and draw the links
# among consecutive indices (they are stored in `links`). Links among
# non-consecutive indices will be drawn later.
for i, is_free in enumerate(index_is_free):
# if `expression` starts with free indices, they are ignored here;
# they are later added as they are to the beginning of all
# `resulting_indices` list of lists of indices.
if i < first_dum_pos:
continue
if is_free:
block_free_count += 1
# if previous index was free as well, draw an arch in `links`.
if block_free_count > 1:
links[i - 1].append(i)
links[i].append(i - 1)
else:
# Change the sign of the index (`cum_sign`) if the number of free
# indices preceding it is even.
cum_sign *= 1 if (block_free_count % 2) else -1
if block_free_count == 0 and i != first_dum_pos:
# check if there are two consecutive dummy indices:
# in this case create virtual indices with negative position,
# these "virtual" indices represent the insertion of two
# gamma^0 matrices to separate consecutive dummy indices, as
# Kahane's algorithm requires dummy indices to be separated by
# free indices. The product of two gamma^0 matrices is unity,
# so the new expression being examined is the same as the
# original one.
if cum_sign == -1:
links[-1-i] = [-1-i+1]
links[-1-i+1] = [-1-i]
if (i - cum_sign) in links:
if i != first_dum_pos:
links[i].append(i - cum_sign)
if block_free_count != 0:
if i - cum_sign < len(index_is_free):
if index_is_free[i - cum_sign]:
links[i - cum_sign].append(i)
block_free_count = 0
cum_sign_list[i] = cum_sign
# The previous loop has only created links between consecutive free indices,
# it is necessary to properly create links among dummy (contracted) indices,
# according to the rules described in Kahane's paper. There is only one exception
# to Kahane's rules: the negative indices, which handle the case of some
# consecutive free indices (Kahane's paper just describes dummy indices
# separated by free indices, hinting that free indices can be added without
# altering the expression result).
for i in dum:
if i[0] != 0:
raise ValueError("i[0] should be 0")
if i[1] != 0:
raise ValueError("i[1] should be 0")
# get the positions of the two contracted indices:
pos1 = i[2]
pos2 = i[3]
# create Kahane's upper links, i.e. the upper arcs between dummy
# (i.e. contracted) indices:
links[pos1].append(pos2)
links[pos2].append(pos1)
# create Kahane's lower links, this corresponds to the arcs below
# the line described in the paper:
# first we move `pos1` and `pos2` according to the sign of the indices:
linkpos1 = pos1 + cum_sign_list[pos1]
linkpos2 = pos2 + cum_sign_list[pos2]
# otherwise, perform some checks before creating the lower arcs:
# make sure we are not exceeding the total number of indices:
if linkpos1 >= total_number:
continue
if linkpos2 >= total_number:
continue
# make sure we are not below the first dummy index in `expression`:
if linkpos1 < first_dum_pos:
continue
if linkpos2 < first_dum_pos:
continue
# check if the previous loop created "virtual" indices between dummy
# indices, in such a case relink `linkpos1` and `linkpos2`:
if (-1-linkpos1) in links:
linkpos1 = -1-linkpos1
if (-1-linkpos2) in links:
linkpos2 = -1-linkpos2
# move only if not next to free index:
if linkpos1 >= 0 and not index_is_free[linkpos1]:
linkpos1 = pos1
if linkpos2 >=0 and not index_is_free[linkpos2]:
linkpos2 = pos2
# create the lower arcs:
if linkpos2 not in links[linkpos1]:
links[linkpos1].append(linkpos2)
if linkpos1 not in links[linkpos2]:
links[linkpos2].append(linkpos1)
# This loop starts from the `first_dum_pos` index (first dummy index)
# walks through the graph deleting the visited indices from `links`,
# it adds a gamma matrix for every free index in encounters, while it
# completely ignores dummy indices and virtual indices.
pointer = first_dum_pos
previous_pointer = 0
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
else:
break
if pointer == previous_pointer:
break
if pointer >=0 and free_pos[pointer] is not None:
for ri in resulting_indices:
ri.append(free_pos[pointer])
# The following loop removes the remaining connected components in `links`.
# If there are free indices inside a connected component, it gives a
# contribution to the resulting expression given by the factor
# `gamma_a gamma_b ... gamma_z + gamma_z ... gamma_b gamma_a`, in Kahanes's
# paper represented as {gamma_a, gamma_b, ... , gamma_z},
# virtual indices are ignored. The variable `connected_components` is
# increased by one for every connected component this loop encounters.
# If the connected component has virtual and dummy indices only
# (no free indices), it contributes to `resulting_indices` by a factor of two.
# The multiplication by two is a result of the
# factor {gamma^0, gamma^0} = 2 I, as it appears in Kahane's paper.
# Note: curly brackets are meant as in the paper, as a generalized
# multi-element anticommutator!
while links:
connected_components += 1
pointer = min(links.keys())
previous_pointer = pointer
# the inner loop erases the visited indices from `links`, and it adds
# all free indices to `prepend_indices` list, virtual indices are
# ignored.
prepend_indices = []
while True:
if pointer in links:
next_ones = links.pop(pointer)
else:
break
if previous_pointer in next_ones:
if len(next_ones) > 1:
next_ones.remove(previous_pointer)
previous_pointer = pointer
if next_ones:
pointer = next_ones[0]
if pointer >= first_dum_pos and free_pos[pointer] is not None:
prepend_indices.insert(0, free_pos[pointer])
# if `prepend_indices` is void, it means there are no free indices
# in the loop (and it can be shown that there must be a virtual index),
# loops of virtual indices only contribute by a factor of two:
if len(prepend_indices) == 0:
resulting_coeff *= 2
# otherwise, add the free indices in `prepend_indices` to
# the `resulting_indices`:
else:
expr1 = prepend_indices
expr2 = list(reversed(prepend_indices))
resulting_indices = [expri + ri for ri in resulting_indices for expri in (expr1, expr2)]
# sign correction, as described in Kahane's paper:
resulting_coeff *= -1 if (number_of_contractions - connected_components + 1) % 2 else 1
# power of two factor, as described in Kahane's paper:
resulting_coeff *= 2**(number_of_contractions)
# If `first_dum_pos` is not zero, it means that there are leading free gamma
# matrices in front of `expression`, so multiply by them:
for i in range(0, first_dum_pos):
[ri.insert(0, free_pos[i]) for ri in resulting_indices]
resulting_expr = S.Zero
for i in resulting_indices:
temp_expr = S.One
for j in i:
temp_expr *= GammaMatrix(j)
resulting_expr += temp_expr
t = resulting_coeff * resulting_expr
t1 = None
if isinstance(t, TensAdd):
t1 = t.args[0]
elif isinstance(t, TensMul):
t1 = t
if t1:
spinor_free1 = [_ for _ in t1._tids.free if _[1] != 0]
if spinor_free1:
if spinor_free:
t = t.substitute_indices((DiracSpinorIndex.auto_left, spinor_free[0][0]), (-DiracSpinorIndex.auto_right, spinor_free[-1][0]))
else:
# FIXME trace
t = t*DiracSpinorIndex.delta(DiracSpinorIndex.auto_right, -DiracSpinorIndex.auto_left)
t = GammaMatrix.simplify_lines(t)
else:
if spinor_free:
t = t*DiracSpinorIndex.delta(spinor_free[0][0], spinor_free[-1][0])
else:
t = t*4
else:
if spinor_free:
t = t*DiracSpinorIndex.delta(spinor_free[0][0], spinor_free[-1][0])
else:
t = t*4
return t
GammaMatrix = GammaMatrixHead()
| bsd-3-clause |
flavour/cedarbluff | controllers/doc.py | 3 | 6605 | # -*- coding: utf-8 -*-
"""
Document Library - Controllers
"""
module = request.controller
if module not in deployment_settings.modules:
raise HTTP(404, body="Module disabled: %s" % module)
# =============================================================================
def index():
"Module's Home Page"
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# =============================================================================
def document():
""" RESTful CRUD controller """
resourcename = request.function
# Load Models
s3mgr.load("doc_document")
rheader = lambda r: document_rheader(r)
output = s3_rest_controller(rheader=rheader)
return output
# -----------------------------------------------------------------------------
def document_rheader(r):
if r.representation == "html":
doc_document = r.record
if doc_document:
#rheader_tabs = s3_rheader_tabs(r, document_tabs(r))
table = db.doc_document
rheader = DIV(B("%s: " % T("Name")), doc_document.name,
TABLE(TR(
TH("%s: " % T("File")), table.file.represent( doc_document.file ),
TH("%s: " % T("URL")), table.url.represent( doc_document.url ),
),
TR(
TH("%s: " % T("Organization")), table.organisation_id.represent( doc_document.organisation_id ),
TH("%s: " % T("Person")), table.person_id.represent( doc_document.organisation_id ),
),
),
#rheader_tabs
)
return rheader
return None
# -----------------------------------------------------------------------------
def document_tabs(r):
"""
Display the number of Components in the tabs
- currently unused as we don't have these tabs off documents
"""
tab_opts = [{"tablename": "assess_rat",
"resource": "rat",
"one_title": T("1 Assessment"),
"num_title": " Assessments",
},
{"tablename": "irs_ireport",
"resource": "ireport",
"one_title": "1 Incident Report",
"num_title": " Incident Reports",
},
{"tablename": "cr_shelter",
"resource": "shelter",
"one_title": "1 Shelter",
"num_title": " Shelters",
},
#{"tablename": "flood_freport",
# "resource": "freport",
# "one_title": "1 Flood Report",
# "num_title": " Flood Reports",
#},
{"tablename": "req_req",
"resource": "req",
"one_title": "1 Request",
"num_title": " Requests",
},
]
tabs = [(T("Details"), None)]
for tab_opt in tab_opts:
tablename = tab_opt["tablename"]
if tablename in db and document_id in db[tablename]:
tab_count = db( (db[tablename].deleted == False) & (db[tablename].document_id == r.id) ).count()
if tab_count == 0:
label = s3base.S3CRUD.crud_string(tablename, "title_create")
elif tab_count == 1:
label = tab_opt["one_title"]
else:
label = T(str(tab_count) + tab_opt["num_title"] )
tabs.append( (label, tab_opt["resource"] ) )
return tabs
# =============================================================================
def image():
""" RESTful CRUD controller """
resourcename = request.function
# Load Models
s3mgr.load("doc_image")
output = s3_rest_controller()
return output
# =============================================================================
def bulk_upload():
"""
Custom view to allow bulk uploading of Photos
@ToDo: Allow creation of a GIS Feature Layer to view on the map
@ToDo: Allow uploading of associated GPX track for timestamp correlation.
See r1595 for the previous draft of this work
"""
response.s3.stylesheets.append( "S3/fileuploader.css" )
return dict()
def upload_bulk():
"""
Receive the Uploaded data from bulk_upload()
https://github.com/valums/file-uploader/blob/master/server/readme.txt
@ToDo: Read EXIF headers to geolocate the Photos
"""
tablename = "doc_image"
table = s3db[tablename]
import cgi
source = request.post_vars.get("qqfile", None)
if isinstance(source, cgi.FieldStorage) and source.filename:
# For IE6-8, Opera, older versions of other browsers you get the file as you normally do with regular form-based uploads.
name = source.filename
image = source.file
else:
# For browsers which upload file with progress bar, you will need to get the raw post data and write it to the file.
if "name" in request.vars:
name = request.vars.name
else:
HTTP(400, "Invalid Request: Need a Name!")
image = request.body.read()
# Convert to StringIO for onvalidation/import
import cStringIO
image = cStringIO.StringIO(image)
source = Storage()
source.filename = name
source.file = image
form = SQLFORM(table)
vars = Storage()
vars.name = name
vars.image = source
vars._formname = "%s_create" % tablename
# onvalidation callback
onvalidation = s3mgr.model.get_config(tablename, "create_onvalidation",
s3mgr.model.get_config(tablename, "onvalidation"))
if form.accepts(vars, onvalidation=onvalidation):
msg = Storage(success = True)
# onaccept callback
onaccept = s3mgr.model.get_config(tablename, "create_onaccept",
s3mgr.model.get_config(tablename, "onaccept"))
callback(onaccept, form, tablename=tablename)
else:
error_msg = ""
for error in form.errors:
error_msg = "%s\n%s:%s" % (error_msg, error, form.errors[error])
msg = Storage(error = error_msg)
response.headers["Content-Type"] = "text/html" # This is what the file-uploader widget expects
return json.dumps(msg)
# END =========================================================================
| mit |
dutchbot/FoscamBackupper | test/unit/test_file_wrapper.py | 1 | 2868 |
import unittest
import unittest.mock as umock
from unittest.mock import call
import progressbar
from foscambackup.download_file_tracker import DownloadFileTracker
from test.mocks import mock_file_helper
WRITE = mock_file_helper.WRITE
WRITE.return_value = WRITE
class TestFileWrapper(unittest.TestCase):
def setUp(self):
self.test_file = "12345.avi"
WRITE.name = self.test_file
self.byte_size = 266613824
with umock.patch("builtins.open", WRITE):
self.wrapper = DownloadFileTracker(self.test_file, self.byte_size)
def tearDown(self):
WRITE.reset_mock()
def test_file_wrap_init(self):
""" Verify class vars are initialized """
self.assertIsInstance(self.wrapper.local_file, umock.MagicMock)
self.assertEqual(self.wrapper.total_size, self.byte_size)
self.assertEqual(self.wrapper.downloaded_bytes, 0)
self.assertIsInstance(self.wrapper.progressbar, progressbar.ProgressBar)
def test_write_to_file(self):
""" Assert write and update_progress are called correctly """
def update(byte_len):
pass
mock_update = umock.MagicMock(side_effect=update)
writing = '123456'
with umock.patch("foscambackup.download_file_tracker.DownloadFileTracker.update_progress", mock_update):
self.wrapper.write_to_file(writing)
self.assertListEqual(mock_update.call_args_list, [call(len(writing))])
self.assertListEqual(WRITE.call_args_list, [call('12345.avi', 'w+b')])
self.assertEqual(WRITE.buffer, writing)
def test_update_progress(self):
""" Assert that the proper values are computed """
byte_size = 8192
remainder = self.wrapper.total_size % byte_size
number_of_times = (self.wrapper.total_size - remainder) / byte_size
number_of_times += 1 if remainder > 0 else 0
for i in range(0, round(number_of_times)):
if i == number_of_times -1:
self.wrapper.update_progress(remainder)
else:
self.wrapper.update_progress(byte_size)
self.assertEqual(self.wrapper.downloaded_bytes, self.wrapper.total_size)
self.assertEqual(self.wrapper.progressbar.data()['percentage'], 100.00)
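# Worked numbers for the sizes used above: 266613824 = 8192 * 32545 + 5184, so the
# loop issues 32545 calls of update_progress(8192) plus one final call with the
# 5184-byte remainder, after which downloaded_bytes equals total_size exactly.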
def test_delete_file(self):
""" Test for closing and removing file """
WRITE.remove = umock.MagicMock()
WRITE.close = umock.MagicMock()
with umock.patch("os.remove", WRITE.remove):
self.wrapper.delete_file()
self.assertEqual(WRITE.remove.call_args, call(self.test_file))
def test_close_file(self):
""" Test if close function is called """
WRITE.close = umock.MagicMock()
self.wrapper.close_file()
self.assertEqual(self.wrapper.local_file.close.call_args_list,[call()])
| gpl-3.0 |
tylertian/Openstack | openstack F/horizon/horizon/dashboards/syspanel/networks/ports/tables.py | 2 | 2941 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import api
from horizon import exceptions
from horizon import tables
from horizon.dashboards.nova.networks.ports.tables import (get_fixed_ips,
get_attached)
LOG = logging.getLogger(__name__)
class DeletePort(tables.DeleteAction):
data_type_singular = _("Port")
data_type_plural = _("Ports")
def delete(self, request, obj_id):
try:
api.quantum.port_delete(request, obj_id)
except:
msg = _('Failed to delete port %s') % obj_id
LOG.info(msg)
network_id = self.table.kwargs['network_id']
redirect = reverse('horizon:syspanel:networks:detail',
args=[network_id])
exceptions.handle(request, msg, redirect=redirect)
class CreatePort(tables.LinkAction):
name = "create"
verbose_name = _("Create Port")
url = "horizon:syspanel:networks:addport"
classes = ("ajax-modal", "btn-create")
def get_link_url(self, datum=None):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id,))
class UpdatePort(tables.LinkAction):
name = "update"
verbose_name = _("Edit Port")
url = "horizon:syspanel:networks:editport"
classes = ("ajax-modal", "btn-edit")
def get_link_url(self, port):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id, port.id))
class PortsTable(tables.DataTable):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:syspanel:networks:ports:detail")
fixed_ips = tables.Column(get_fixed_ips, verbose_name=_("Fixed IPs"))
device_id = tables.Column(get_attached, verbose_name=_("Device Attached"))
status = tables.Column("status", verbose_name=_("Status"))
admin_state = tables.Column("admin_state",
verbose_name=_("Admin State"))
class Meta:
name = "ports"
verbose_name = _("Ports")
table_actions = (CreatePort, DeletePort)
row_actions = (UpdatePort, DeletePort,)
| apache-2.0 |
leansoft/edx-platform | common/lib/xmodule/xmodule/modulestore/tests/utils.py | 39 | 5990 | """
Helper classes and methods for running modulestore tests without Django.
"""
from importlib import import_module
from opaque_keys.edx.keys import UsageKey
from unittest import TestCase
from xblock.fields import XBlockMixin
from xmodule.x_module import XModuleMixin
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.draft_and_published import ModuleStoreDraftAndPublished
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.tests import DATA_DIR
def load_function(path):
"""
Load a function by name.
path is a string of the form "path.to.module.function"
returns the imported python object `function` from `path.to.module`
"""
module_path, _, name = path.rpartition('.')
return getattr(import_module(module_path), name)
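# Illustrative usage (not part of the original module): load_function resolves a
# dotted path to the object it names, e.g.
#   >>> load_function('xmodule.modulestore.mixed.MixedModuleStore')
#   <class 'xmodule.modulestore.mixed.MixedModuleStore'>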
# pylint: disable=unused-argument
def create_modulestore_instance(
engine,
contentstore,
doc_store_config,
options,
i18n_service=None,
fs_service=None,
user_service=None,
signal_handler=None,
):
"""
This will return a new instance of a modulestore given an engine and options
"""
class_ = load_function(engine)
if issubclass(class_, ModuleStoreDraftAndPublished):
options['branch_setting_func'] = lambda: ModuleStoreEnum.Branch.draft_preferred
return class_(
doc_store_config=doc_store_config,
contentstore=contentstore,
signal_handler=signal_handler,
**options
)
def mock_tab_from_json(tab_dict):
"""
Mocks out the CourseTab.from_json to just return the tab_dict itself so that we don't have to deal
with plugin errors.
"""
return tab_dict
class LocationMixin(XBlockMixin):
"""
Adds a `location` property to an :class:`XBlock` so it is more compatible
with old-style :class:`XModule` API. This is a simplified version of
:class:`XModuleMixin`.
"""
@property
def location(self):
""" Get the UsageKey of this block. """
return self.scope_ids.usage_id
@location.setter
def location(self, value):
""" Set the UsageKey of this block. """
assert isinstance(value, UsageKey)
self.scope_ids = self.scope_ids._replace( # pylint: disable=attribute-defined-outside-init,protected-access
def_id=value,
usage_id=value,
)
class MixedSplitTestCase(TestCase):
"""
Stripped-down version of ModuleStoreTestCase that can be used without Django
(i.e. for testing in common/lib/ ). Sets up MixedModuleStore and Split.
"""
RENDER_TEMPLATE = lambda t_n, d, ctx=None, nsp='main': u'{}: {}, {}'.format(t_n, repr(d), repr(ctx))
modulestore_options = {
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': DATA_DIR,
'render_template': RENDER_TEMPLATE,
'xblock_mixins': (EditInfoMixin, InheritanceMixin, LocationMixin, XModuleMixin),
}
DOC_STORE_CONFIG = {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'test_mongo_libs',
'collection': 'modulestore',
'asset_collection': 'assetstore',
}
MIXED_OPTIONS = {
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
]
}
def setUp(self):
"""
Set up requirements for testing: a user ID and a modulestore
"""
super(MixedSplitTestCase, self).setUp()
self.user_id = ModuleStoreEnum.UserID.test
self.store = MixedModuleStore(
None,
create_modulestore_instance=create_modulestore_instance,
mappings={},
**self.MIXED_OPTIONS
)
self.addCleanup(self.store.close_all_connections)
self.addCleanup(self.store._drop_database) # pylint: disable=protected-access
def make_block(self, category, parent_block, **kwargs):
"""
Create a block of type `category` as a child of `parent_block`, in any
course or library. You can pass any field values as kwargs.
"""
extra = {"publish_item": False, "user_id": self.user_id}
extra.update(kwargs)
return ItemFactory.create(
category=category,
parent=parent_block,
parent_location=parent_block.location,
modulestore=self.store,
**extra
)
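    # Hedged usage sketch (not in the original file; `course` is assumed to be a
    # course-like block created elsewhere in the test):
    #   chapter = self.make_block("chapter", course)
    #   problem = self.make_block("problem", chapter, display_name="Sample Problem")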
class ProceduralCourseTestMixin(object):
"""
Contains methods for testing courses generated procedurally
"""
def populate_course(self, branching=2, emit_signals=False):
"""
Add k chapters, k^2 sections, k^3 verticals, k^4 problems to self.course (where k = branching)
"""
user_id = self.user.id
self.populated_usage_keys = {} # pylint: disable=attribute-defined-outside-init
def descend(parent, stack): # pylint: disable=missing-docstring
if not stack:
return
xblock_type = stack[0]
for _ in range(branching):
child = ItemFactory.create(
category=xblock_type,
parent_location=parent.location,
user_id=user_id
)
self.populated_usage_keys.setdefault(xblock_type, []).append(
child.location
)
descend(child, stack[1:])
with self.store.bulk_operations(self.course.id, emit_signals=emit_signals):
descend(self.course, ['chapter', 'sequential', 'vertical', 'problem'])
| agpl-3.0 |
0k/OpenUpgrade | addons/l10n_th/__init__.py | 893 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
xdatravelbug/N909D_Kernel_JB_4.1.2 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
Servir-Mekong/ecodash | lib/httplib2/__init__.py | 48 | 71030 | from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.9.1"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except (ImportError, AttributeError):
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
ssl_SSLError = ssl.SSLError
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if disable_validation:
cert_reqs = ssl.CERT_NONE
else:
cert_reqs = ssl.CERT_REQUIRED
# We should be specifying SSL version 3 or TLS v1, but the ssl module
# doesn't expose the necessary knobs. So we need to go with the default
# of SSLv23.
return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
ssl_SSLError = None
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if not disable_validation:
raise CertificateValidationUnsupported(
"SSL certificate validation is not supported without "
"the ssl module installed. To avoid this error, install "
"the ssl module, or explicity disable validation.")
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = [
'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation',
'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError',
'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
def __init__(self, desc, host, cert):
HttpLib2Error.__init__(self, desc)
self.host = host
self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
try:
# Users can optionally provide a module that tells us where the CA_CERTS
# are located.
import ca_certs_locater
CA_CERTS = ca_certs_locater.get()
except ImportError:
# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
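# Illustrative example (not in the original source):
#   >>> parse_uri("http://example.com/path?q=1#frag")
#   ('http', 'example.com', '/path', 'q=1', 'frag')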
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
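# Illustrative example (not in the original source): scheme and authority are
# lowercased and an empty path becomes "/", e.g.
#   >>> urlnorm("HTTP://Example.COM/A?b=1")
#   ('http', 'example.com', '/A?b=1', 'http://example.com/A?b=1')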
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
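# Illustrative example (not in the original source):
#   >>> _parse_cache_control({'cache-control': 'no-cache, max-age=3600'})
#   {'no-cache': 1, 'max-age': '3600'}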
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
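# Illustrative example (not in the original source):
#   >>> _parse_www_authenticate({'www-authenticate': 'Basic realm="example"'})
#   {'basic': {'realm': 'example'}}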
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
    As a design decision we will never return a stale document as
    fresh, hence the non-implementation of 'max-stale'. This also lets
    us safely ignore 'must-revalidate', since we operate as if every
    server had sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
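# Illustrative examples (not in the original source), assuming the response's
# Date header is current:
#   - response 'cache-control: max-age=3600' and no request directives -> "FRESH"
#   - request 'cache-control: no-cache' -> "TRANSPARENT"
#   - no max-age or expires on the response -> "STALE"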
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
        # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Over-ride this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Over-ride this in sub-classes if necessary.
        Return TRUE if the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (
self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)))
headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'])
if self.challenge.get('opaque'):
headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer ([email protected])"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class AllHosts(object):
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
bypass_hosts = ()
def __init__(self, proxy_type, proxy_host, proxy_port,
proxy_rdns=True, proxy_user=None, proxy_pass=None):
"""
Args:
proxy_type: The type of proxy server. This must be set to one of
socks.PROXY_TYPE_XXX constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
proxy_host='localhost', proxy_port=8000)
proxy_host: The hostname or IP address of the proxy server.
proxy_port: The port that the proxy server is running on.
proxy_rdns: If True (default), DNS queries will not be performed
locally, and instead, handed to the proxy to resolve. This is useful
if the network does not allow resolution of non-local names. In
httplib2 0.9 and earlier, this defaulted to False.
proxy_user: The username used to authenticate with the proxy server.
proxy_pass: The password used to authenticate with the proxy server.
"""
self.proxy_type = proxy_type
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_rdns = proxy_rdns
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port,
self.proxy_rdns, self.proxy_user, self.proxy_pass)
def isgood(self):
return (self.proxy_host != None) and (self.proxy_port != None)
def applies_to(self, hostname):
return not self.bypass_host(hostname)
def bypass_host(self, hostname):
"""Has this host been excluded from the proxy config"""
if self.bypass_hosts is AllHosts:
return True
bypass = False
for domain in self.bypass_hosts:
if hostname.endswith(domain):
bypass = True
return bypass
def proxy_info_from_environment(method='http'):
"""
Read proxy info from the environment variables.
"""
if method not in ['http', 'https']:
return
env_var = method + '_proxy'
url = os.environ.get(env_var, os.environ.get(env_var.upper()))
if not url:
return
pi = proxy_info_from_url(url, method)
no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
bypass_hosts = []
if no_proxy:
bypass_hosts = no_proxy.split(',')
# special case, no_proxy=* means all hosts bypassed
if no_proxy == '*':
bypass_hosts = AllHosts
pi.bypass_hosts = bypass_hosts
return pi
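# Illustrative usage (not in the original source):
#   >>> os.environ['http_proxy'] = 'http://localhost:8000'
#   >>> pi = proxy_info_from_environment()   # ProxyInfo for localhost:8000
# Returns None when no <scheme>_proxy variable is set.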
def proxy_info_from_url(url, method='http'):
"""
Construct a ProxyInfo from a URL (such as http_proxy env var)
"""
url = urlparse.urlparse(url)
username = None
password = None
port = None
if '@' in url[1]:
ident, host_port = url[1].split('@', 1)
if ':' in ident:
username, password = ident.split(':', 1)
else:
password = ident
else:
host_port = url[1]
if ':' in host_port:
host, port = host_port.split(':', 1)
else:
host = host_port
if port:
port = int(port)
else:
port = dict(https=443, http=80)[method]
proxy_type = 3 # socks.PROXY_TYPE_HTTP
return ProxyInfo(
proxy_type = proxy_type,
proxy_host = host,
proxy_port = port,
proxy_user = username or None,
proxy_pass = password or None,
)
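# Illustrative example (not in the original source):
#   >>> pi = proxy_info_from_url('http://alice:[email protected]:3128')
#   >>> pi.proxy_host, pi.proxy_port, pi.proxy_user
#   ('proxy.example.com', 3128, 'alice')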
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""
HTTPConnection subclass that supports timeouts
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
if self.proxy_info and socks is None:
raise ProxiesUnavailableError(
'Proxy support missing but proxy use was requested!')
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
host = proxy_host
port = proxy_port
else:
use_proxy = False
host = self.host
port = self.port
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if use_proxy:
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s) ************" % (self.host, self.port)
if use_proxy:
print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
self.sock.connect((self.host, self.port) + sa[2:])
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
host = proxy_host
port = proxy_port
else:
use_proxy = False
host = self.host
port = self.port
address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
for family, socktype, proto, canonname, sockaddr in address_info:
try:
if use_proxy:
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock =_ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
                    hostname = self.host.split(':')[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
SCHEME_TO_CONNECTION = {
'http': HTTPConnectionWithTimeout,
'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
try:
from google.appengine.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google.appengine.api.urlfetch import fetch
from google.appengine.api.urlfetch import InvalidURLError
except (ImportError, AttributeError):
from google3.apphosting.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google3.apphosting.api.urlfetch import fetch
from google3.apphosting.api.urlfetch import InvalidURLError
def _new_fixed_fetch(validate_certificate):
def fixed_fetch(url, payload=None, method="GET", headers={},
allow_truncated=False, follow_redirects=True,
deadline=None):
if deadline is None:
deadline = socket.getdefaulttimeout() or 5
return fetch(url, payload=payload, method=method, headers=headers,
allow_truncated=allow_truncated,
follow_redirects=follow_redirects, deadline=deadline,
validate_certificate=validate_certificate)
return fixed_fetch
class AppEngineHttpConnection(httplib.HTTPConnection):
"""Use httplib on App Engine, but compensate for its weirdness.
The parameters key_file, cert_file, proxy_info, ca_certs, and
disable_ssl_certificate_validation are all dropped on the ground.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPConnection.__init__(self, host, port=port,
strict=strict, timeout=timeout)
class AppEngineHttpsConnection(httplib.HTTPSConnection):
"""Same as AppEngineHttpConnection, but for HTTPS URIs."""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict,
timeout=timeout)
self._fetch = _new_fixed_fetch(
not disable_ssl_certificate_validation)
    # Update the connection classes to use the Google App Engine specific ones.
SCHEME_TO_CONNECTION = {
'http': AppEngineHttpConnection,
'https': AppEngineHttpsConnection
}
except (ImportError, AttributeError):
pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None,
proxy_info=proxy_info_from_environment,
ca_certs=None, disable_ssl_certificate_validation=False):
"""If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
          proxy_info_from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
# all redirecs are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
# Keep Authorization: headers on a redirect.
self.forward_authorization_headers = False
def __getstate__(self):
state_dict = copy.copy(self.__dict__)
# In case request is augmented by some foreign object such as
# credentials which handle auth
if 'request' in state_dict:
del state_dict['request']
if 'connections' in state_dict:
del state_dict['connections']
return state_dict
def __setstate__(self, state):
self.__dict__.update(state)
self.connections = {}
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
i = 0
seen_bad_status_line = False
while i < RETRIES:
i += 1
try:
if hasattr(conn, 'sock') and conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if hasattr(conn, 'sock') and conn.sock is None:
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i < RETRIES-1:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except httplib.BadStatusLine:
# If we get a BadStatusLine on the first try then that means
# the connection just went stale, so retry regardless of the
# number of RETRIES set.
if not seen_bad_status_line and i == 1:
i = 0
seen_bad_status_line = True
conn.close()
conn.connect()
continue
else:
conn.close()
raise
except (socket.error, httplib.HTTPException):
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
else:
content = ""
if method == "HEAD":
conn.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if 'authorization' in headers and not self.forward_authorization_headers:
del headers['authorization']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(
location, method=redirect_method,
body=body, headers=headers,
redirections=redirections - 1)
response.previous = old_response
else:
raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin with either
'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
etc. There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a
string object.
Any extra headers that are to be sent with the request should be
provided in the 'headers' dictionary.
The maximum number of redirect to follow before raising an
        exception is 'redirections'. The default is 5.
The return value is a tuple of (response, content), the first
being and instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
proxy_info = self._get_proxy_info(scheme, authority)
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = SCHEME_TO_CONNECTION[scheme]
certs = list(self.certificates.iter(authority))
if scheme == 'https':
if certs:
conn = self.connections[conn_key] = connection_type(
authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info)
conn.set_debuglevel(debuglevel)
if 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri.encode('utf-8')
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
raise RedirectLimit("Redirected more times than redirection_limit allows.", {}, "")
(response, new_content) = self.request(
info['-x-permanent-redirect-url'], method='GET',
headers=headers, redirections=redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seem to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response({
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response({
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if (hasattr(proxy_info, 'applies_to')
and not proxy_info.applies_to(hostname)):
proxy_info = None
return proxy_info
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key.lower()] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key.lower()] = value
self.status = int(self.get('status', self.status))
self.reason = self.get('reason', self.reason)
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
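# Illustrative sketch (not part of the original module): a Response instance is
# a dict of lower-cased header names with a few extra attributes. The values
# shown are assumed.
#
#   resp, content = httplib2.Http().request("http://example.org/")
#   resp.status          # e.g. 200 (int attribute)
#   resp['status']       # "200" (string, via dict access)
#   resp.fromcache       # True when served from the local cache
#   resp.previous        # prior Response when a redirect was followed, else None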
| gpl-3.0 |
kybriainfotech/iSocioCRM | addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/tools.py | 384 | 1576 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import urllib
def get_absolute_file_path(url):
url_unquoted = urllib.unquote(url)
return os.name == 'nt' and url_unquoted[1:] or url_unquoted
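# Illustrative behaviour (not part of the original file); the paths are assumed.
# On POSIX the unquoted path is returned unchanged, while on Windows ('nt') the
# leading slash of a file-URL path is stripped:
#
#   get_absolute_file_path('/home/user/My%20Report.odt')  # -> '/home/user/My Report.odt'
#   get_absolute_file_path('/C:/My%20Report.odt')         # on Windows -> 'C:/My Report.odt'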
# This function reads the content of a file and returns it to the caller
def read_data_from_file(filename):
fp = file( filename, "rb" )
data = fp.read()
fp.close()
return data
# This function writes the content to a file
def write_data_to_file(filename, data):
fp = file( filename, 'wb' )
fp.write( data )
fp.close()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mwillmott/techbikers | server/api/urls.py | 1 | 2189 | from django.conf.urls import url, include
from .views.rides import RidesList, RideDetails, RideRidersList, RideRiderDetails, RideRiderCharge, RideRiderFundraiser
from .views.riders import RidersList, RiderProfile, RiderRides
from .views.chapters import ChaptersList, ChapterDetails, ChapterMembersList
from .views.sponsors import SponsorsList, SponsorDetails
from .views.fundraisers import FundraisersList
from .views.auth import AuthenticatedUserDetails, UserDetails
auth_urls = [
url(r'^verify', AuthenticatedUserDetails.as_view(), name='auth-verify'),
url(r'^account', UserDetails.as_view(), name='auth-account')
]
ride_urls = [
url(r'^(?P<id>\d+)/riders/(?P<rider_id>\d+)/fundraiser', RideRiderFundraiser.as_view(), name='ride-rider-fundraiser'),
url(r'^(?P<id>\d+)/riders/(?P<rider_id>\d+)/charge', RideRiderCharge.as_view(), name='ride-rider-charge'),
url(r'^(?P<id>\d+)/riders/(?P<rider_id>\d+)', RideRiderDetails.as_view(), name='ride-rider-details'),
url(r'^(?P<id>\d+)/riders', RideRidersList.as_view(), name='ride-riders'),
url(r'^(?P<id>\d+)', RideDetails.as_view(), name='ride-details'),
url(r'^$', RidesList.as_view(), name='rides-list')
]
rider_urls = [
url(r'^(?P<id>\d+)/rides', RiderRides.as_view(), name='rider-rides'),
url(r'^(?P<id>\d+)', RiderProfile.as_view(), name='rider-profile'),
url(r'^$', RidersList.as_view(), name='riders-list')
]
chapter_urls = [
url(r'^(?P<id>\d+)/members', ChapterMembersList.as_view(), name='chapter-members'),
url(r'^(?P<id>\d+)', ChapterDetails.as_view(), name='chapter-details'),
url(r'^$', ChaptersList.as_view(), name='chapters-list')
]
sponsor_urls = [
url(r'^(?P<id>\d+)', SponsorDetails.as_view(), name='sponsor-details'),
url(r'^$', SponsorsList.as_view(), name='sponsor-list')
]
fundraiser_urls = [
url(r'^$', FundraisersList.as_view(), name='fundraiser-list')
]
urlpatterns = [
url(r'^auth/', include(auth_urls)),
url(r'^rides/', include(ride_urls)),
url(r'^riders/', include(rider_urls)),
url(r'^chapters/', include(chapter_urls)),
url(r'^sponsors/', include(sponsor_urls)),
url(r'^fundraisers/', include(fundraiser_urls))
]
| mit |
quarckster/cfme_tests | cfme/tests/automate/test_common_methods.py | 1 | 4431 | # -*- coding: utf-8 -*-
"""This module contains tests that test the universally applicable canned methods in Automate."""
import fauxfactory
import pytest
from datetime import timedelta, date
from cfme import test_requirements
from cfme.infrastructure.virtual_machines import InfraVmSummaryView
from cfme.infrastructure.provider import InfraProvider
from cfme.automate.buttons import ButtonGroup, Button
from cfme.common.vm import VM
from cfme.utils.blockers import BZ
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
from widgetastic_manageiq import Dropdown
pytestmark = [
test_requirements.automate,
pytest.mark.meta(server_roles="+automate"),
pytest.mark.provider([InfraProvider], required_fields=[
['provisioning', 'template'],
['provisioning', 'host'],
['provisioning', 'datastore']
], scope="module")
]
@pytest.fixture(scope="function")
def vm_name():
vm_name = 'test_ae_methods_{}'.format(fauxfactory.gen_alphanumeric())
return vm_name
@pytest.fixture(scope="function")
def testing_vm(request, vm_name, setup_provider, provider, provisioning):
vm_obj = VM.factory(vm_name, provider, provisioning["template"])
def _finalize():
try:
vm_obj.delete_from_provider()
except Exception:
logger.warn('Failed deleting VM from provider: %s', vm_name)
request.addfinalizer(_finalize)
vm_obj.create_on_provider(find_in_cfme=True, allow_skip="default")
return vm_obj
def generate_retirement_date(delta=None):
gen_date = date.today()
if delta:
gen_date += timedelta(days=delta)
return gen_date
@pytest.mark.tier(3)
def test_vm_retire_extend(appliance, request, testing_vm, soft_assert):
""" Tests extending a retirement using an AE method.
Prerequisities:
* A running VM on any provider.
Steps:
* It creates a button pointing to ``Request/vm_retire_extend`` instance. The button should
live in the VM and Instance button group.
* Then it sets a retirement date for the VM
* Then it waits until the retirement date is set
* Then it clicks the button that was created and it waits for the retirement date to extend.
Metadata:
test_flag: retire, provision
"""
num_days = 5
soft_assert(testing_vm.retirement_date == 'Never', "The retirement date is not 'Never'!")
retirement_date = generate_retirement_date(delta=num_days)
testing_vm.set_retirement_date(retirement_date)
wait_for(lambda: testing_vm.retirement_date != 'Never', message="retirement date set")
set_date = testing_vm.retirement_date
vm_retire_date_fmt = VM.RETIRE_DATE_FMT.pick(appliance.version)
if not BZ(1419150, forced_streams='5.6').blocks:
soft_assert(set_date == retirement_date.strftime(vm_retire_date_fmt),
"The retirement date '{}' did not match expected date '{}'"
.format(set_date, retirement_date.strftime(vm_retire_date_fmt)))
# Create the vm_retire_extend button and click on it
grp_name = "grp_{}".format(fauxfactory.gen_alphanumeric())
grp = ButtonGroup(
text=grp_name,
hover=grp_name,
type=ButtonGroup.VM_INSTANCE
)
request.addfinalizer(lambda: grp.delete_if_exists())
grp.create()
btn_name = "btn_{}".format(fauxfactory.gen_alphanumeric())
button = Button(
group=grp,
text=btn_name,
hover=btn_name,
system="Request",
request="vm_retire_extend"
)
request.addfinalizer(lambda: button.delete_if_exists())
button.create()
navigate_to(testing_vm, 'Details')
class TestDropdownView(InfraVmSummaryView):
group = Dropdown(grp.text)
view = appliance.browser.create_view(TestDropdownView)
view.group.item_select(button.text)
# CFME automate vm_retire_extend method defaults to extending the date by 14 days
extend_duration_days = 14
extended_retirement_date = retirement_date + timedelta(days=extend_duration_days)
# Check that the WebUI updates with the correct date
wait_for(
lambda: testing_vm.retirement_date >= extended_retirement_date.strftime(vm_retire_date_fmt),
num_sec=60,
message="Check for extension of the VM retirement date by {} days".format(
extend_duration_days)
)
| gpl-2.0 |
ironbox360/django | django/contrib/sessions/base_session.py | 348 | 1623 | """
This module allows importing AbstractBaseSession even
when django.contrib.sessions is not in INSTALLED_APPS.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class BaseSessionManager(models.Manager):
def encode(self, session_dict):
"""
Return the given session dictionary serialized and encoded as a string.
"""
session_store_class = self.model.get_session_store_class()
return session_store_class().encode(session_dict)
def save(self, session_key, session_dict, expire_date):
s = self.model(session_key, self.encode(session_dict), expire_date)
if session_dict:
s.save()
else:
s.delete() # Clear sessions with no data.
return s
@python_2_unicode_compatible
class AbstractBaseSession(models.Model):
session_key = models.CharField(_('session key'), max_length=40, primary_key=True)
session_data = models.TextField(_('session data'))
expire_date = models.DateTimeField(_('expire date'), db_index=True)
objects = BaseSessionManager()
class Meta:
abstract = True
verbose_name = _('session')
verbose_name_plural = _('sessions')
def __str__(self):
return self.session_key
@classmethod
def get_session_store_class(cls):
raise NotImplementedError
def get_decoded(self):
session_store_class = self.get_session_store_class()
return session_store_class().decode(self.session_data)
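# Illustrative subclassing sketch (not part of the original module). The model
# and store names are assumptions; the pattern pairs a concrete session model
# with a SessionStore that points back at it.
#
#   from django.contrib.sessions.backends.db import SessionStore as DBStore
#
#   class CustomSession(AbstractBaseSession):
#       account_id = models.IntegerField(null=True, db_index=True)
#
#       @classmethod
#       def get_session_store_class(cls):
#           return SessionStore
#
#   class SessionStore(DBStore):
#       @classmethod
#       def get_model_class(cls):
#           return CustomSession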
| bsd-3-clause |
ytjiang/django | tests/model_formsets/test_uuid.py | 45 | 1295 | from django.forms.models import inlineformset_factory
from django.test import TestCase
from .models import UUIDPKChild, UUIDPKParent
class InlineFormsetTests(TestCase):
def test_inlineformset_factory_nulls_default_pks(self):
"""
#24377 - If we're adding a new object, a parent's auto-generated pk
from the model field default should be ignored as it's regenerated on
the save request.
"""
FormSet = inlineformset_factory(UUIDPKParent, UUIDPKChild, fields='__all__')
formset = FormSet()
self.assertIsNone(formset.forms[0].fields['parent'].initial)
def test_inlineformset_factory_ignores_default_pks_on_submit(self):
"""
#24377 - Inlines with a model field default should ignore that default
value to avoid triggering validation on empty forms.
"""
FormSet = inlineformset_factory(UUIDPKParent, UUIDPKChild, fields='__all__')
formset = FormSet({
'uuidpkchild_set-TOTAL_FORMS': 3,
'uuidpkchild_set-INITIAL_FORMS': 0,
'uuidpkchild_set-MAX_NUM_FORMS': '',
'uuidpkchild_set-0-name': 'Foo',
'uuidpkchild_set-1-name': '',
'uuidpkchild_set-2-name': '',
})
self.assertTrue(formset.is_valid())
| bsd-3-clause |
deandunbar/bitwave | hackathon_version/venv/lib/python2.7/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py | 56 | 13461 | from __future__ import unicode_literals
import os
from collections import OrderedDict
from optparse import make_option
from django.core.files.storage import FileSystemStorage
from django.core.management.base import CommandError, NoArgsCommand
from django.utils.encoding import smart_text
from django.utils.six.moves import input
from django.contrib.staticfiles.finders import get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
class Command(NoArgsCommand):
"""
Command that copies or symlinks static files from different
locations to the settings.STATIC_ROOT.
"""
option_list = NoArgsCommand.option_list + (
make_option('--noinput',
action='store_false', dest='interactive', default=True,
help="Do NOT prompt the user for input of any kind."),
make_option('--no-post-process',
action='store_false', dest='post_process', default=True,
help="Do NOT post process collected files."),
make_option('-i', '--ignore', action='append', default=[],
dest='ignore_patterns', metavar='PATTERN',
help="Ignore files or directories matching this glob-style "
"pattern. Use multiple times to ignore more."),
make_option('-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except modify the filesystem."),
make_option('-c', '--clear',
action='store_true', dest='clear', default=False,
help="Clear the existing files using the storage "
"before trying to copy or link the original file."),
make_option('-l', '--link',
action='store_true', dest='link', default=False,
help="Create a symbolic link to each file instead of copying."),
make_option('--no-default-ignore', action='store_false',
dest='use_default_ignore_patterns', default=True,
help="Don't ignore the common private glob-style patterns 'CVS', "
"'.*' and '*~'."),
)
help = "Collect static files in a single location."
requires_system_checks = False
def __init__(self, *args, **kwargs):
super(NoArgsCommand, self).__init__(*args, **kwargs)
self.copied_files = []
self.symlinked_files = []
self.unmodified_files = []
self.post_processed_files = []
self.storage = staticfiles_storage
try:
self.storage.path('')
except NotImplementedError:
self.local = False
else:
self.local = True
def set_options(self, **options):
"""
Set instance variables based on an options dict
"""
self.interactive = options['interactive']
self.verbosity = int(options.get('verbosity', 1))
self.symlink = options['link']
self.clear = options['clear']
self.dry_run = options['dry_run']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~']
self.ignore_patterns = list(set(ignore_patterns))
self.post_process = options['post_process']
def collect(self):
"""
Perform the bulk of the work of collectstatic.
Split off from handle_noargs() to facilitate testing.
"""
if self.symlink and not self.local:
raise CommandError("Can't symlink to a remote destination.")
if self.clear:
self.clear_dir('')
if self.symlink:
handler = self.link_file
else:
handler = self.copy_file
found_files = OrderedDict()
for finder in get_finders():
for path, storage in finder.list(self.ignore_patterns):
# Prefix the relative path if the source storage contains it
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
if prefixed_path not in found_files:
found_files[prefixed_path] = (storage, path)
handler(path, prefixed_path, storage)
# Here we check if the storage backend has a post_process
# method and pass it the list of modified files.
if self.post_process and hasattr(self.storage, 'post_process'):
processor = self.storage.post_process(found_files,
dry_run=self.dry_run)
for original_path, processed_path, processed in processor:
if isinstance(processed, Exception):
self.stderr.write("Post-processing '%s' failed!" % original_path)
# Add a blank line before the traceback, otherwise it's
# too easy to miss the relevant part of the error message.
self.stderr.write("")
raise processed
if processed:
self.log("Post-processed '%s' as '%s'" %
(original_path, processed_path), level=1)
self.post_processed_files.append(original_path)
else:
self.log("Skipped post-processing '%s'" % original_path)
return {
'modified': self.copied_files + self.symlinked_files,
'unmodified': self.unmodified_files,
'post_processed': self.post_processed_files,
}
def handle_noargs(self, **options):
self.set_options(**options)
message = ['\n']
if self.dry_run:
message.append(
'You have activated the --dry-run option so no files will be modified.\n\n'
)
message.append(
'You have requested to collect static files at the destination\n'
'location as specified in your settings'
)
if self.is_local_storage() and self.storage.location:
destination_path = self.storage.location
message.append(':\n\n %s\n\n' % destination_path)
else:
destination_path = None
message.append('.\n\n')
if self.clear:
message.append('This will DELETE EXISTING FILES!\n')
else:
message.append('This will overwrite existing files!\n')
message.append(
'Are you sure you want to do this?\n\n'
"Type 'yes' to continue, or 'no' to cancel: "
)
if self.interactive and input(''.join(message)) != 'yes':
raise CommandError("Collecting static files cancelled.")
collected = self.collect()
modified_count = len(collected['modified'])
unmodified_count = len(collected['unmodified'])
post_processed_count = len(collected['post_processed'])
if self.verbosity >= 1:
template = ("\n%(modified_count)s %(identifier)s %(action)s"
"%(destination)s%(unmodified)s%(post_processed)s.\n")
summary = template % {
'modified_count': modified_count,
'identifier': 'static file' + ('' if modified_count == 1 else 's'),
'action': 'symlinked' if self.symlink else 'copied',
'destination': (" to '%s'" % destination_path if destination_path else ''),
'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
'post_processed': (collected['post_processed'] and
', %s post-processed'
% post_processed_count or ''),
}
self.stdout.write(summary)
def log(self, msg, level=2):
"""
Small log helper
"""
if self.verbosity >= level:
self.stdout.write(msg)
def is_local_storage(self):
return isinstance(self.storage, FileSystemStorage)
def clear_dir(self, path):
"""
Deletes the given relative path using the destination storage backend.
"""
dirs, files = self.storage.listdir(path)
for f in files:
fpath = os.path.join(path, f)
if self.dry_run:
self.log("Pretending to delete '%s'" %
smart_text(fpath), level=1)
else:
self.log("Deleting '%s'" % smart_text(fpath), level=1)
self.storage.delete(fpath)
for d in dirs:
self.clear_dir(os.path.join(path, d))
def delete_file(self, path, prefixed_path, source_storage):
"""
Checks if the target file should be deleted if it already exists
"""
if self.storage.exists(prefixed_path):
try:
# When was the target file modified last time?
target_last_modified = \
self.storage.modified_time(prefixed_path)
except (OSError, NotImplementedError, AttributeError):
# The storage doesn't support ``modified_time`` or failed
pass
else:
try:
# When was the source file modified last time?
source_last_modified = source_storage.modified_time(path)
except (OSError, NotImplementedError, AttributeError):
pass
else:
# The full path of the target file
if self.local:
full_path = self.storage.path(prefixed_path)
else:
full_path = None
# Skip the file if the source file is younger
# Avoid sub-second precision (see #14665, #19540)
if (target_last_modified.replace(microsecond=0)
>= source_last_modified.replace(microsecond=0)):
if not ((self.symlink and full_path
and not os.path.islink(full_path)) or
(not self.symlink and full_path
and os.path.islink(full_path))):
if prefixed_path not in self.unmodified_files:
self.unmodified_files.append(prefixed_path)
self.log("Skipping '%s' (not modified)" % path)
return False
# Then delete the existing file if really needed
if self.dry_run:
self.log("Pretending to delete '%s'" % path)
else:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
return True
def link_file(self, path, prefixed_path, source_storage):
"""
Attempt to link ``path``
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.symlinked_files:
return self.log("Skipping '%s' (already linked earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally link the file
if self.dry_run:
self.log("Pretending to link '%s'" % source_path, level=1)
else:
self.log("Linking '%s'" % source_path, level=1)
full_path = self.storage.path(prefixed_path)
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
try:
if os.path.lexists(full_path):
os.unlink(full_path)
os.symlink(source_path, full_path)
except AttributeError:
import platform
raise CommandError("Symlinking is not supported by Python %s." %
platform.python_version())
except NotImplementedError:
import platform
raise CommandError("Symlinking is not supported in this "
"platform (%s)." % platform.platform())
except OSError as e:
raise CommandError(e)
if prefixed_path not in self.symlinked_files:
self.symlinked_files.append(prefixed_path)
def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
if prefixed_path not in self.copied_files:
self.copied_files.append(prefixed_path)
| mit |
damonkohler/sl4a | python/src/Lib/plat-irix5/GET.py | 132 | 1025 | # Symbols from <gl/get.h>
from warnings import warnpy3k
warnpy3k("the GET module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
BCKBUFFER = 0x1
FRNTBUFFER = 0x2
DRAWZBUFFER = 0x4
DMRGB = 0
DMSINGLE = 1
DMDOUBLE = 2
DMRGBDOUBLE = 5
HZ30 = 0
HZ60 = 1
NTSC = 2
HDTV = 3
VGA = 4
IRIS3K = 5
PR60 = 6
PAL = 9
HZ30_SG = 11
A343 = 14
STR_RECT = 15
VOF0 = 16
VOF1 = 17
VOF2 = 18
VOF3 = 19
SGI0 = 20
SGI1 = 21
SGI2 = 22
HZ72 = 23
GL_VIDEO_REG = 0x00800000
GLV_GENLOCK = 0x00000001
GLV_UNBLANK = 0x00000002
GLV_SRED = 0x00000004
GLV_SGREEN = 0x00000008
GLV_SBLUE = 0x00000010
GLV_SALPHA = 0x00000020
GLV_TTLGENLOCK = 0x00000080
GLV_TTLSYNC = GLV_TTLGENLOCK
GLV_GREENGENLOCK = 0x0000100
LEFTPLANE = 0x0001
RIGHTPLANE = 0x0002
BOTTOMPLANE = 0x0004
TOPPLANE = 0x0008
NEARPLANE = 0x0010
FARPLANE = 0x0020
## GETDEF = __GL_GET_H__
NOBUFFER = 0x0
BOTHBUFFERS = 0x3
DMINTENSITYSINGLE = 3
DMINTENSITYDOUBLE = 4
MONSPECIAL = 0x20
HZ50 = 3
MONA = 5
MONB = 6
MONC = 7
MOND = 8
MON_ALL = 12
MON_GEN_ALL = 13
CMAPMULTI = 0
CMAPONE = 1
| apache-2.0 |
mahak/keystone | keystone/common/policies/policy.py | 2 | 3808 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from keystone.common.policies import base
deprecated_get_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'get_policy',
check_str=base.RULE_ADMIN_REQUIRED,
)
deprecated_list_policies = policy.DeprecatedRule(
name=base.IDENTITY % 'list_policies',
check_str=base.RULE_ADMIN_REQUIRED,
)
deprecated_update_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'update_policy',
check_str=base.RULE_ADMIN_REQUIRED,
)
deprecated_create_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'create_policy',
check_str=base.RULE_ADMIN_REQUIRED,
)
deprecated_delete_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_policy',
check_str=base.RULE_ADMIN_REQUIRED,
)
DEPRECATED_REASON = (
"The policy API is now aware of system scope and default roles."
)
policy_policies = [
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_policy',
check_str=base.SYSTEM_READER,
# This API isn't really exposed as usable; it's actually deprecated.
# More-or-less adding scope_types to be consistent with other policies.
scope_types=['system'],
description='Show policy details.',
operations=[{'path': '/v3/policies/{policy_id}',
'method': 'GET'}],
deprecated_rule=deprecated_get_policy,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_policies',
check_str=base.SYSTEM_READER,
scope_types=['system'],
description='List policies.',
operations=[{'path': '/v3/policies',
'method': 'GET'}],
deprecated_rule=deprecated_list_policies,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_policy',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Create policy.',
operations=[{'path': '/v3/policies',
'method': 'POST'}],
deprecated_rule=deprecated_create_policy,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_policy',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Update policy.',
operations=[{'path': '/v3/policies/{policy_id}',
'method': 'PATCH'}],
deprecated_rule=deprecated_update_policy,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_policy',
check_str=base.SYSTEM_ADMIN,
scope_types=['system'],
description='Delete policy.',
operations=[{'path': '/v3/policies/{policy_id}',
'method': 'DELETE'}],
deprecated_rule=deprecated_delete_policy,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.TRAIN)
]
def list_rules():
return policy_policies
| apache-2.0 |
u2009cf/spark-radar | examples/src/main/python/mllib/streaming_linear_regression_example.py | 77 | 2117 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Streaming Linear Regression Example.
"""
from __future__ import print_function
# $example on$
import sys
# $example off$
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
# $example on$
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.regression import StreamingLinearRegressionWithSGD
# $example off$
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: streaming_linear_regression_example.py <trainingDir> <testDir>",
file=sys.stderr)
sys.exit(-1)
sc = SparkContext(appName="PythonStreamingLinearRegressionExample")
ssc = StreamingContext(sc, 1)
# $example on$
def parse(lp):
label = float(lp[lp.find('(') + 1: lp.find(',')])
vec = Vectors.dense(lp[lp.find('[') + 1: lp.find(']')].split(','))
return LabeledPoint(label, vec)
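# Illustrative input line (format assumed from the parsing above): a label
# followed by a bracketed, comma-separated feature vector.
#
#   parse("(0.0,[1.0,2.0,3.0])")   # -> LabeledPoint(0.0, [1.0, 2.0, 3.0])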
trainingData = ssc.textFileStream(sys.argv[1]).map(parse).cache()
testData = ssc.textFileStream(sys.argv[2]).map(parse)
numFeatures = 3
model = StreamingLinearRegressionWithSGD()
model.setInitialWeights([0.0, 0.0, 0.0])
model.trainOn(trainingData)
print(model.predictOnValues(testData.map(lambda lp: (lp.label, lp.features))))
ssc.start()
ssc.awaitTermination()
# $example off$
| apache-2.0 |
Wattpad/luigi | luigi/mrrunner.py | 65 | 2641 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The hadoop runner.
This module contains the main() method which will be used to run the
mapper and reducer on the Hadoop nodes.
"""
from __future__ import print_function
try:
import cPickle as pickle
except ImportError:
import pickle
import logging
import os
import sys
import tarfile
import traceback
class Runner(object):
"""
Run the mapper or reducer on hadoop nodes.
"""
def __init__(self, job=None):
self.extract_packages_archive()
self.job = job or pickle.load(open("job-instance.pickle", "rb"))
self.job._setup_remote()
def run(self, kind, stdin=sys.stdin, stdout=sys.stdout):
if kind == "map":
self.job.run_mapper(stdin, stdout)
elif kind == "combiner":
self.job.run_combiner(stdin, stdout)
elif kind == "reduce":
self.job.run_reducer(stdin, stdout)
else:
raise Exception('weird command: %s' % kind)
def extract_packages_archive(self):
if not os.path.exists("packages.tar"):
return
tar = tarfile.open("packages.tar")
for tarinfo in tar:
tar.extract(tarinfo)
tar.close()
if '' not in sys.path:
sys.path.insert(0, '')
def print_exception(exc):
tb = traceback.format_exc()
print('luigi-exc-hex=%s' % tb.encode('hex'), file=sys.stderr)
def main(args=None, stdin=sys.stdin, stdout=sys.stdout, print_exception=print_exception):
"""
Run either the mapper or the reducer from the class instance in the file "job-instance.pickle".
Arguments:
kind -- is either map or reduce
"""
try:
# Set up logging.
logging.basicConfig(level=logging.WARN)
kind = args is not None and args[1] or sys.argv[1]
Runner().run(kind, stdin=stdin, stdout=stdout)
except Exception as exc:
# Dump encoded data that we will try to fetch using mechanize
print_exception(exc)
raise
if __name__ == '__main__':
main()
| apache-2.0 |
chris-chris/tensorflow | tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py | 8 | 27110 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.rnn import core_rnn_cell
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",
"BahdanauAttention",
"hardmax",
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
pass
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(
lambda m: ops.convert_to_tensor(m, name="memory"), memory)
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None:
seq_len_mask = None
else:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
seq_len_batch_size = (
memory_sequence_length.shape[0].value
or array_ops.shape(memory_sequence_length)[0])
def _maybe_mask(m, seq_len_mask):
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
if memory_sequence_length is not None:
message = ("memory_sequence_length and memory tensor batch sizes do not "
"match.")
with ops.control_dependencies([
check_ops.assert_equal(
seq_len_batch_size, m_batch_size, message=message)]):
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
else:
return m
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self, query_layer, memory, memory_sequence_length=None,
memory_layer=None, check_inner_dims_defined=True,
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
name: Name to use when creating ops.
"""
if (query_layer is not None
and not isinstance(query_layer, layers_base.Layer)):
raise TypeError(
"query_layer is not a Layer: %s" % type(query_layer).__name__)
if (memory_layer is not None
and not isinstance(memory_layer, layers_base.Layer)):
raise TypeError(
"memory_layer is not a Layer: %s" % type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
with ops.name_scope(
name, "BaseAttentionMechanismInit", nest.flatten(memory)):
self._values = _prepare_memory(
memory, memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
self._batch_size = (
self._keys.shape[0].value or array_ops.shape(self._keys)[0])
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
name="LuongAttention"):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
name: Name to use when creating ops.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match the expected query depth.
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False),
memory=memory,
memory_sequence_length=memory_sequence_length,
name=name)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
Returns:
score: Tensor of dtype matching `self.values` and shape
`[batch_size, max_time]` (`max_time` is memory's `max_time`).
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = self.keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the the keys' dimension (%s)?"
% (query, depth, self.keys, key_units, key_units))
dtype = query.dtype
with variable_scope.variable_scope(None, "luong_attention", [query]):
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, self.keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if self._scale:
# Scalar used in weight scaling
g = variable_scope.get_variable(
"attention_g", dtype=dtype, initializer=1.)
score = g * score
return score
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bhadanau-style (additive) attention.
This attention has two forms. The first is Bhandanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
name: Name to use when creating ops.
"""
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False),
memory=memory,
memory_sequence_length=memory_sequence_length,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
Returns:
score: Tensor of dtype matching `self.values` and shape
`[batch_size, max_time]` (`max_time` is memory's `max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
dtype = processed_query.dtype
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
keys = self._keys
v = variable_scope.get_variable(
"attention_v", [self._num_units], dtype=dtype)
if self._normalize:
# Scalar used in weight normalization
g = variable_scope.get_variable(
"attention_g", dtype=dtype,
initializer=math.sqrt((1. / self._num_units)))
# Bias added prior to the nonlinearity
b = variable_scope.get_variable(
"attention_b", [self._num_units], dtype=dtype,
initializer=init_ops.zeros_initializer())
# normed_v = g * v / ||v||
normed_v = g * v * math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(v)))
score = math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + b), [2])
else:
score = math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query),
[2])
return score
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time",
"alignment_history"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell`.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignment_history`: (if enabled) a `TensorArray` containing alignment
matrices from all time steps. Call `stack()` to convert to a `Tensor`.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overriden properties as provided in `kwargs`.
"""
return super(AttentionWrapperState, self)._replace(**kwargs)
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if logits.get_shape()[-1].value is not None:
depth = logits.get_shape()[-1].value
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
class AttentionWrapper(core_rnn_cell.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
probability_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
"""Construct the `AttentionWrapper`.
Args:
cell: An instance of `RNNCell`.
attention_mechanism: An instance of `AttentionMechanism`.
attention_layer_size: Python integer, the depth of the attention (output)
layer. If None (default), use the context as attention at each time
step. Otherwise, feed the context and cell output into the attention
layer to generate attention at each time step.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
"""
super(AttentionWrapper, self).__init__(name=name)
if not isinstance(cell, core_rnn_cell.RNNCell):
raise TypeError(
"cell must be an RNNCell, saw type: %s" % type(cell).__name__)
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be a AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if probability_fn is None:
probability_fn = nn_ops.softmax
else:
if not callable(probability_fn):
raise TypeError(
"probability_fn must be callable, saw type: %s"
% type(probability_fn).__name__)
if attention_layer_size is not None:
self._attention_layer = layers_core.Dense(
attention_layer_size, name="attention_layer", use_bias=False)
self._attention_size = attention_layer_size
else:
self._attention_layer = None
self._attention_size = attention_mechanism.values.get_shape()[-1].value
self._cell = cell
self._attention_mechanism = attention_mechanism
self._cell_input_fn = cell_input_fn
self._probability_fn = probability_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
[check_ops.assert_equal(state_batch_size,
self._attention_mechanism.batch_size,
message=error_message)]):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
@property
def output_size(self):
if self._output_attention:
return self._attention_size
else:
return self._cell.output_size
@property
def state_size(self):
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_size,
alignment_history=()) # alignment_history is sometimes a TensorArray
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
[check_ops.assert_equal(batch_size,
self._attention_mechanism.batch_size,
message=error_message)]):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
if self._alignment_history:
alignment_history = tensor_array_ops.TensorArray(
dtype=dtype, size=0, dynamic_size=True)
else:
alignment_history = ()
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(self._attention_size, batch_size,
dtype),
alignment_history=alignment_history)
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
      - `attention_or_cell_output` is the attention output if `output_attention`
        is `True`, otherwise the wrapped cell's output.
      - `next_state` is an instance of `AttentionWrapperState`
         containing the state calculated at this time step.
"""
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or array_ops.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
[check_ops.assert_equal(cell_batch_size,
self._attention_mechanism.batch_size,
message=error_message)]):
cell_output = array_ops.identity(
cell_output, name="checked_cell_output")
score = self._attention_mechanism(cell_output)
alignments = self._probability_fn(score)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, attention_mechanism.num_units]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, attention_mechanism.num_units].
# we then squeeze out the singleton dim.
attention_mechanism_values = self._attention_mechanism.values
context = math_ops.matmul(expanded_alignments, attention_mechanism_values)
context = array_ops.squeeze(context, [1])
if self._attention_layer is not None:
attention = self._attention_layer(
array_ops.concat([cell_output, context], 1))
else:
attention = context
if self._alignment_history:
alignment_history = state.alignment_history.write(
state.time, alignments)
else:
alignment_history = ()
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
alignment_history=alignment_history)
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
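# A rough usage sketch (hedged: LuongAttention and any RNN driver are assumed to be
# available from the surrounding seq2seq/rnn modules; they are not defined in this
# excerpt):
#
#   attention_mechanism = LuongAttention(num_units, memory=encoder_outputs,
#                                        memory_sequence_length=source_lengths)
#   attn_cell = AttentionWrapper(decoder_cell, attention_mechanism,
#                                attention_layer_size=num_units,
#                                alignment_history=True)
#   initial_state = attn_cell.zero_state(batch_size, dtypes.float32)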
| apache-2.0 |
ctbrennan/cross-platform-message-analytics | parse_analyze.py | 1 | 33998 | import sys
#sys.path.insert(0, '/home/connor/Documents/prj/parser/FB-Message-Parser/')
import fb_parser
import imessage_export
from collections import OrderedDict
import json
from datetime import datetime
from wordcloud import WordCloud
import calendar
import os, os.path #parseAllThreads
import nltk
import xml.etree.ElementTree as ET #parseSMS
import itertools
from titlecase import titlecase #for name/alias matching
from sklearn.metrics.pairwise import cosine_similarity #finding similarity between two texts, requires scipy
from sklearn.feature_extraction.text import TfidfVectorizer #make tf-idf matrix
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
import numpy as np
import re #remove emojis
import twython
#=====================================================================================================
# Parsing Messages and Contact Info
#=====================================================================================================
me = None
personDict = {} #name -> dateTime -> tuple of messages
fullTextDict = {} #dateTime -> tuple of everyone's messages
vCardDict = {} #phone number/email -> name
aliasDict = {} #alias1 -> alias2, alias2 -> alias1
STOPWORDS = set([x.strip() for x in open(os.path.join(os.getcwd(),'stopwords.txt')).read().split('\n')])
STOPWORDS.add('none')
"""
For parsing all Facebook messages into dictionaries
"""
def parseFBMessages(confident):
if not os.path.isfile('messages.json') :
with open('messages.htm', "r") as f:
chat = fb_parser.html_to_py(f)
# Dump to json to prove works:
fb_parser.py_to_json(chat)
jsonFile = json.loads(open('messages.json').read())
newPersonDict = {}
newFullTextDict = {}
for person in jsonFile['threads']:
for message in person['messages']:
sender = titlecase(message['sender'])
text = message['text']
dateFormatted = datetime.strptime(message['date_time'], '%A, %B %d, %Y at %I:%M%p ')
addToNewDict(newPersonDict, dateFormatted, text, sender)
addToNewDict(newFullTextDict, dateFormatted, text)
if 'y' in input("Enter 'y' if you would like to match duplicate names on Facebook"):
matchDuplicates(newPersonDict)
mergeAndSortPersonDict(newPersonDict, confident)
mergeAndSortFullTextDict(newFullTextDict)
"""
Parsing .xml file containing all sms ("Super Backup" for Android)
"""
def parseSMS(me, confident):
def parseSuperBackup():
tree = ET.parse('allsms.xml') #fix this later
root = tree.getroot()
newPersonDict = {}
newFullTextDict = {}
newNames = []
notFound = []
for message in root:
phoneNumber = formatPhoneNumber(message.attrib['address'])
if message.attrib['type'] == '2':
sender = me
elif message.attrib['name']:
sender = titlecase(message.attrib['name'])
elif phoneNumber in vCardDict.keys():
sender = titlecase(vCardDict[phoneNumber])
if sender not in newNames:
newNames.append(sender)
else:
continue #don't add plain phone numbers
date = message.attrib['time']
text = message.attrib['body']
dateFormatted = datetime.strptime(date, '%b %d, %Y %I:%M:%S %p') #"Jul 10, 2016 8:28:10 PM"
addToNewDict(newPersonDict, dateFormatted, text, sender)
addToNewDict(newFullTextDict, dateFormatted, text)
if 'y' in input("Enter 'y' if you would like to match duplicate names from Android SMS"):
matchDuplicates(newPersonDict)
mergeAndSortPersonDict(newPersonDict, confident)
mergeAndSortFullTextDict(newFullTextDict)
parseVCF()
parseSuperBackup()
return
"""
For parsing all imessage threads into dictionaries
"""
def parseAllThreads(folder=None):
global vCardDict
newPersonDict = {}
newFullTextDict = {}
def parseThread(me, fileName):
if vCardDict == {}:
parseVCF()
jsonFile = json.loads(open(fileName).read())
number = formatPhoneNumber(jsonFile['messages'][0]['handle_id'])
if number in vCardDict.keys():
person = vCardDict[number]
else:
return 1
for message in jsonFile['messages']:
fromMe = message['is_from_me']
date = message['date']
text = message['text']
sender = me if fromMe else titlecase(person)
if sender not in newPersonDict.keys():
newPersonDict[sender] = {}
dateFormatted = datetime.strptime(date, '%Y-%m-%d %H:%M:%S') #"2016-01-13 23:36:32"
addToNewDict(newPersonDict, dateFormatted, text, sender)
addToNewDict(newFullTextDict, dateFormatted, text)
return 0
if not folder or not os.path.exists(folder):
wantsToParse = True if 'y' in input("Enter 'y' if you would like to parse your iMessageDatabase (please make a backup first)") else False
if not wantsToParse:
return
folder = folder if folder else "./chatparsed/threads/"
for file in os.listdir(os.getcwd()): #for file in working directory
if file.endswith(".db"):
sqlPath = file
break
#imessage_export.main("-i " + sqlPath, "-o " + folder)
imessage_export.main(sqlPath, folder)
for root, _, files in list(os.walk(folder)):
for f in files:
fullpath = os.path.join(root, f)
parseThread(me, fullpath)
if 'y' in input("Enter 'y' if you would like to match duplicates from iMessage"):
matchDuplicates(newPersonDict)
mergeAndSortPersonDict(newPersonDict)
mergeAndSortFullTextDict(newFullTextDict)
"""
Parses file of all vcard data into dictionary mapping phone number/email to name.
All vcards must be in same file. Handles VCF versions 2.x and 3.x
"""
def parseVCF():
global vCardDict
def parseVCF3():
for line in vcfFile:
if line.startswith('FN'):
currName = line[3:len(line)-1]
elif line.startswith('TEL;') or line.startswith('EMAIL'):
index = line.find('pref:')
number = line[index + 5:]
if index == -1:
					index = number.find(":")
number = number[index + 1:]
number = number.rstrip()
number = formatPhoneNumber(number) #trying this out
if currName:
vCardDict[number] = currName
else: #currName is still None, haven't found a name yet
vCardDict[number] = number
def parseVCF2():
for line in vcfFile:
if line.startswith('FN'):
currName = line[3:len(line)-1]
elif line.startswith('TEL;') or line.startswith('EMAIL'):
index = line.find(':')
number = line[index+1:]
number = number.rstrip()
number = formatPhoneNumber(number) #trying this out
if currName:
vCardDict[number] = currName
else:#currName is still None, haven't found a name yet
vCardDict[number] = number
currName = None #necessary in case first contact has no name associated with it
vcfFile = open('allme.vcf', 'r')#need to change to below, eventually
"""
#only works if there's only one .vcf file
for file in os.listdir(os.getcwd()): #for file in working directory
if file.endswith(".vcf"):
vcfFile = file
break
"""
i = 0
for line in vcfFile: #hacky, consider changing
i += 1
if line.startswith('VERSION'):
if '3.' in line:
parseVCF3()
else:
parseVCF2()
"""
Adds a new message entry to the new dictionary. Handles both fullTextDict and personDict.
"""
def addToNewDict(newDict, dateFormatted, text, sender = None):
if not areEnglishCharacters(sender):
return
if sender is None: #newFullTextDict
if dateFormatted in newDict.keys():
if text not in newDict[dateFormatted]:
currLst = list(newDict[dateFormatted])
currLst.append(text)
newDict[dateFormatted] = tuple(currLst)
else:
newDict[dateFormatted] = tuple([text])
else: #newPersonDict
if sender not in newDict.keys():
newDict[sender] = {}
if dateFormatted in newDict[sender].keys():
if text not in newDict[sender][dateFormatted]:
currLst = list(newDict[sender][dateFormatted])
currLst.append(text)
newDict[sender][dateFormatted] = tuple(currLst)
else:
newDict[sender][dateFormatted] = tuple([text])
"""
Merges a newDict, which is newly parsed from some platform, with the dictionary that already exists
"""
def mergeAndSortPersonDict(newDict, confident):
global personDict
if personDict == {}:
personDict = newDict
for name in personDict:
personDict[name] = OrderedDict(sorted(personDict[name].items(), key=lambda t: t[0]))
return
for name in newDict:
if name not in personDict.keys():
if name not in aliasDict.keys():
matchAlias(personDict.keys(), name, newDict, confident)
trueName = aliasDict[name]
if trueName in aliasDict.keys():
trueName = aliasDict[trueName]
else:
trueName = name
for date in newDict[name]:
if trueName not in personDict.keys():
personDict[trueName] = {}
if date not in personDict[trueName]:
personDict[trueName][date] = newDict[name][date]
else:
personDict[trueName][date] = combineMessageTuples(personDict[trueName][date], newDict[name][date])
personDict[trueName] = OrderedDict(sorted(personDict[trueName].items(), key=lambda t: t[0]))
def mergeAndSortFullTextDict(newDict):
global fullTextDict
if fullTextDict == {}:
fullTextDict = newDict
fullTextDict = OrderedDict(sorted(fullTextDict.items(), key=lambda t: t[0]))
return
for date in newDict:
if date not in fullTextDict:
fullTextDict[date] = newDict[date]
else:
fullTextDict[date] = combineMessageTuples(fullTextDict[date], newDict[date])
fullTextDict = OrderedDict(sorted(fullTextDict.items(), key=lambda t: t[0]))
#=====================================================================================================
# Combining Contacts
#=====================================================================================================
"""
add to alias dictionary a mapping from each name in otherNames to a name that's in existing names,
or adds a new name if no good match is found.
Step 1: compile list of possible matches by using minEditDistance (need to deal with middle names, non-English characters, initials for last names, shortened first names)
Step 2: If there are a few possible matches, sort possible matches using elements of writing style, and ask user to confirm
"""
def matchAliases(existingNames, otherNames, otherNamesDict, confident):
CUTOFFSCORE = 2 #play around with this
for otherName in otherNames:
candidates = possMatches(otherName, existingNames) #list of possible matches (determined by small edit distance)
topCandidate, bestScore = candidates[0]
correctMatch = False
if not confident and bestScore < CUTOFFSCORE:
if otherName.isdigit(): #phone number
aliasDict[otherName] = otherName
			#if candidates[1][1] >= bestScore - 1: #multiple best matches within 1 of each other
			elif candidates[1][1] == bestScore: #multiple best matches equal to each other
writingStyleSimilarityDict = {} #candidate existingName -> similarity to otherName
toCompare = [candidates[0][0]]
for candidate in candidates:
if candidate[1] == bestScore:
writingStyleSimilarityDict[candidate[0]] = writingStyleMatchScore(otherName, otherNamesDict, candidate[0])
topCandidates = sorted(writingStyleSimilarityDict.keys(), key = lambda x: -writingStyleSimilarityDict[x])
i = 0
while not correctMatch and i < len(topCandidates):
topCandidate = topCandidates[i]
correctMatch = True if 'y' in input("Enter 'y' if " + otherName + " should be matched with " + topCandidate + ": ") else False
i += 1
else:
correctMatch = True if 'y' in input("Enter 'y' if " + otherName + " should be matched with " + topCandidate + ": ") else False
if correctMatch:
aliasDict[otherName] = topCandidate
else:
aliasDict[otherName] = titlecase(otherName)
elif confident:
aliasDict[otherName] = topCandidate
else:
aliasDict[otherName] = titlecase(otherName)
def matchAlias(existingNames, otherName, otherNamesDict, confident):
matchAliases(existingNames, [otherName], otherNamesDict, confident)
return aliasDict[otherName]
def matchDuplicates(newDict):
tried = {}
CUTOFFSCORE = 1 #play around with this
for name in newDict:
if name in aliasDict.values():
continue
candidates = possMatches(name, newDict.keys(), 3, False) #list of possible matches (determined by small edit distance)
correctMatch = False
i = 0
while not correctMatch and i < len(candidates) and candidates[i][1] <= CUTOFFSCORE:
topCandidate = candidates[i][0]
pair1 = (topCandidate, name)
if pair1 in tried.keys():
i += 1
continue
pair2 = (name, topCandidate)
tried[pair1] = True
tried[pair2] = True
if topCandidate != name:
correctMatch = True if 'y' in input("Enter 'y' if " + topCandidate + " is a duplicate of " + name + " on the same platform: ") else False
i += 1
if correctMatch:
aliasDict[name] = topCandidate
return
def possMatches(name, existingNames, number=5, diffDics = True):
if name in existingNames and diffDics:
return [(name,0)]
similarityScores = {} #existing name -> min edit distance
for existingName in existingNames:
score = nameSimilarityScore(name, existingName)
similarityScores[existingName] = score
sortedByScore = sorted(similarityScores.items(), key = lambda x: x[1])[:number] #?
return sortedByScore
def nameSimilarityScore(w1, w2):
def minEditDistance(partW1, partW2):
def diff(a, b):
return 0 if a == b else 1
if len(partW1) == 0 or len(partW2)==0:
return max(len(partW1), len(partW2))
table = [[0 for _ in range(len(partW2))] for _ in range(len(partW1))]
for i in range(0, len(partW1)):
table[i][0] = i
for j in range(1, len(partW2)):
table[0][j] = j
for i in range(1, len(partW1)):
for j in range(1, len(partW2)):
table[i][j] = min(table[i-1][j] + 1, table[i][j-1] + 1, table[i-1][j-1] + diff(partW1[i], partW2[j]))
return table[len(partW1) - 1][len(partW2) - 1]
FIRSTNAMEMATCHSCORE = .1 #play around with this value
allPartScore = 0
splitW1 = w1.split(" ")
splitW2 = w2.split(" ")
	if splitW1[0] == splitW2[0] or splitW1[0].startswith(splitW2[0]) or splitW2[0].startswith(splitW1[0]): #first names match, or one is a prefix of the other
if splitW1[len(splitW1)-1] == splitW2[len(splitW2)-1]: #first and last name match
return 0
else:
if len(splitW1) == 1 or len(splitW2) == 1:#one of the names is just a first name
return FIRSTNAMEMATCHSCORE
else: #both are more than just a first name
splitW1 = splitW1[1:]
splitW2 = splitW2[1:]
w1 = " ".join(splitW1)
w2 = " ".join(splitW2)
return minEditDistance(w1, w2)
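# Illustrative behaviour of nameSimilarityScore, derived from the code above
# (example names are hypothetical):
#   nameSimilarityScore("Jon Smith", "Jonathan Smith") -> 0    (first name is a prefix, last names match)
#   nameSimilarityScore("Jon", "Jonathan Smith")       -> 0.1  (FIRSTNAMEMATCHSCORE: only a first name given)
#   Otherwise the score falls back to minEditDistance on the remaining name parts.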
"""
computes the cosine similarity of the tfidf vectors formed from all the words the unknown person and possible match have typed
"""
def writingStyleMatchScore(otherName, otherNamesDict, possibleExistingMatch):
#http://blog.christianperone.com/2013/09/machine-learning-cosine-similarity-for-vector-space-models-part-iii/
existingNameText = " ".join(fullMessageList(possibleExistingMatch))
otherNameText = " ".join(fullMessageList(otherName, otherNamesDict))
tfidf_vectorizer = TfidfVectorizer(min_df=1)
tfidf_matrix = tfidf_vectorizer.fit_transform(tuple([existingNameText, otherNameText]))
similarity = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:])[0][0]
return similarity
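# Minimal sketch of the same tf-idf / cosine-similarity idea on raw strings
# (toy inputs, not part of the original pipeline):
#
#   vec = TfidfVectorizer(min_df=1)
#   m = vec.fit_transform(("the cat sat on the mat", "the cat sat on the hat"))
#   cosine_similarity(m[0:1], m[1:])[0][0]  # float in [0, 1]; higher = more similar word usage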
#=====================================================================================================
# Analytics/Fun
#=====================================================================================================
def wordCloud(person):
text = getAllMessagesAsString(person)
# Generate a word cloud image
wordcloud = WordCloud().generate(text)
# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
# take relative word frequencies into account, lower max_font_size
wordcloud = WordCloud(max_font_size=40, relative_scaling=.5).generate(text)
plt.figure()
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
def mostCommonNGrams(n, number, person = None):
"""
get common phrases of length n
"""
from nltk import ngrams
if person:
wordList = fullWordListPerson(person)
else:
wordList = fullWordList()
grams = list(ngrams(wordList, n))
counts = {}
for gram in grams:
if gram not in counts:
counts[gram] = 1
else:
counts[gram] += 1
return sorted(counts.keys(), key = lambda x: -counts[x])[:number]
def plotTopFriends(number = 15):
import matplotlib.pyplot as plt
earliestDateTime = min(fullTextDict.keys())
earliestYear = earliestDateTime.year
lastDateTime = max(fullTextDict.keys())
lastYear = lastDateTime.year
messageCount = {} #key = person, value = [month1count, month2count, ...]
topFriendsList = topFriends(number)
for friend in topFriendsList:
messageCount[friend] = []
for year in range(earliestYear, lastYear+1):
for month in range(1, 13):
monthStart = datetime(int(year), int(month), 1)
monthEnd = datetime(int(year), int(month), calendar.monthrange(int(year),int(month))[1])
for friend in topFriendsList:
messageCount[friend].append((monthStart, numMessagesMonth(friend, monthStart, monthEnd)))
for friend, mcTups in sorted(messageCount.items(), key= lambda x: -sum([count for month, count in x[1]])):
counts = [count for (monthStart, count) in mcTups]
months = [monthStart for (monthStart, count) in mcTups]
plt.plot(months, counts, label = friend)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def plotFriendSentiment(number = 15):
import matplotlib.pyplot as plt
earliestDateTime = min(fullTextDict.keys())
earliestYear = earliestDateTime.year
lastDateTime = max(fullTextDict.keys())
lastYear = lastDateTime.year
sentiments = {} #key = person, value = [month1sent, month2sent, ...]
topFriendsList = topFriends(number)
for friend in topFriendsList:
sentiments[friend] = []
for year in range(earliestYear, lastYear+1):
for month in range(1, 13):
monthStart = datetime(int(year), int(month), 1)
monthEnd = datetime(int(year), int(month), calendar.monthrange(int(year),int(month))[1])
for friend in topFriendsList:
sentiments[friend].append((monthStart, personAvgSentiment(friend, month, year)))
for friend, sentTups in sorted(sentiments.items(), key= lambda x: -sum([sent for month, sent in x[1]])):
sents = [sent for (monthStart, sent) in sentTups]
months = [monthStart for (monthStart, sent) in sentTups]
plt.plot(months, sents, label = friend)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
def mostSimilarFriends():
return maxPairWritingStyleMatchScore(personDict.keys())
def maxPairWritingStyleMatchScore(people, number = 10):
#http://blog.christianperone.com/2013/09/machine-learning-cosine-similarity-for-vector-space-models-part-iii/
textList = []
orderingDict = {} # i -> name
scoreDict = {} #(personi, personj) -> score
i = 0
for p in people:
pText = " ".join(fullMessageList(p))
textList.append(pText)
orderingDict[i] = p
i += 1
tfidf_vectorizer = TfidfVectorizer(min_df=1)
tfidf_matrix = tfidf_vectorizer.fit_transform(tuple(textList))
for i in range(tfidf_matrix.shape[0]):
for j in range(tfidf_matrix.shape[0]):
if i < j and len(personDict[orderingDict[i]]) > 100 and len(personDict[orderingDict[j]]) > 100: #minimum of 100 messages for both people
score = cosine_similarity(tfidf_matrix[i:i+1], tfidf_matrix[j:j+1])[0][0]
if len(scoreDict) <= number: #number of pairs
scoreDict[(i,j)] = score
else:
sortedScores = sorted(scoreDict.items(), key = lambda x: x[1])
leastSim = sortedScores[0]
if leastSim[1] < score:
del scoreDict[leastSim[0]]
scoreDict[(i,j)] = score
# del scoreDict[sortedScores[0][0]]
# scoreDict[(i,j)] = similarities[j]
return [(orderingDict[i], orderingDict[j], score) for (i,j), score in sorted(scoreDict.items(), key = lambda x: -x[1])]
"""
Takes tf_idf vectors of top friends, identifies the most distinguishing features (PCA), and reduces the dimensionality while seeking to preserve distances
between vectors, and projects the vectors onto 2D plane to show similarity between friends' word choice.
"""
def similarityPlot():
import matplotlib.pyplot as plt
from matplotlib import rcParams
tfidf_vectorizer = TfidfVectorizer(min_df=1)
names = friendsAboveMinNumMessages(200) + [me]
data = []
words = [] #ordering of words in tf_idf matrix
wordsSet = set() #for faster lookup
nameSet = set()
for person in personDict:
for name in person.split():
nameSet.add(name)
nameSet.add(name.lower())
for i in range(len(names)):
data.append(getAllMessagesAsString(names[i], False))
tfidf_matrix = tfidf_vectorizer.fit_transform(data)
featureNames = tfidf_vectorizer.get_feature_names()
tfidf_arr = tfidf_matrix.toarray()
for j in range(len(tfidf_arr[0])):
word = tfidf_arr[0][j]
if word not in wordsSet:
words.append(word)
wordsSet.add(j)
#nmds = manifold.MDS(metric = True, n_components = N_DISTINGUISHING_FEATURES)
#npos = nmds.fit_transform(tfidf_matrix.toarray())
clf = PCA(n_components=2)
npos = clf.fit_transform(tfidf_arr)
plt.scatter(npos[:, 0], npos[:, 1], marker = 'o', c = 'b', cmap = plt.get_cmap('Spectral')) #change colors
for name, x, y in zip(names, npos[:, 0], npos[:, 1]):
plt.annotate(
name,
xy = (x, y), xytext = (-20, 20),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
fig, ax = plt.subplots()
ax2 = ax.twinx()
xAxisP = [featureNames[i] for i in np.argpartition(clf.components_[0], -50)[-50:] if featureNames[i] not in nameSet]
yAxisP = [featureNames[i] for i in np.argpartition(clf.components_[1], -50)[-50:] if featureNames[i] not in nameSet]
xAxisN = [featureNames[i] for i in np.argpartition(-clf.components_[0], -50)[-50:] if featureNames[i] not in nameSet]
yAxisN = [featureNames[i] for i in np.argpartition(-clf.components_[1], -50)[-50:] if featureNames[i] not in nameSet]
ax.set_xlabel("Most Postively influential words along x axis:\n" + ", ".join(xAxisP), fontsize=18)
ax.set_ylabel("Most Postively influential words along y axis:\n" + ", ".join(yAxisP), fontsize=18)
ax2.set_xlabel("Most Negatively influential words along x axis:\n" + ", ".join(xAxisN), fontsize=18)
ax2.set_ylabel("Most Negatively influential words along y axis:\n" + ", ".join(yAxisN), fontsize=18)
# xAxis = [featureNames[i] for i in np.argpartition(np.absolute(clf.components_[0]), -50)[-50:] if featureNames[i] not in nameSet]
# yAxis = [featureNames[i] for i in np.argpartition(np.absolute(clf.components_[1]), -50)[-50:] if featureNames[i] not in nameSet]
# for i in range(1, max(len(xAxis), len(yAxis)) ):
# if i % 20 == 0 and i < len(xAxis):
# xAxis[i] += "\n"
# if i % 15 == 0 and i < len(yAxis):
# yAxis[i] += "\n"
# plt.xlabel("Most influential words along x axis:\n" + ", ".join(xAxis), fontsize=18)
# plt.ylabel("Most influential words along y axis:\n" + ", ".join(yAxis), fontsize=18)
rcParams.update({'figure.autolayout': True})
plt.suptitle("Word-Usage Similarity Scatterplot", fontsize = 24, fontweight = 'bold')
plt.show()
def similarityPlot3():
import matplotlib.pyplot as plt
from matplotlib import rcParams
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
tfidf_vectorizer = TfidfVectorizer(min_df=1)
names = friendsAboveMinNumMessages(200) + [me]
data = []
words = [] #ordering of words in tf_idf matrix
wordsSet = set() #for faster lookup
nameSet = set()
for person in personDict:
for name in person.split():
nameSet.add(name)
nameSet.add(name.lower())
for i in range(len(names)):
data.append(getAllMessagesAsString(names[i], False))
tfidf_matrix = tfidf_vectorizer.fit_transform(data)
featureNames = tfidf_vectorizer.get_feature_names()
tfidf_arr = tfidf_matrix.toarray()
for j in range(len(tfidf_arr[0])):
word = tfidf_arr[0][j]
if word not in wordsSet:
words.append(word)
wordsSet.add(j)
clf = PCA(n_components=3)
npos = clf.fit_transform(tfidf_arr)
visualize3DData(npos, clf, featureNames)
def visualize3DData (X, clf, featureNames):
"""Visualize data in 3d plot with popover next to mouse position.
Args:
X (np.array) - array of points, of shape (numPoints, 3)
Returns:
None
"""
import matplotlib.pyplot as plt, numpy as np
from mpl_toolkits.mplot3d import proj3d
fig = plt.figure(figsize = (16,10))
ax = fig.add_subplot(111, projection = '3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], depthshade = False, picker = True)
names = friendsAboveMinNumMessages(200) + [me]
data = []
words = [] #ordering of words in tf_idf matrix
wordsSet = set() #for faster lookup
nameSet = set()
for person in personDict:
for name in person.split():
nameSet.add(name)
nameSet.add(name.lower())
xAxis = [featureNames[i] for i in np.argpartition(np.absolute(clf.components_[0]), -50)[-50:] if featureNames[i] not in nameSet]
yAxis = [featureNames[i] for i in np.argpartition(np.absolute(clf.components_[1]), -50)[-50:] if featureNames[i] not in nameSet]
zAxis = [featureNames[i] for i in np.argpartition(np.absolute(clf.components_[2]), -50)[-50:] if featureNames[i] not in nameSet]
ax.set_xlabel("Most influential words along x axis:\n" + ", ".join(xAxis), fontsize=18)
ax.set_ylabel("Most influential words along y axis:\n" + ", ".join(yAxis), fontsize=18)
ax.set_zlabel("Most influential words along z axis:\n" + ", ".join(zAxis), fontsize=18)
def distance(point, event):
"""Return distance between mouse position and given data point
Args:
point (np.array): np.array of shape (3,), with x,y,z in data coords
event (MouseEvent): mouse event (which contains mouse position in .x and .xdata)
Returns:
distance (np.float64): distance (in screen coords) between mouse pos and data point
"""
assert point.shape == (3,), "distance: point.shape is wrong: %s, must be (3,)" % point.shape
# Project 3d data space to 2d data space
x2, y2, _ = proj3d.proj_transform(point[0], point[1], point[2], plt.gca().get_proj())
# Convert 2d data space to 2d screen space
x3, y3 = ax.transData.transform((x2, y2))
return np.sqrt ((x3 - event.x)**2 + (y3 - event.y)**2)
def calcClosestDatapoint(X, event):
""""Calculate which data point is closest to the mouse position.
Args:
X (np.array) - array of points, of shape (numPoints, 3)
event (MouseEvent) - mouse event (containing mouse position)
Returns:
smallestIndex (int) - the index (into the array of points X) of the element closest to the mouse position
"""
distances = [distance (X[i, 0:3], event) for i in range(X.shape[0])]
return np.argmin(distances)
labelDic = {}
def annotatePlot(X, index):
"""Create popover label in 3d chart
Args:
X (np.array) - array of points, of shape (numPoints, 3)
index (int) - index (into points array X) of item which should be printed
Returns:
None
"""
# If we have previously displayed another label, remove it first
if hasattr(annotatePlot, 'label'):
annotatePlot.label.remove()
# Get data point from array of points X, at position index
if index not in labelDic:
x2, y2, _ = proj3d.proj_transform(X[index, 0], X[index, 1], X[index, 2], ax.get_proj())
labelDic[index] = (x2, y2)
x2, y2 = labelDic[index]
annotatePlot.label = plt.annotate(names[index],
xy = (x2, y2), xytext = (-20, 20), textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
fig.canvas.draw()
def onMouseMotion(event):
"""Event that is triggered when mouse is moved. Shows text annotation over data point closest to mouse."""
closestIndex = calcClosestDatapoint(X, event)
annotatePlot (X, closestIndex)
fig.canvas.mpl_connect('motion_notify_event', onMouseMotion) # on mouse motion
plt.show()
#make messages searchable
#most similar friends in terms of words used
#make function that makes huge png of all messages (maybe in shape of picture)
#phraseCloud
#graph slang usage over time
#=====================================================================================================
# Helpers/Utilities
#=====================================================================================================
def topFriends(number):
temp = sorted(personDict.keys(), key=lambda x: -len(personDict[x]))
i = 0
topFriends = []
for person in temp:
if i < number:
if person != me:
topFriends.append(person)
i += 1
		else:
			return topFriends
	return topFriends
def topFriendsMonth(number, month, year):
monthStart = datetime(int(year), int(month), 1)
monthEnd = datetime(int(year), int(month), calendar.monthrange(int(year),int(month))[1])
temp = sorted(personDict.keys(), key= lambda x: -numMessagesMonth(x, monthStart, monthEnd))
i = 0
topFriends = []
for person in temp:
if i < number:
if person != me:
topFriends.append(person)
i += 1
		else:
			return topFriends
	return topFriends
def friendsAboveMinNumMessages(number = 100):
temp = sorted(personDict.keys(), key=lambda x: -len(personDict[x]))
topFriends = []
for person in temp:
if len(personDict[person]) >= number:
if person != me:
topFriends.append(person)
		else:
			return topFriends
	return topFriends
def getAllMessagesAsString(personStr, includeSW = True):
string = ""
for messages in personDict[personStr].values():
for message in messages:
if not includeSW:
messageWords = message.split()
if type(messageWords) != None and len(messageWords) > 0:
afterRemoval = [word for word in messageWords if word.lower() not in STOPWORDS]
message = ' '.join(afterRemoval)
string += message + " "
return string
def numMessagesMonth(person, monthStart, monthEnd):
count = 0
for datetime in personDict[person]:
if datetime >= monthStart and datetime <= monthEnd:
count += 1
return count
def fullMessageList(name, sourceDict=None):
if not sourceDict:
sourceDict = personDict[name]
fullMessageList = []
for messTups in sourceDict.values():
if type(messTups) == dict:
messTups = messTups.values()
i = 0
if i>0:
break
i += 1
for message in messTups:
if type(message) == tuple:
for mess in message:
fullMessageList.append(mess)
else:
fullMessageList.append(message)
return fullMessageList
def fullMessageListMonth(name, month, year):
messageLst = []
monthStart = datetime(int(year), int(month), 1)
monthEnd = datetime(int(year), int(month), calendar.monthrange(int(year),int(month))[1])
for dt in personDict[name]:
if dt >= monthStart and dt <= monthEnd:
for message in personDict[name][dt]:
messageLst.append(message)
return messageLst
def fullWordList():
fullWordList = []
for messTup in fullTextDict.values():
i = 0
if i>0:
break
i += 1
for mess in messTup:
words = mess.split(" ")
for word in words:
fullWordList.append(word)
return fullWordList
def fullWordListPerson(name, sourceDict=None):
if not sourceDict:
sourceDict = personDict[name]
fullWordList = []
for messTups in sourceDict.values():
if type(messTups) == dict:
messTups = messTups.values()
i = 0
if i>0:
break
i += 1
for message in messTups:
if type(message) == tuple:
for mess in message:
words = mess.split(" ")
for word in words:
fullWordList.append(word)
else:
words = message.split(" ")
for word in words:
fullWordList.append(word)
return fullWordList
def slangList():
from nltk.corpus import words
wordSet = set(words.words())
lst = [slang for slang in fullWordList() if slang not in wordSet and isSlang(slang) and slang[:len(slang) - 1] not in wordSet]
return lst
def isSlang(word):
	wordExceptLast = word[:len(word) - 1]
if isProperNoun(word) or isProperNoun(wordExceptLast):
return False
if 'www' in word or "'" in word:
return False
return True
def isProperNoun(word):
return titlecase(word) == word
# def q():
# from nltk.metrics.association import QuadgramAssocMeasures
# quadgram_measures = QuadgramAssocMeasures()
# finder = QuadgramCollocationFinder.from_words(fullWordList())
# finder.nbest(quadgram_measures.pmi, 10) # doctest: +NORMALIZE_WHITESPACE
def personAvgSentiment(person, month = None, year = None):
from nltk.sentiment.vader import SentimentIntensityAnalyzer
comp = 0
sid = SentimentIntensityAnalyzer()
if month:
msgLst = fullMessageListMonth(person, month, year)
else:
msgLst = fullMessageList(person)
for message in msgLst:
sentimentDict = sid.polarity_scores(message)
comp += sentimentDict['compound']
return comp/len(msgLst) if len(msgLst) != 0 else 0
def messageSentiment(message):
from nltk.sentiment.vader import SentimentIntensityAnalyzer
#may need to download vader_lexicon after calling nltk.download()
sid = SentimentIntensityAnalyzer()
return sid.polarity_scores(message)
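# Example of the VADER output shape (illustrative values only):
#   messageSentiment("I love this!") -> {'neg': 0.0, 'neu': 0.3, 'pos': 0.7, 'compound': 0.6}
#   The 'compound' score is what personAvgSentiment averages per person/month.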
"""
908-872-6993
+13106996932
1 (818) 884-9040
(510) 642-9255
[email protected]
"""
def formatPhoneNumber(pnStr):
if '@' in pnStr or '#' in pnStr or pnStr == "": #or len(pnStr) == 0: #when/why does the length == 0?
return pnStr
reformattedStr = ''.join(filter(lambda x: x.isdigit(), pnStr))
if not reformattedStr:
return
elif reformattedStr[0] == '1':
return reformattedStr[1:]
return reformattedStr
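# Expected behaviour for the sample inputs listed above (derived from the code):
#   '908-872-6993'     -> '9088726993'
#   '+13106996932'     -> '3106996932'   (leading country code '1' stripped)
#   '1 (818) 884-9040' -> '8188849040'
#   '(510) 642-9255'   -> '5106429255'
#   addresses containing '@' or '#' are returned unchanged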
def combineMessageTuples(tup1, tup2):
currLst1 = list(tup1)
currLst2 = list(tup2)
currLst1 += currLst2
return tuple(currLst1)
def areEnglishCharacters(s):
#http://stackoverflow.com/a/27084708
if not s:
return True
try:
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE) #http://stackoverflow.com/a/33417311
s = emoji_pattern.sub(r'', s)
s.encode('ascii')
except UnicodeEncodeError:
return False
else:
return True
def main(username, confident):
global me
me = username
parseFBMessages(confident)
parseSMS(me, confident)
#parseAllThreads()
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2] if len(sys.argv) >=3 else False) | mit |
Basis/webargs | webargs/core.py | 1 | 18589 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import functools
import inspect
import logging
import warnings
try:
import simplejson as json
except ImportError:
import json
import marshmallow as ma
from marshmallow.compat import iteritems
from marshmallow.utils import missing
logger = logging.getLogger(__name__)
__all__ = [
'WebargsError',
'ValidationError',
'argmap2schema',
'is_multiple',
'Parser',
'get_value',
'missing',
'parse_json',
]
DEFAULT_VALIDATION_STATUS = 422
class WebargsError(Exception):
"""Base class for all webargs-related errors."""
pass
class ValidationError(WebargsError, ma.exceptions.ValidationError):
"""Raised when validation fails on user input. Same as
`marshmallow.ValidationError`, with the addition of the ``status_code`` and
``headers`` arguments.
"""
def __init__(self, message, status_code=DEFAULT_VALIDATION_STATUS, headers=None, **kwargs):
self.status_code = status_code
self.headers = headers
ma.exceptions.ValidationError.__init__(self, message, **kwargs)
def __repr__(self):
return 'ValidationError({0!r}, status_code={1}, headers={2})'.format(
self.args[0], self.status_code, self.headers
)
def _callable_or_raise(obj):
"""Makes sure an object is callable if it is not ``None``. If not
callable, a ValueError is raised.
"""
if obj and not callable(obj):
raise ValueError('{0!r} is not callable.'.format(obj))
else:
return obj
def get_field_names_for_argmap(argmap):
if isinstance(argmap, ma.Schema):
all_field_names = set([fname for fname, fobj in iteritems(argmap.fields)
if not fobj.dump_only])
else:
all_field_names = set(argmap.keys())
return all_field_names
def fill_in_missing_args(ret, argmap):
# WARNING: We modify ret in-place
all_field_names = get_field_names_for_argmap(argmap)
missing_args = all_field_names - set(ret.keys())
for key in missing_args:
ret[key] = missing
return ret
def argmap2schema(argmap, instance=False, **kwargs):
"""Generate a `marshmallow.Schema` class given a dictionary of argument
names to `Fields <marshmallow.fields.Field>`.
"""
class Meta(object):
strict = True
attrs = dict(argmap, Meta=Meta)
cls = type(str('ArgSchema'), (ma.Schema,), attrs)
return cls if not instance else cls(**kwargs)
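# Usage sketch (the field names here are illustrative, not part of this module):
#
#     UserArgsSchema = argmap2schema({'name': ma.fields.Str(required=True),
#                                     'age': ma.fields.Int(missing=0)})
#     schema = UserArgsSchema()          # or: argmap2schema({...}, instance=True)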
def is_multiple(field):
"""Return whether or not `field` handles repeated/multi-value arguments."""
return isinstance(field, ma.fields.List) and not hasattr(field, 'delimiter')
def get_mimetype(content_type):
return content_type.split(';')[0].strip() if content_type else None
# Adapted from werkzeug: https://github.com/mitsuhiko/werkzeug/blob/master/werkzeug/wrappers.py
def is_json(mimetype):
"""Indicates if this mimetype is JSON or not. By default a request
is considered to include JSON data if the mimetype is
``application/json`` or ``application/*+json``.
"""
if not mimetype:
return False
if ';' in mimetype: # Allow Content-Type header to be passed
mimetype = get_mimetype(mimetype)
if mimetype == 'application/json':
return True
if mimetype.startswith('application/') and mimetype.endswith('+json'):
return True
return False
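# e.g. is_json('application/json') -> True, is_json('application/vnd.api+json') -> True,
#      is_json('text/html') -> False, is_json(None) -> False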
def get_value(d, name, field):
"""Get a value from a dictionary. Handles ``MultiDict`` types when
``multiple=True``. If the value is not found, return `missing`.
:param dict d: Dictionary to pull the value from.
:param str name: Name of the key.
:param bool multiple: Whether to handle multiple values.
"""
multiple = is_multiple(field)
val = d.get(name, missing)
if multiple and val is not missing:
if hasattr(d, 'getlist'):
return d.getlist(name)
elif hasattr(d, 'getall'):
return d.getall(name)
elif isinstance(val, (list, tuple)):
return val
else:
return [val]
return val
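# Sketch of the multi-value handling (a Werkzeug-style MultiDict is assumed):
#   get_value(MultiDict([('ids', '1'), ('ids', '2')]), 'ids', ma.fields.List(ma.fields.Int()))
#       -> ['1', '2']   # raw values via getlist(); deserialization happens later in the schema
#   get_value({'name': 'x'}, 'age', ma.fields.Int()) -> missing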
def parse_json(s):
if isinstance(s, bytes):
s = s.decode('utf-8')
return json.loads(s)
def _ensure_list_of_callables(obj):
if obj:
if isinstance(obj, (list, tuple)):
validators = obj
elif callable(obj):
validators = [obj]
else:
raise ValueError('{0!r} is not a callable or list of callables.'.format(obj))
else:
validators = []
return validators
class Parser(object):
"""Base parser class that provides high-level implementation for parsing
a request.
Descendant classes must provide lower-level implementations for parsing
different locations, e.g. ``parse_json``, ``parse_querystring``, etc.
:param tuple locations: Default locations to parse.
:param callable error_handler: Custom error handler function.
"""
DEFAULT_LOCATIONS = ('querystring', 'form', 'json',)
DEFAULT_VALIDATION_STATUS = DEFAULT_VALIDATION_STATUS
DEFAULT_VALIDATION_MESSAGE = 'Invalid value.'
#: Maps location => method name
__location_map__ = {
'json': 'parse_json',
'querystring': 'parse_querystring',
'query': 'parse_querystring',
'form': 'parse_form',
'headers': 'parse_headers',
'cookies': 'parse_cookies',
'files': 'parse_files',
}
def __init__(self, locations=None, error_handler=None):
self.locations = locations or self.DEFAULT_LOCATIONS
self.error_callback = _callable_or_raise(error_handler)
#: A short-lived cache to store results from processing request bodies.
self._cache = {}
def _validated_locations(self, locations):
"""Ensure that the given locations argument is valid.
:raises: ValueError if a given locations includes an invalid location.
"""
# The set difference between the given locations and the available locations
# will be the set of invalid locations
valid_locations = set(self.__location_map__.keys())
given = set(locations)
invalid_locations = given - valid_locations
if len(invalid_locations):
msg = "Invalid locations arguments: {0}".format(list(invalid_locations))
raise ValueError(msg)
return locations
def _get_value(self, name, argobj, req, location):
# Parsing function to call
# May be a method name (str) or a function
func = self.__location_map__.get(location)
if func:
if inspect.isfunction(func):
function = func
else:
function = getattr(self, func)
value = function(req, name, argobj)
else:
raise ValueError('Invalid location: "{0}"'.format(location))
return value
def parse_arg(self, name, field, req, locations=None):
"""Parse a single argument from a request.
.. note::
This method does not perform validation on the argument.
:param str name: The name of the value.
:param marshmallow.fields.Field field: The marshmallow `Field` for the request
parameter.
:param req: The request object to parse.
:param tuple locations: The locations ('json', 'querystring', etc.) where
to search for the value.
:return: The unvalidated argument value or `missing` if the value cannot be found
on the request.
"""
location = field.metadata.get('location')
if location:
locations_to_check = self._validated_locations([location])
else:
locations_to_check = self._validated_locations(locations or self.locations)
for location in locations_to_check:
value = self._get_value(name, field, req=req, location=location)
# Found the value; validate and return it
if value is not missing:
return value
return missing
def _parse_request(self, schema, req, locations):
"""Return a parsed arguments dictionary for the current request."""
argdict = schema.fields
parsed = {}
for argname, field_obj in iteritems(argdict):
argname = field_obj.load_from or argname
parsed_value = self.parse_arg(argname, field_obj, req,
locations=locations or self.locations)
parsed[argname] = parsed_value
return parsed
def load(self, data, argmap):
if isinstance(argmap, ma.Schema):
schema = argmap
else:
schema = argmap2schema(argmap)()
if not schema.strict:
warnings.warn("It is highly recommended that you set strict=True on your schema "
"so that the parser's error handler will be invoked when expected.", UserWarning)
return schema.load(data)
def _on_validation_error(self, error):
if (isinstance(error, ma.exceptions.ValidationError) and not
isinstance(error, ValidationError)):
# Raise a webargs error instead
error = ValidationError(
error.messages,
status_code=getattr(error, 'status_code', self.DEFAULT_VALIDATION_STATUS),
headers=getattr(error, 'headers', {}),
field_names=error.field_names,
fields=error.fields,
data=error.data
)
if self.error_callback:
self.error_callback(error)
else:
self.handle_error(error)
def _validate_arguments(self, data, validators):
for validator in validators:
if validator(data) is False:
msg = self.DEFAULT_VALIDATION_MESSAGE
raise ValidationError(msg, data=data)
def _get_schema(self, argmap, req):
"""Return a `marshmallow.Schema` for the given argmap and request.
:param argmap: Either a `marshmallow.Schema`, `dict`
of argname -> `marshmallow.fields.Field` pairs, or a callable that returns
a `marshmallow.Schema` instance.
:param req: The request object being parsed.
:rtype: marshmallow.Schema
"""
if isinstance(argmap, ma.Schema):
schema = argmap
elif callable(argmap):
schema = argmap(req)
else:
schema = argmap2schema(argmap)()
return schema
def parse(self, argmap, req=None, locations=None, validate=None, force_all=False):
"""Main request parsing method.
:param argmap: Either a `marshmallow.Schema`, a `dict`
of argname -> `marshmallow.fields.Field` pairs, or a callable
which accepts a request and returns a `marshmallow.Schema`.
:param req: The request object to parse.
:param tuple locations: Where on the request to search for values.
Can include one or more of ``('json', 'querystring', 'form',
'headers', 'cookies', 'files')``.
:param callable validate: Validation function or list of validation functions
that receives the dictionary of parsed arguments. Validator either returns a
boolean or raises a :exc:`ValidationError`.
:return: A dictionary of parsed arguments
"""
req = req if req is not None else self.get_default_request()
assert req is not None, 'Must pass req object'
ret = None
validators = _ensure_list_of_callables(validate)
schema = self._get_schema(argmap, req)
try:
parsed = self._parse_request(schema=schema, req=req, locations=locations)
result = self.load(parsed, schema)
self._validate_arguments(result.data, validators)
except ma.exceptions.ValidationError as error:
self._on_validation_error(error)
else:
ret = result.data
finally:
self.clear_cache()
if force_all:
fill_in_missing_args(ret, argmap)
return ret
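    # Direct (non-decorator) usage sketch -- the parser subclass, request object
    # and field names below are illustrative, not part of this module:
    #
    #     args = parser.parse({'q': ma.fields.Str(missing='')}, req,
    #                         locations=('querystring',))
    #     args['q']  # parsed query-string value, or '' if absent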
def clear_cache(self):
"""Invalidate the parser's cache."""
self._cache = {}
return None
def get_default_request(self):
"""Optional override. Provides a hook for frameworks that use thread-local
request objects.
"""
return None
def get_request_from_view_args(self, view, args, kwargs):
"""Optional override. Returns the request object to be parsed, given a view
function's args and kwargs.
Used by the `use_args` and `use_kwargs` to get a request object from a
view's arguments.
:param callable view: The view function or method being decorated by
`use_args` or `use_kwargs`
:param tuple args: Positional arguments passed to ``view``.
:param dict kwargs: Keyword arguments passed to ``view``.
"""
return None
def use_args(self, argmap, req=None, locations=None, as_kwargs=False, validate=None):
"""Decorator that injects parsed arguments into a view function or method.
Example usage with Flask: ::
@app.route('/echo', methods=['get', 'post'])
@parser.use_args({'name': fields.Str()})
def greet(args):
return 'Hello ' + args['name']
:param argmap: Either a `marshmallow.Schema`, a `dict`
of argname -> `marshmallow.fields.Field` pairs, or a callable
which accepts a request and returns a `marshmallow.Schema`.
:param tuple locations: Where on the request to search for values.
:param bool as_kwargs: Whether to insert arguments as keyword arguments.
:param callable validate: Validation function that receives the dictionary
of parsed arguments. If the function returns ``False``, the parser
will raise a :exc:`ValidationError`.
"""
locations = locations or self.locations
request_obj = req
# Optimization: If argmap is passed as a dictionary, we only need
# to generate a Schema once
if isinstance(argmap, collections.Mapping):
argmap = argmap2schema(argmap)()
def decorator(func):
req_ = request_obj
@functools.wraps(func)
def wrapper(*args, **kwargs):
req_obj = req_
# if as_kwargs is passed, must include all args
force_all = as_kwargs
if not req_obj:
req_obj = self.get_request_from_view_args(func, args, kwargs)
# NOTE: At this point, argmap may be a Schema, callable, or dict
parsed_args = self.parse(argmap, req=req_obj,
locations=locations, validate=validate,
force_all=force_all)
if as_kwargs:
kwargs.update(parsed_args)
return func(*args, **kwargs)
else:
# Add parsed_args after other positional arguments
new_args = args + (parsed_args, )
return func(*new_args, **kwargs)
return wrapper
return decorator
def use_kwargs(self, *args, **kwargs):
"""Decorator that injects parsed arguments into a view function or method
as keyword arguments.
This is a shortcut to :meth:`use_args` with ``as_kwargs=True``.
Example usage with Flask: ::
@app.route('/echo', methods=['get', 'post'])
@parser.use_kwargs({'name': fields.Str()})
def greet(name):
return 'Hello ' + name
Receives the same ``args`` and ``kwargs`` as :meth:`use_args`.
"""
kwargs['as_kwargs'] = True
return self.use_args(*args, **kwargs)
def location_handler(self, name):
"""Decorator that registers a function for parsing a request location.
The wrapped function receives a request, the name of the argument, and
the corresponding `Field <marshmallow.fields.Field>` object.
Example: ::
from webargs import core
parser = core.Parser()
@parser.location_handler('name')
def parse_data(request, name, field):
return request.data.get(name)
:param str name: The name of the location to register.
"""
def decorator(func):
self.__location_map__[name] = func
return func
return decorator
def error_handler(self, func):
"""Decorator that registers a custom error handling function. The
function should received the raised error. Overrides
the parser's ``handle_error`` method.
Example: ::
from webargs import core
parser = core.Parser()
class CustomError(Exception):
pass
@parser.error_handler
def handle_error(error):
raise CustomError(error)
:param callable func: The error callback to register.
"""
self.error_callback = func
return func
# Abstract Methods
def parse_json(self, req, name, arg):
"""Pull a JSON value from a request object or return `missing` if the
value cannot be found.
"""
return missing
def parse_querystring(self, req, name, arg):
"""Pull a value from the query string of a request object or return `missing` if
the value cannot be found.
"""
return missing
def parse_form(self, req, name, arg):
"""Pull a value from the form data of a request object or return
`missing` if the value cannot be found.
"""
return missing
def parse_headers(self, req, name, arg):
"""Pull a value from the headers or return `missing` if the value
cannot be found.
"""
return missing
def parse_cookies(self, req, name, arg):
"""Pull a cookie value from the request or return `missing` if the value
cannot be found.
"""
return missing
def parse_files(self, req, name, arg):
"""Pull a file from the request or return `missing` if the value file
cannot be found.
"""
return missing
def handle_error(self, error):
"""Called if an error occurs while parsing args. By default, just logs and
raises ``error``.
"""
logger.error(error)
raise error
| mit |
crosswalk-project/chromium-crosswalk-efl | tools/telemetry/telemetry/page/page_test.py | 25 | 12426 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import command_line
from telemetry.page import test_expectations
from telemetry.page.actions import action_runner as action_runner_module
class Failure(Exception):
"""Exception that can be thrown from PageTest to indicate an
undesired but designed-for problem."""
class TestNotSupportedOnPlatformFailure(Failure):
"""Exception that can be thrown to indicate that a certain feature required
to run the test is not available on the platform, hardware configuration, or
browser version."""
class MeasurementFailure(Failure):
"""Exception that can be thrown from MeasurePage to indicate an undesired but
designed-for problem."""
class PageTest(command_line.Command):
"""A class styled on unittest.TestCase for creating page-specific tests.
Test should override ValidateAndMeasurePage to perform test
validation and page measurement as necessary.
class BodyChildElementMeasurement(PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
body_child_count = tab.EvaluateJavaScript(
'document.body.children.length')
results.AddValue(scalar.ScalarValue(
page, 'body_children', 'count', body_child_count))
The class also provide hooks to add test-specific options. Here is
an example:
class BodyChildElementMeasurement(PageTest):
def AddCommandLineArgs(parser):
parser.add_option('--element', action='store', default='body')
def ValidateAndMeasurePage(self, page, tab, results):
       child_count = tab.EvaluateJavaScript(
           'document.querySelector("%s").children.length' % self.options.element)
       results.AddValue(scalar.ScalarValue(
           page, 'children', 'count', child_count))
Args:
action_name_to_run: This is the method name in telemetry.page.Page
subclasses to run.
discard_first_run: Discard the first run of this page. This is
usually used with page_repeat and pageset_repeat options.
attempts: The number of attempts to run if we encountered
infrastructure problems (as opposed to test issues), such as
losing a browser.
max_failures: The number of page failures allowed before we stop
running other pages.
is_action_name_to_run_optional: Determines what to do if
action_name_to_run is not empty but the page doesn't have that
action. The page will run (without any action) if
is_action_name_to_run_optional is True, otherwise the page
will fail.
"""
options = {}
def __init__(self,
action_name_to_run='',
needs_browser_restart_after_each_page=False,
discard_first_result=False,
clear_cache_before_each_run=False,
attempts=3,
max_failures=None,
is_action_name_to_run_optional=False):
super(PageTest, self).__init__()
self.options = None
if action_name_to_run:
assert action_name_to_run.startswith('Run') \
and '_' not in action_name_to_run, \
          ('Wrong way of naming action_name_to_run. By new convention, '
           'action_name_to_run must start with the Run prefix and be in CamelCase.')
self._action_name_to_run = action_name_to_run
self._needs_browser_restart_after_each_page = (
needs_browser_restart_after_each_page)
self._discard_first_result = discard_first_result
self._clear_cache_before_each_run = clear_cache_before_each_run
self._close_tabs_before_run = True
self._attempts = attempts
self._max_failures = max_failures
self._is_action_name_to_run_optional = is_action_name_to_run_optional
assert self._attempts > 0, 'Test attempts must be greater than 0'
# If the test overrides the TabForPage method, it is considered a multi-tab
# test. The main difference between this and a single-tab test is that we
# do not attempt recovery for the former if a tab or the browser crashes,
# because we don't know the current state of tabs (how many are open, etc.)
self.is_multi_tab_test = (self.__class__ is not PageTest and
self.TabForPage.__func__ is not
self.__class__.__bases__[0].TabForPage.__func__)
# _exit_requested is set to true when the test requests an early exit.
self._exit_requested = False
@classmethod
def SetArgumentDefaults(cls, parser):
parser.set_defaults(**cls.options)
@property
def discard_first_result(self):
"""When set to True, the first run of the test is discarded. This is
useful for cases where it's desirable to have some test resource cached so
the first run of the test can warm things up. """
return self._discard_first_result
@discard_first_result.setter
def discard_first_result(self, discard):
self._discard_first_result = discard
@property
def clear_cache_before_each_run(self):
"""When set to True, the browser's disk and memory cache will be cleared
before each run."""
return self._clear_cache_before_each_run
@property
def close_tabs_before_run(self):
"""When set to True, all tabs are closed before running the test for the
first time."""
return self._close_tabs_before_run
@close_tabs_before_run.setter
def close_tabs_before_run(self, close_tabs):
self._close_tabs_before_run = close_tabs
@property
def attempts(self):
"""Maximum number of times test will be attempted."""
return self._attempts
@attempts.setter
def attempts(self, count):
assert self._attempts > 0, 'Test attempts must be greater than 0'
self._attempts = count
@property
def max_failures(self):
"""Maximum number of failures allowed for the page set."""
return self._max_failures
@max_failures.setter
def max_failures(self, count):
self._max_failures = count
def Run(self, args):
# Define this method to avoid pylint errors.
# TODO(dtu): Make this actually run the test with args.page_set.
pass
def RestartBrowserBeforeEachPage(self):
""" Should the browser be restarted for the page?
This returns true if the test needs to unconditionally restart the
browser for each page. It may be called before the browser is started.
"""
return self._needs_browser_restart_after_each_page
def StopBrowserAfterPage(self, browser, page): # pylint: disable=W0613
"""Should the browser be stopped after the page is run?
This is called after a page is run to decide whether the browser needs to
be stopped to clean up its state. If it is stopped, then it will be
restarted to run the next page.
A test that overrides this can look at both the page and the browser to
decide whether it needs to stop the browser.
"""
return False
def CustomizeBrowserOptions(self, options):
"""Override to add test-specific options to the BrowserOptions object"""
def CustomizeBrowserOptionsForSinglePage(self, page, options):
"""Set options specific to the test and the given page.
This will be called with the current page when the browser is (re)started.
Changing options at this point only makes sense if the browser is being
restarted for each page. Note that if page has a startup_url, the browser
will always be restarted for each run.
"""
if page.startup_url:
options.browser_options.startup_url = page.startup_url
def WillStartBrowser(self, platform):
"""Override to manipulate the browser environment before it launches."""
def DidStartBrowser(self, browser):
"""Override to customize the browser right after it has launched."""
def CanRunForPage(self, page): # pylint: disable=W0613
"""Override to customize if the test can be ran for the given page."""
if self._action_name_to_run and not self._is_action_name_to_run_optional:
return hasattr(page, self._action_name_to_run)
return True
def WillRunTest(self, options):
"""Override to do operations before the page set(s) are navigated."""
self.options = options
def DidRunTest(self, browser, results): # pylint: disable=W0613
"""Override to do operations after all page set(s) are completed.
This will occur before the browser is torn down.
"""
self.options = None
def WillNavigateToPage(self, page, tab):
"""Override to do operations before the page is navigated, notably Telemetry
will already have performed the following operations on the browser before
calling this function:
* Ensure only one tab is open.
* Call WaitForDocumentReadyStateToComplete on the tab."""
def DidNavigateToPage(self, page, tab):
"""Override to do operations right after the page is navigated and after
all waiting for completion has occurred."""
def WillRunActions(self, page, tab):
"""Override to do operations before running the actions on the page."""
def DidRunActions(self, page, tab):
"""Override to do operations after running the actions on the page."""
def CleanUpAfterPage(self, page, tab):
"""Called after the test run method was run, even if it failed."""
def CreateExpectations(self, page_set): # pylint: disable=W0613
"""Override to make this test generate its own expectations instead of
any that may have been defined in the page set."""
return test_expectations.TestExpectations()
def TabForPage(self, page, browser): # pylint: disable=W0613
"""Override to select a different tab for the page. For instance, to
create a new tab for every page, return browser.tabs.New()."""
return browser.tabs[0]
def ValidatePageSet(self, page_set):
"""Override to examine the page set before the test run. Useful for
example to validate that the pageset can be used with the test."""
def ValidateAndMeasurePage(self, page, tab, results):
"""Override to check test assertions and perform measurement.
When adding measurement results, call results.AddValue(...) for
each result. Raise an exception or add a failure.FailureValue on
failure. page_test.py also provides several base exception classes
to use.
Prefer metric value names that are in accordance with python
variable style. e.g., metric_name. The name 'url' must not be used.
Put together:
def ValidateAndMeasurePage(self, page, tab, results):
res = tab.EvaluateJavaScript('2+2')
if res != 4:
raise Exception('Oh, wow.')
results.AddValue(scalar.ScalarValue(
page, 'two_plus_two', 'count', res))
Args:
page: A telemetry.page.Page instance.
tab: A telemetry.core.Tab instance.
results: A telemetry.results.PageTestResults instance.
"""
# TODO(chrishenry): Switch to raise NotImplementedError() when
# subclasses no longer override ValidatePage/MeasurePage.
self.ValidatePage(page, tab, results)
def ValidatePage(self, page, tab, results):
"""DEPRECATED: Use ValidateAndMeasurePage instead."""
self.MeasurePage(page, tab, results)
def MeasurePage(self, page, tab, results):
"""DEPRECATED: Use ValidateAndMeasurePage instead."""
def RunPage(self, page, tab, results):
# Run actions.
interactive = self.options and self.options.interactive
action_runner = action_runner_module.ActionRunner(
tab, skip_waits=page.skip_waits)
self.WillRunActions(page, tab)
if interactive:
action_runner.PauseInteractive()
else:
self._RunMethod(page, self._action_name_to_run, action_runner)
self.DidRunActions(page, tab)
self.ValidateAndMeasurePage(page, tab, results)
def _RunMethod(self, page, method_name, action_runner):
if hasattr(page, method_name):
run_method = getattr(page, method_name)
run_method(action_runner)
def RunNavigateSteps(self, page, tab):
"""Navigates the tab to the page URL attribute.
Runs the 'navigate_steps' page attribute as a compound action.
"""
action_runner = action_runner_module.ActionRunner(
tab, skip_waits=page.skip_waits)
page.RunNavigateSteps(action_runner)
def IsExiting(self):
return self._exit_requested
def RequestExit(self):
self._exit_requested = True
@property
def action_name_to_run(self):
return self._action_name_to_run
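# A minimal subclass sketch (hypothetical; the metric name and the JavaScript
# expression below are assumptions for illustration only). It combines the hooks
# documented above -- override ValidateAndMeasurePage and report values through
# results.AddValue:
#
#   class SimpleLoadTest(PageTest):
#     def ValidateAndMeasurePage(self, page, tab, results):
#       load_ms = tab.EvaluateJavaScript(
#           'performance.timing.loadEventEnd - performance.timing.navigationStart')
#       results.AddValue(scalar.ScalarValue(page, 'load_time', 'ms', load_ms))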
| bsd-3-clause |
oVirt/ovirt-register | src/ovirt_register/system.py | 2 | 3282 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import glob
import os
import selinux
import subprocess
import logging
__LOGGER = logging.getLogger(__name__)
def execute_cmd(sys_cmd, env_shell=False):
"""
Execute a command on the host
sys_cmd -- Command to be executed
    env_shell -- True or False - whether to execute through the shell environment
    (True is not recommended, as it is a security hazard)
Return:
output, error, returncode
"""
try:
cmd = subprocess.Popen(sys_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=env_shell)
output, err = cmd.communicate()
if cmd.returncode != 0:
raise OSError
except OSError as e:
__LOGGER.error("Cannot execute shell command", exc_info=True)
raise e
return output, err, cmd.returncode
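# A minimal usage sketch (the command shown is an assumption for illustration;
# this module does not run it itself):
#
#   out, err, rc = execute_cmd(['hostname', '--fqdn'])
#   fqdn = out.strip()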
def silent_restorecon(path):
"""Execute selinux restorecon cmd to determined file
Args
path -- full path to file
"""
try:
if selinux.is_selinux_enabled():
selinux.restorecon(path)
    except Exception:
        __LOGGER.error("restorecon {p} failed".format(p=path))
class NodeImage(object):
"""
REQUIRED: oVirt Node until 3.6
To save the change across reboot, oVirt Node requires
to call the persist API.
To remove a file, it's required to do unpersist first
"""
def __init__(self):
self.logger = logging.getLogger(__name__)
def persist(self, fname=None):
"""
Execute the persist command in ovirt-node
"""
try:
if self.check() and fname is not None:
from ovirt.node.utils.fs import Config
Config().persist(fname)
except Exception as e:
self.logger.exception("Exception: {exp}".format(exp=e))
raise RuntimeError("Cannot persist {f}:\n {exc}".format(
f=fname,
exc=e))
def check(self):
"""
Check if the OS running is a node image
Returns:
True or False
"""
return (os.path.exists('/etc/rhev-hypervisor-release') or
bool(glob.glob('/etc/ovirt-node-*-release')))
def unpersist(self, fname):
"""
Execute the unpersist command in ovirt-node
"""
try:
if self.check() and fname is not None:
from ovirt.node.utils.fs import Config
Config().unpersist(fname)
except Exception as e:
self.logger.exception("Exception: {exp}".format(exp=e))
raise RuntimeError("Cannot unpersist {f}:\n {exc}".format(
f=fname,
exc=e))
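# A minimal usage sketch (the file path is a made-up example):
#
#   node = NodeImage()
#   if node.check():
#       node.persist('/etc/ovirt-register.conf')    # keep the change across reboots
#       ...
#       node.unpersist('/etc/ovirt-register.conf')  # required before the file can be removed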
| gpl-2.0 |
solashirai/edx-platform | cms/djangoapps/contentstore/tests/test_users_default_role.py | 115 | 5183 | """
Unit tests for checking the default forum role "Student" of a user when they create a course,
or recreate the same course after deleting it
"""
from contentstore.tests.utils import AjaxEnabledTestClient
from contentstore.utils import delete_course_and_groups, reverse_url
from courseware.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.models import CourseEnrollment
class TestUsersDefaultRole(ModuleStoreTestCase):
"""
    Unit tests for checking enrollment and the default forum role "Student" of a logged-in user
"""
def setUp(self):
"""
Add a user and a course
"""
super(TestUsersDefaultRole, self).setUp()
# create and log in a staff user.
self.user = UserFactory(is_staff=True)
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password='test')
# create a course via the view handler to create course
self.course_key = self.store.make_course_key('Org_1', 'Course_1', 'Run_1')
self._create_course_with_given_location(self.course_key)
def _create_course_with_given_location(self, course_key):
"""
Create course at provided location
"""
resp = self.client.ajax_post(
reverse_url('course_handler'),
{
'org': course_key.org,
'number': course_key.course,
'display_name': 'test course',
'run': course_key.run,
}
)
return resp
def tearDown(self):
"""
Reverse the setup
"""
self.client.logout()
super(TestUsersDefaultRole, self).tearDown()
def test_user_forum_default_role_on_course_deletion(self):
"""
        Test that a user is enrolled and gets the "Student" forum role for a course they create, and that
        the user stays enrolled and keeps the "Student" forum role even after the course is deleted
"""
# check that user has enrollment for this course
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
# check that user has his default "Student" forum role for this course
self.assertTrue(self.user.roles.filter(name="Student", course_id=self.course_key))
delete_course_and_groups(self.course_key, self.user.id)
# check that user's enrollment for this course is not deleted
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
# check that user has forum role for this course even after deleting it
self.assertTrue(self.user.roles.filter(name="Student", course_id=self.course_key))
def test_user_role_on_course_recreate(self):
"""
        Test that recreating the same course after deleting it gives the user their default
        forum role "Student" for that course
"""
# check that user has enrollment and his default "Student" forum role for this course
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
self.assertTrue(self.user.roles.filter(name="Student", course_id=self.course_key))
# delete this course and recreate this course with same user
delete_course_and_groups(self.course_key, self.user.id)
resp = self._create_course_with_given_location(self.course_key)
self.assertEqual(resp.status_code, 200)
# check that user has his enrollment for this course
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
# check that user has his default "Student" forum role for this course
self.assertTrue(self.user.roles.filter(name="Student", course_id=self.course_key))
def test_user_role_on_course_recreate_with_change_name_case(self):
"""
        Test that recreating the same course with a different name case after deleting it gives
        the user their default forum role "Student" for that course
"""
# check that user has enrollment and his default "Student" forum role for this course
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course_key))
# delete this course and recreate this course with same user
delete_course_and_groups(self.course_key, self.user.id)
# now create same course with different name case ('uppercase')
new_course_key = self.course_key.replace(course=self.course_key.course.upper())
resp = self._create_course_with_given_location(new_course_key)
self.assertEqual(resp.status_code, 200)
# check that user has his default "Student" forum role again for this course (with changed name case)
self.assertTrue(
self.user.roles.filter(name="Student", course_id=new_course_key)
)
# Disabled due to case-sensitive test db (sqlite3)
# # check that there user has only one "Student" forum role (with new updated course_id)
# self.assertEqual(self.user.roles.filter(name='Student').count(), 1)
# self.assertEqual(self.user.roles.filter(name='Student')[0].course_id, new_course_location.course_key)
| agpl-3.0 |
iivic/BoiseStateX | lms/djangoapps/lms_migration/migrate.py | 80 | 8921 | #
# migration tools for content team to go from stable-edx4edx to LMS+CMS
#
import json
import logging
import os
import requests
import xmodule.modulestore.django as xmodule_django
from xmodule.modulestore.django import modulestore
from django.http import HttpResponse
from django.conf import settings
import track.views
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
from django.contrib.csrf.middleware import csrf_exempt
log = logging.getLogger("edx.lms_migrate")
LOCAL_DEBUG = True
ALLOWED_IPS = settings.LMS_MIGRATION_ALLOWED_IPS
def escape(s):
"""escape HTML special characters in string"""
    return str(s).replace('<', '&lt;').replace('>', '&gt;')
def getip(request):
'''
Extract IP address of requester from header, even if behind proxy
'''
ip = request.META.get('HTTP_X_REAL_IP', '') # nginx reverse proxy
if not ip:
ip = request.META.get('REMOTE_ADDR', 'None')
return ip
def get_commit_id(course):
#return course.metadata.get('GIT_COMMIT_ID', 'No commit id')
return getattr(course, 'GIT_COMMIT_ID', 'No commit id')
# getattr(def_ms.courses[reload_dir], 'GIT_COMMIT_ID','No commit id')
def set_commit_id(course, commit_id):
#course.metadata['GIT_COMMIT_ID'] = commit_id
course.GIT_COMMIT_ID = commit_id
# def_ms.courses[reload_dir].GIT_COMMIT_ID = new_commit_id
def manage_modulestores(request, reload_dir=None, commit_id=None):
'''
Manage the static in-memory modulestores.
If reload_dir is not None, then instruct the xml loader to reload that course directory.
'''
html = "<html><body>"
def_ms = modulestore()
courses = def_ms.get_courses()
#----------------------------------------
# check on IP address of requester
ip = getip(request)
if LOCAL_DEBUG:
        html += '<h3>IP address: %s</h3>' % ip
html += '<h3>User: %s </h3>' % request.user
html += '<h3>My pid: %s</h3>' % os.getpid()
log.debug(u'request from ip=%s, user=%s', ip, request.user)
if not (ip in ALLOWED_IPS or 'any' in ALLOWED_IPS):
if request.user and request.user.is_staff:
log.debug(u'request allowed because user=%s is staff', request.user)
else:
html += 'Permission denied'
html += "</body></html>"
log.debug('request denied, ALLOWED_IPS=%s', ALLOWED_IPS)
return HttpResponse(html, status=403)
#----------------------------------------
# reload course if specified; handle optional commit_id
if reload_dir is not None:
if reload_dir not in def_ms.courses:
html += '<h2 class="inline-error">Error: "%s" is not a valid course directory</h2>' % reload_dir
else:
            # reloading based on commit_id is needed when running multiple worker threads,
# so that a given thread doesn't reload the same commit multiple times
current_commit_id = get_commit_id(def_ms.courses[reload_dir])
log.debug('commit_id="%s"', commit_id)
log.debug('current_commit_id="%s"', current_commit_id)
if (commit_id is not None) and (commit_id == current_commit_id):
html += "<h2>Already at commit id %s for %s</h2>" % (commit_id, reload_dir)
track.views.server_track(request,
'reload %s skipped already at %s (pid=%s)' % (reload_dir,
commit_id,
os.getpid(),
),
{}, page='migrate')
else:
html += '<h2>Reloaded course directory "%s"</h2>' % reload_dir
def_ms.try_load_course(reload_dir)
gdir = settings.DATA_DIR / reload_dir
new_commit_id = os.popen('cd %s; git log -n 1 | head -1' % gdir).read().strip().split(' ')[1]
set_commit_id(def_ms.courses[reload_dir], new_commit_id)
html += '<p>commit_id=%s</p>' % new_commit_id
track.views.server_track(request, 'reloaded %s now at %s (pid=%s)' % (reload_dir,
new_commit_id,
os.getpid()), {}, page='migrate')
#----------------------------------------
html += '<h2>Courses loaded in the modulestore</h2>'
html += '<ol>'
for cdir, course in def_ms.courses.items():
html += '<li><a href="%s/migrate/reload/%s">%s</a> (%s)</li>' % (
settings.EDX_ROOT_URL,
escape(cdir),
escape(cdir),
course.location.to_deprecated_string()
)
html += '</ol>'
#----------------------------------------
#dumpfields = ['definition', 'location', 'metadata']
dumpfields = ['location', 'metadata']
for cdir, course in def_ms.courses.items():
html += '<hr width="100%"/>'
html += '<h2>Course: %s (%s)</h2>' % (course.display_name_with_default, cdir)
html += '<p>commit_id=%s</p>' % get_commit_id(course)
for field in dumpfields:
data = getattr(course, field, None)
html += '<h3>%s</h3>' % field
if isinstance(data, dict):
html += '<ul>'
for k, v in data.items():
html += '<li>%s:%s</li>' % (escape(k), escape(v))
html += '</ul>'
else:
html += '<ul><li>%s</li></ul>' % escape(data)
#----------------------------------------
html += '<hr width="100%"/>'
html += "courses: <pre>%s</pre>" % escape(courses)
ms = xmodule_django._MODULESTORES
html += "modules: <pre>%s</pre>" % escape(ms)
html += "default modulestore: <pre>%s</pre>" % escape(unicode(def_ms))
#----------------------------------------
log.debug('_MODULESTORES=%s', ms)
log.debug('courses=%s', courses)
log.debug('def_ms=%s', unicode(def_ms))
html += "</body></html>"
return HttpResponse(html)
@csrf_exempt
def gitreload(request, reload_dir=None):
'''
This can be used as a github WebHook Service Hook, for reloading of the content repo used by the LMS.
If reload_dir is not None, then instruct the xml loader to reload that course directory.
'''
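    # A GitHub webhook POST carries a JSON 'payload' form field; only the repository
    # name is used below. Minimal sketch (the repository name is made up, and the
    # many other fields GitHub sends are omitted):
    #
    #   payload = '{"repository": {"name": "content-mit-101x"}}'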
html = "<html><body>"
ip = getip(request)
    html += '<h3>IP address: %s</h3>' % ip
    html += '<h3>User: %s</h3>' % request.user
ALLOWED_IPS = [] # allow none by default
if hasattr(settings, 'ALLOWED_GITRELOAD_IPS'): # allow override in settings
ALLOWED_IPS = settings.ALLOWED_GITRELOAD_IPS
if not (ip in ALLOWED_IPS or 'any' in ALLOWED_IPS):
if request.user and request.user.is_staff:
log.debug(u'request allowed because user=%s is staff', request.user)
else:
html += 'Permission denied'
html += "</body></html>"
log.debug('request denied from %s, ALLOWED_IPS=%s', ip, ALLOWED_IPS)
return HttpResponse(html)
#----------------------------------------
# see if request is from github (POST with JSON)
if reload_dir is None and 'payload' in request.POST:
payload = request.POST['payload']
log.debug("payload=%s", payload)
gitargs = json.loads(payload)
log.debug("gitargs=%s", gitargs)
reload_dir = gitargs['repository']['name']
log.debug("github reload_dir=%s", reload_dir)
gdir = settings.DATA_DIR / reload_dir
if not os.path.exists(gdir):
log.debug("====> ERROR in gitreload - no such directory %s", reload_dir)
return HttpResponse('Error')
cmd = "cd %s; git reset --hard HEAD; git clean -f -d; git pull origin; chmod g+w course.xml" % gdir
log.debug(os.popen(cmd).read())
if hasattr(settings, 'GITRELOAD_HOOK'): # hit this hook after reload, if set
gh = settings.GITRELOAD_HOOK
if gh:
ghurl = '%s/%s' % (gh, reload_dir)
r = requests.get(ghurl)
log.debug("GITRELOAD_HOOK to %s: %s", ghurl, r.text)
#----------------------------------------
# reload course if specified
if reload_dir is not None:
def_ms = modulestore()
if reload_dir not in def_ms.courses:
html += '<h2 class="inline-error">Error: "%s" is not a valid course directory</font></h2>' % reload_dir
else:
html += "<h2>Reloaded course directory '%s'</h2>" % reload_dir
def_ms.try_load_course(reload_dir)
track.views.server_track(request, 'reloaded %s' % reload_dir, {}, page='migrate')
return HttpResponse(html)
| agpl-3.0 |
akaariai/django | django/contrib/staticfiles/management/commands/runserver.py | 248 | 1361 | from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.management.commands.runserver import \
Command as RunserverCommand
class Command(RunserverCommand):
help = "Starts a lightweight Web server for development and also serves static files."
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.')
parser.add_argument('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.')
def get_handler(self, *args, **options):
"""
Returns the static files serving handler wrapping the default handler,
if static files should be served. Otherwise just returns the default
handler.
"""
handler = super(Command, self).get_handler(*args, **options)
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
return StaticFilesHandler(handler)
return handler
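# Usage sketch for the flags defined above (assuming the standard manage.py entry point):
#
#   python manage.py runserver                # serves static files while DEBUG is True
#   python manage.py runserver --nostatic     # never serve static files automatically
#   python manage.py runserver --insecure     # serve static files even when DEBUG is False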
| bsd-3-clause |
mjs/juju | acceptancetests/repository/charms/mysql/hooks/common.py | 2 | 3867 | # vim: syntax=python
import os
import sys
import MySQLdb
import subprocess
import uuid
def get_service_user_file(service):
return '/var/lib/mysql/%s.service_user2' % service
def status_set(mode, message):
subprocess.check_call(["status-set", mode, message])
def get_service_user(service):
if service == '':
return (None, None)
sfile = get_service_user_file(service)
if os.path.exists(sfile):
with open(sfile, 'r') as f:
return (f.readline().strip(), f.readline().strip())
(suser, service_password) = subprocess.check_output(['pwgen', '-N 2', '15']).strip().split("\n")
with open(sfile, 'w') as f:
f.write("%s\n" % suser)
f.write("%s\n" % service_password)
f.flush()
return (suser, service_password)
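# The <service>.service_user2 file written above holds two lines -- the generated
# user name and the password, both produced by pwgen. Example contents (made-up values):
#
#   aCh3eReeXaiqua1
#   ooPh8aiLaiv0la2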
def cleanup_service_user(service):
os.unlink(get_service_user_file(service))
relation_id = os.environ.get('JUJU_RELATION_ID')
change_unit = os.environ.get('JUJU_REMOTE_UNIT')
# We'll name the database the same as the service.
database_name_file = '.%s_database_name' % (relation_id)
# change_unit will be None on broken hooks
database_name = ''
if change_unit:
database_name, _ = change_unit.split("/")
with open(database_name_file, 'w') as dbnf:
dbnf.write("%s\n" % database_name)
dbnf.flush()
elif os.path.exists(database_name_file):
with open(database_name_file, 'r') as dbname:
database_name = dbname.readline().strip()
else:
print 'No established database and no REMOTE_UNIT.'
# A user per service unit so we can deny access quickly
user, service_password = get_service_user(database_name)
connection = None
lastrun_path = '/var/lib/juju/%s.%s.lastrun' % (database_name, user)
slave_configured_path = '/var/lib/juju.slave.configured.for.%s' % database_name
slave_configured = os.path.exists(slave_configured_path)
slave = os.path.exists('/var/lib/juju/i.am.a.slave')
broken_path = '/var/lib/juju/%s.mysql.broken' % database_name
broken = os.path.exists(broken_path)
def get_db_cursor():
# Connect to mysql
passwd = open("/var/lib/mysql/mysql.passwd").read().strip()
connection = MySQLdb.connect(user="root", host="localhost", passwd=passwd)
return connection.cursor()
def database_exists(db_name):
cursor = get_db_cursor()
try:
cursor.execute("SHOW DATABASES")
databases = [i[0] for i in cursor.fetchall()]
finally:
cursor.close()
return db_name in databases
def create_database(db_name):
cursor = get_db_cursor()
try:
cursor.execute("CREATE DATABASE {}".format(db_name))
finally:
cursor.close()
def grant_exists(db_name, db_user, remote_ip):
cursor = get_db_cursor()
try:
cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
remote_ip))
grants = [i[0] for i in cursor.fetchall()]
except MySQLdb.OperationalError:
print "No grants found"
return False
finally:
cursor.close()
return "GRANT ALL PRIVILEGES ON `{}`".format(db_name) in grants
def create_grant(db_name, db_user,
remote_ip, password):
cursor = get_db_cursor()
try:
cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' "\
"IDENTIFIED BY '{}'".format(db_name,
db_user,
remote_ip,
password))
finally:
cursor.close()
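# For reference, the statement built by create_grant() expands to something like
# (example values are made up):
#
#   GRANT ALL PRIVILEGES ON wordpress.* TO 'wordpress'@'10.0.3.17' IDENTIFIED BY 's3cretpass'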
def cleanup_grant(db_user,
remote_ip):
cursor = get_db_cursor()
try:
cursor.execute("DROP FROM mysql.user WHERE user='{}' "\
"AND HOST='{}'".format(db_user,
remote_ip))
finally:
cursor.close()
| agpl-3.0 |