repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
AutorestCI/azure-sdk-for-python | azure-mgmt-media/azure/mgmt/media/models/regenerate_key_input.py | 5 | 1061 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RegenerateKeyInput(Model):
"""The request body for a RegenerateKey API.
:param key_type: The keyType indicating which key you want to regenerate,
Primary or Secondary. Possible values include: 'Primary', 'Secondary'
:type key_type: str or :class:`KeyType <azure.mgmt.media.models.KeyType>`
"""
_validation = {
'key_type': {'required': True},
}
_attribute_map = {
'key_type': {'key': 'keyType', 'type': 'KeyType'},
}
def __init__(self, key_type):
self.key_type = key_type
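# Illustrative usage sketch (added here, not part of the generated SDK):
# constructing the request body that a media-service client would pass to its
# regenerate-key operation. The 'Primary' literal mirrors the documented
# KeyType values; the client call itself is assumed and not shown.
if __name__ == '__main__':
    body = RegenerateKeyInput(key_type='Primary')
    print(body.key_type)  # -> 'Primary'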
| mit | -6,920,071,918,550,743,000 | 32.15625 | 77 | 0.580584 | false |
infilect/ml-course1 | deep-learning-keras/keras-scripts/imdb_lstm.py | 4 | 1879 |
'''Trains an LSTM model on the IMDB sentiment classification task.
The dataset is actually too small for LSTM to be of any advantage
compared to simpler, much faster methods such as TF-IDF + LogReg.
# Notes
- RNNs are tricky. Choice of batch size is important,
choice of loss and optimizer is critical, etc.
Some configurations won't converge.
- LSTM loss decrease patterns during training can be quite different
from what you see with CNNs/MLPs/etc.
'''
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.datasets import imdb
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=15,
validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
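# Optional follow-up (illustrative sketch, not in the original script): score a
# new review that was encoded with the same imdb word index and padded to
# `maxlen`. The word-index values below are placeholders, not a real review.
sample = sequence.pad_sequences([[1, 14, 22, 16, 43, 530]], maxlen=maxlen)
print('Predicted positive-sentiment probability:', model.predict(sample)[0][0])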
| mit | 649,705,111,399,673,500 | 31.964912 | 94 | 0.716871 | false |
prashantbh/kafka | tests/kafkatest/services/kafka/config_property.py | 13 | 9454 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define Kafka configuration property names here.
"""
BROKER_ID = "broker.id"
PORT = "port"
ADVERTISED_HOSTNAME = "advertised.host.name"
NUM_NETWORK_THREADS = "num.network.threads"
NUM_IO_THREADS = "num.io.threads"
SOCKET_SEND_BUFFER_BYTES = "socket.send.buffer.bytes"
SOCKET_RECEIVE_BUFFER_BYTES = "socket.receive.buffer.bytes"
SOCKET_REQUEST_MAX_BYTES = "socket.request.max.bytes"
LOG_DIRS = "log.dirs"
NUM_PARTITIONS = "num.partitions"
NUM_RECOVERY_THREADS_PER_DATA_DIR = "num.recovery.threads.per.data.dir"
LOG_RETENTION_HOURS = "log.retention.hours"
LOG_SEGMENT_BYTES = "log.segment.bytes"
LOG_RETENTION_CHECK_INTERVAL_MS = "log.retention.check.interval.ms"
LOG_CLEANER_ENABLE = "log.cleaner.enable"
AUTO_CREATE_TOPICS_ENABLE = "auto.create.topics.enable"
ZOOKEEPER_CONNECT = "zookeeper.connect"
ZOOKEEPER_CONNECTION_TIMEOUT_MS = "zookeeper.connection.timeout.ms"
INTER_BROKER_PROTOCOL_VERSION = "inter.broker.protocol.version"
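# Illustrative sketch (an assumption, not part of the original module): these
# names are convenient keys when building a dict of broker overrides and
# rendering it into server.properties-style text.
if __name__ == '__main__':
    overrides = {BROKER_ID: 0, ZOOKEEPER_CONNECT: "localhost:2181"}
    print("\n".join("%s=%s" % (k, v) for k, v in sorted(overrides.items())))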
"""
From KafkaConfig.scala
/** ********* General Configuration ***********/
val MaxReservedBrokerIdProp = "reserved.broker.max.id"
val MessageMaxBytesProp = "message.max.bytes"
val NumIoThreadsProp = "num.io.threads"
val BackgroundThreadsProp = "background.threads"
val QueuedMaxRequestsProp = "queued.max.requests"
/** ********* Socket Server Configuration ***********/
val PortProp = "port"
val HostNameProp = "host.name"
val ListenersProp = "listeners"
val AdvertisedPortProp = "advertised.port"
val AdvertisedListenersProp = "advertised.listeners"
val SocketSendBufferBytesProp = "socket.send.buffer.bytes"
val SocketReceiveBufferBytesProp = "socket.receive.buffer.bytes"
val SocketRequestMaxBytesProp = "socket.request.max.bytes"
val MaxConnectionsPerIpProp = "max.connections.per.ip"
val MaxConnectionsPerIpOverridesProp = "max.connections.per.ip.overrides"
val ConnectionsMaxIdleMsProp = "connections.max.idle.ms"
/** ********* Log Configuration ***********/
val NumPartitionsProp = "num.partitions"
val LogDirsProp = "log.dirs"
val LogDirProp = "log.dir"
val LogSegmentBytesProp = "log.segment.bytes"
val LogRollTimeMillisProp = "log.roll.ms"
val LogRollTimeHoursProp = "log.roll.hours"
val LogRollTimeJitterMillisProp = "log.roll.jitter.ms"
val LogRollTimeJitterHoursProp = "log.roll.jitter.hours"
val LogRetentionTimeMillisProp = "log.retention.ms"
val LogRetentionTimeMinutesProp = "log.retention.minutes"
val LogRetentionTimeHoursProp = "log.retention.hours"
val LogRetentionBytesProp = "log.retention.bytes"
val LogCleanupIntervalMsProp = "log.retention.check.interval.ms"
val LogCleanupPolicyProp = "log.cleanup.policy"
val LogCleanerThreadsProp = "log.cleaner.threads"
val LogCleanerIoMaxBytesPerSecondProp = "log.cleaner.io.max.bytes.per.second"
val LogCleanerDedupeBufferSizeProp = "log.cleaner.dedupe.buffer.size"
val LogCleanerIoBufferSizeProp = "log.cleaner.io.buffer.size"
val LogCleanerDedupeBufferLoadFactorProp = "log.cleaner.io.buffer.load.factor"
val LogCleanerBackoffMsProp = "log.cleaner.backoff.ms"
val LogCleanerMinCleanRatioProp = "log.cleaner.min.cleanable.ratio"
val LogCleanerEnableProp = "log.cleaner.enable"
val LogCleanerDeleteRetentionMsProp = "log.cleaner.delete.retention.ms"
val LogIndexSizeMaxBytesProp = "log.index.size.max.bytes"
val LogIndexIntervalBytesProp = "log.index.interval.bytes"
val LogFlushIntervalMessagesProp = "log.flush.interval.messages"
val LogDeleteDelayMsProp = "log.segment.delete.delay.ms"
val LogFlushSchedulerIntervalMsProp = "log.flush.scheduler.interval.ms"
val LogFlushIntervalMsProp = "log.flush.interval.ms"
val LogFlushOffsetCheckpointIntervalMsProp = "log.flush.offset.checkpoint.interval.ms"
val LogPreAllocateProp = "log.preallocate"
val NumRecoveryThreadsPerDataDirProp = "num.recovery.threads.per.data.dir"
val MinInSyncReplicasProp = "min.insync.replicas"
/** ********* Replication configuration ***********/
val ControllerSocketTimeoutMsProp = "controller.socket.timeout.ms"
val DefaultReplicationFactorProp = "default.replication.factor"
val ReplicaLagTimeMaxMsProp = "replica.lag.time.max.ms"
val ReplicaSocketTimeoutMsProp = "replica.socket.timeout.ms"
val ReplicaSocketReceiveBufferBytesProp = "replica.socket.receive.buffer.bytes"
val ReplicaFetchMaxBytesProp = "replica.fetch.max.bytes"
val ReplicaFetchWaitMaxMsProp = "replica.fetch.wait.max.ms"
val ReplicaFetchMinBytesProp = "replica.fetch.min.bytes"
val ReplicaFetchBackoffMsProp = "replica.fetch.backoff.ms"
val NumReplicaFetchersProp = "num.replica.fetchers"
val ReplicaHighWatermarkCheckpointIntervalMsProp = "replica.high.watermark.checkpoint.interval.ms"
val FetchPurgatoryPurgeIntervalRequestsProp = "fetch.purgatory.purge.interval.requests"
val ProducerPurgatoryPurgeIntervalRequestsProp = "producer.purgatory.purge.interval.requests"
val AutoLeaderRebalanceEnableProp = "auto.leader.rebalance.enable"
val LeaderImbalancePerBrokerPercentageProp = "leader.imbalance.per.broker.percentage"
val LeaderImbalanceCheckIntervalSecondsProp = "leader.imbalance.check.interval.seconds"
val UncleanLeaderElectionEnableProp = "unclean.leader.election.enable"
val InterBrokerSecurityProtocolProp = "security.inter.broker.protocol"
val InterBrokerProtocolVersionProp = "inter.broker.protocol.version"
/** ********* Controlled shutdown configuration ***********/
val ControlledShutdownMaxRetriesProp = "controlled.shutdown.max.retries"
val ControlledShutdownRetryBackoffMsProp = "controlled.shutdown.retry.backoff.ms"
val ControlledShutdownEnableProp = "controlled.shutdown.enable"
/** ********* Consumer coordinator configuration ***********/
val ConsumerMinSessionTimeoutMsProp = "consumer.min.session.timeout.ms"
val ConsumerMaxSessionTimeoutMsProp = "consumer.max.session.timeout.ms"
/** ********* Offset management configuration ***********/
val OffsetMetadataMaxSizeProp = "offset.metadata.max.bytes"
val OffsetsLoadBufferSizeProp = "offsets.load.buffer.size"
val OffsetsTopicReplicationFactorProp = "offsets.topic.replication.factor"
val OffsetsTopicPartitionsProp = "offsets.topic.num.partitions"
val OffsetsTopicSegmentBytesProp = "offsets.topic.segment.bytes"
val OffsetsTopicCompressionCodecProp = "offsets.topic.compression.codec"
val OffsetsRetentionMinutesProp = "offsets.retention.minutes"
val OffsetsRetentionCheckIntervalMsProp = "offsets.retention.check.interval.ms"
val OffsetCommitTimeoutMsProp = "offsets.commit.timeout.ms"
val OffsetCommitRequiredAcksProp = "offsets.commit.required.acks"
/** ********* Quota Configuration ***********/
val ProducerQuotaBytesPerSecondDefaultProp = "quota.producer.default"
val ConsumerQuotaBytesPerSecondDefaultProp = "quota.consumer.default"
val ProducerQuotaBytesPerSecondOverridesProp = "quota.producer.bytes.per.second.overrides"
val ConsumerQuotaBytesPerSecondOverridesProp = "quota.consumer.bytes.per.second.overrides"
val NumQuotaSamplesProp = "quota.window.num"
val QuotaWindowSizeSecondsProp = "quota.window.size.seconds"
val DeleteTopicEnableProp = "delete.topic.enable"
val CompressionTypeProp = "compression.type"
/** ********* Kafka Metrics Configuration ***********/
val MetricSampleWindowMsProp = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG
val MetricNumSamplesProp: String = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG
val MetricReporterClassesProp: String = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG
/** ********* SSL Configuration ****************/
val PrincipalBuilderClassProp = SSLConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG
val SSLProtocolProp = SSLConfigs.SSL_PROTOCOL_CONFIG
val SSLProviderProp = SSLConfigs.SSL_PROVIDER_CONFIG
val SSLCipherSuitesProp = SSLConfigs.SSL_CIPHER_SUITES_CONFIG
val SSLEnabledProtocolsProp = SSLConfigs.SSL_ENABLED_PROTOCOLS_CONFIG
val SSLKeystoreTypeProp = SSLConfigs.SSL_KEYSTORE_TYPE_CONFIG
val SSLKeystoreLocationProp = SSLConfigs.SSL_KEYSTORE_LOCATION_CONFIG
val SSLKeystorePasswordProp = SSLConfigs.SSL_KEYSTORE_PASSWORD_CONFIG
val SSLKeyPasswordProp = SSLConfigs.SSL_KEY_PASSWORD_CONFIG
val SSLTruststoreTypeProp = SSLConfigs.SSL_TRUSTSTORE_TYPE_CONFIG
val SSLTruststoreLocationProp = SSLConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG
val SSLTruststorePasswordProp = SSLConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG
val SSLKeyManagerAlgorithmProp = SSLConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG
val SSLTrustManagerAlgorithmProp = SSLConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG
val SSLEndpointIdentificationAlgorithmProp = SSLConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG
val SSLClientAuthProp = SSLConfigs.SSL_CLIENT_AUTH_CONFIG
"""
| apache-2.0 | 5,579,875,960,285,273,000 | 52.412429 | 102 | 0.785276 | false |
nikolay-fedotov/tempest | tempest/api/orchestration/stacks/test_resource_types.py | 9 | 1907 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest import test
class ResourceTypesTest(base.BaseOrchestrationTest):
@test.attr(type='smoke')
def test_resource_type_list(self):
"""Verify it is possible to list resource types."""
resource_types = self.client.list_resource_types()
self.assertIsInstance(resource_types, list)
self.assertIn('OS::Nova::Server', resource_types)
@test.attr(type='smoke')
def test_resource_type_show(self):
"""Verify it is possible to get schema about resource types."""
resource_types = self.client.list_resource_types()
self.assertNotEmpty(resource_types)
for resource_type in resource_types:
type_schema = self.client.get_resource_type(resource_type)
self.assert_fields_in_dict(type_schema, 'properties',
'attributes', 'resource_type')
self.assertEqual(resource_type, type_schema['resource_type'])
@test.attr(type='smoke')
def test_resource_type_template(self):
"""Verify it is possible to get template about resource types."""
type_template = self.client.get_resource_type_template(
'OS::Nova::Server')
self.assert_fields_in_dict(type_template, 'Outputs',
'Parameters', 'Resources') | apache-2.0 | -224,897,668,363,859,550 | 42.363636 | 78 | 0.67226 | false |
kawamon/hue | desktop/core/ext-py/boto-2.46.1/boto/cloudformation/stack.py | 32 | 14230 |
from datetime import datetime
from boto.resultset import ResultSet
class Stack(object):
def __init__(self, connection=None):
self.connection = connection
self.creation_time = None
self.description = None
self.disable_rollback = None
self.notification_arns = []
self.outputs = []
self.parameters = []
self.capabilities = []
self.tags = []
self.stack_id = None
self.stack_status = None
self.stack_status_reason = None
self.stack_name = None
self.timeout_in_minutes = None
@property
def stack_name_reason(self):
return self.stack_status_reason
@stack_name_reason.setter
def stack_name_reason(self, value):
self.stack_status_reason = value
def startElement(self, name, attrs, connection):
if name == "Parameters":
self.parameters = ResultSet([('member', Parameter)])
return self.parameters
elif name == "Outputs":
self.outputs = ResultSet([('member', Output)])
return self.outputs
elif name == "Capabilities":
self.capabilities = ResultSet([('member', Capability)])
return self.capabilities
elif name == "Tags":
self.tags = Tag()
return self.tags
elif name == 'NotificationARNs':
self.notification_arns = ResultSet([('member', NotificationARN)])
return self.notification_arns
else:
return None
def endElement(self, name, value, connection):
if name == 'CreationTime':
try:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == "Description":
self.description = value
elif name == "DisableRollback":
if str(value).lower() == 'true':
self.disable_rollback = True
else:
self.disable_rollback = False
elif name == 'StackId':
self.stack_id = value
elif name == 'StackName':
self.stack_name = value
elif name == 'StackStatus':
self.stack_status = value
elif name == "StackStatusReason":
self.stack_status_reason = value
elif name == "TimeoutInMinutes":
self.timeout_in_minutes = int(value)
elif name == "member":
pass
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_stack(stack_name_or_id=self.stack_id)
def describe_events(self, next_token=None):
return self.connection.describe_stack_events(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def describe_resource(self, logical_resource_id):
return self.connection.describe_stack_resource(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id
)
def describe_resources(self, logical_resource_id=None,
physical_resource_id=None):
return self.connection.describe_stack_resources(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id,
physical_resource_id=physical_resource_id
)
def list_resources(self, next_token=None):
return self.connection.list_stack_resources(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def update(self):
rs = self.connection.describe_stacks(self.stack_id)
if len(rs) == 1 and rs[0].stack_id == self.stack_id:
self.__dict__.update(rs[0].__dict__)
else:
raise ValueError("%s is not a valid Stack ID or Name" %
self.stack_id)
def get_template(self):
return self.connection.get_template(stack_name_or_id=self.stack_id)
def get_policy(self):
"""
Returns the stack policy for this stack. If it has no policy
then, a null value is returned.
"""
return self.connection.get_stack_policy(self.stack_id)
def set_policy(self, stack_policy_body=None, stack_policy_url=None):
"""
Sets a stack policy for this stack.
:type stack_policy_body: string
:param stack_policy_body: Structure containing the stack policy body.
(For more information, go to ` Prevent Updates to Stack Resources`_
in the AWS CloudFormation User Guide.)
You must pass `StackPolicyBody` or `StackPolicyURL`. If both are
passed, only `StackPolicyBody` is used.
:type stack_policy_url: string
:param stack_policy_url: Location of a file containing the stack
policy. The URL must point to a policy (max size: 16KB) located in
an S3 bucket in the same region as the stack. You must pass
`StackPolicyBody` or `StackPolicyURL`. If both are passed, only
`StackPolicyBody` is used.
"""
return self.connection.set_stack_policy(self.stack_id,
stack_policy_body=stack_policy_body,
stack_policy_url=stack_policy_url)
class StackSummary(object):
def __init__(self, connection=None):
self.connection = connection
self.stack_id = None
self.stack_status = None
self.stack_name = None
self.creation_time = None
self.deletion_time = None
self.template_description = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'StackId':
self.stack_id = value
elif name == 'StackStatus':
self.stack_status = value
elif name == 'StackName':
self.stack_name = value
elif name == 'CreationTime':
try:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == "DeletionTime":
try:
self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
elif name == 'TemplateDescription':
self.template_description = value
elif name == "member":
pass
else:
setattr(self, name, value)
class Parameter(object):
def __init__(self, connection=None):
self.connection = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "ParameterKey":
self.key = value
elif name == "ParameterValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
class Output(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "OutputKey":
self.key = value
elif name == "OutputValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
class Capability(object):
def __init__(self, connection=None):
self.connection = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
self.value = value
def __repr__(self):
return "Capability:\"%s\"" % (self.value)
class Tag(dict):
def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
self._current_key = None
self._current_value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Key":
self._current_key = value
elif name == "Value":
self._current_value = value
else:
setattr(self, name, value)
if self._current_key and self._current_value:
self[self._current_key] = self._current_value
self._current_key = None
self._current_value = None
class NotificationARN(object):
def __init__(self, connection=None):
self.connection = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
self.value = value
def __repr__(self):
return "NotificationARN:\"%s\"" % (self.value)
class StackResource(object):
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
try:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackResource:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackResourceSummary(object):
def __init__(self, connection=None):
self.connection = connection
self.last_updated_time = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "LastUpdatedTime":
try:
self.last_updated_time = datetime.strptime(
value,
'%Y-%m-%dT%H:%M:%SZ'
)
except ValueError:
self.last_updated_time = datetime.strptime(
value,
'%Y-%m-%dT%H:%M:%S.%fZ'
)
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
else:
setattr(self, name, value)
def __repr__(self):
return "StackResourceSummary:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackEvent(object):
valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE",
"DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE")
def __init__(self, connection=None):
self.connection = connection
self.event_id = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_properties = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "EventId":
self.event_id = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceProperties":
self.resource_properties = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
try:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackEvent %s %s %s" % (self.resource_type,
self.logical_resource_id, self.resource_status)
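# Hedged usage sketch (not part of this module): instances of these classes are
# normally produced by boto's CloudFormation connection when it parses XML
# responses. A read-only flow might look like the following; the region name is
# an assumption and the call requires valid AWS credentials.
if __name__ == '__main__':
    import boto.cloudformation
    conn = boto.cloudformation.connect_to_region('us-east-1')
    for stack in conn.describe_stacks():
        print("%s %s" % (stack.stack_name, stack.stack_status))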
| apache-2.0 | -7,131,283,241,473,819,000 | 32.640662 | 86 | 0.571047 | false |
cpitclaudel/company-coq | etc/find-missing-tactics.py | 4 | 1507 |
import subprocess
import re
TACTIC = re.compile(r'\| IDENT "([^"]+)"')
FIRST_WORD = re.compile("^[a-zA-Z_]*")
UNDOCUMENTED = set(("autounfold_one", "autounfoldify", "casetype", "convert_concl_no_check", "exact_no_check", "fauto", "finduction", "fold_match", "fold_matches", "implify", "ipattern", "is_fix", "is_ground", "poseq", "prolog", "destauto", "substitute", "soft functional induction", "setoid_etransitivity", "new auto", "gintuition", "infoH"))
UNDOCUMENTED_LTAC = set(("external", "now", "ltac"))
MORPHISMS = set(("head_of_constr", "hget_evar", "not_evar"))
STDLIB = set(("autoapply", "destruct_with_eqn", "rew", "rewrite_all", "rewrite_db", "typeclasses eauto"))
TO_ADD = set(("info", "info_auto", "info_eauto", "info_trivial", "debug auto", "debug eauto", "debug trivial"))
def first_word(tactic):
return FIRST_WORD.match(tactic).group(0)
def manual_tactics():
with open('tactics') as found_file:
return set(first_word(line.strip()) for line in found_file)
def grammar_tactics():
COQTOP_INPUT = "Print Grammar tactic.\nQuit."
coqtop_tactics = subprocess.check_output(["/build/coq/bin/coqtop", "-coqlib", "/build/coq/"], input=COQTOP_INPUT, stderr=-1, universal_newlines = True)
return set(first_word(match.group(1)) for match in TACTIC.finditer(coqtop_tactics))
manual = manual_tactics()
grammar = grammar_tactics()
for tac in sorted(x for x in (grammar - manual - UNDOCUMENTED - UNDOCUMENTED_LTAC - MORPHISMS - STDLIB - TO_ADD) if len(x) > 3):
print(tac)
| gpl-3.0 | 6,816,290,856,527,489,000 | 50.965517 | 343 | 0.675514 | false |
namanjain236/personfinder | tools/babel/util.py | 61 | 10411 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Various utility classes and functions."""
import codecs
from datetime import timedelta, tzinfo
import os
import re
try:
set = set
except NameError:
from sets import Set as set
import textwrap
import time
from itertools import izip, imap
missing = object()
__all__ = ['distinct', 'pathmatch', 'relpath', 'wraptext', 'odict', 'UTC',
'LOCALTZ']
__docformat__ = 'restructuredtext en'
def distinct(iterable):
"""Yield all items in an iterable collection that are distinct.
Unlike when using sets for a similar effect, the original ordering of the
items in the collection is preserved by this function.
>>> print list(distinct([1, 2, 1, 3, 4, 4]))
[1, 2, 3, 4]
>>> print list(distinct('foobar'))
['f', 'o', 'b', 'a', 'r']
:param iterable: the iterable collection providing the data
:return: the distinct items in the collection
:rtype: ``iterator``
"""
seen = set()
for item in iter(iterable):
if item not in seen:
yield item
seen.add(item)
# Regexp to match python magic encoding line
PYTHON_MAGIC_COMMENT_re = re.compile(
r'[ \t\f]* \# .* coding[=:][ \t]*([-\w.]+)', re.VERBOSE)
def parse_encoding(fp):
"""Deduce the encoding of a source file from magic comment.
It does this in the same way as the `Python interpreter`__
.. __: http://docs.python.org/ref/encodings.html
The ``fp`` argument should be a seekable file object.
(From Jeff Dairiki)
"""
pos = fp.tell()
fp.seek(0)
try:
line1 = fp.readline()
has_bom = line1.startswith(codecs.BOM_UTF8)
if has_bom:
line1 = line1[len(codecs.BOM_UTF8):]
m = PYTHON_MAGIC_COMMENT_re.match(line1)
if not m:
try:
import parser
parser.suite(line1)
except (ImportError, SyntaxError):
# Either it's a real syntax error, in which case the source is
# not valid python source, or line2 is a continuation of line1,
# in which case we don't want to scan line2 for a magic
# comment.
pass
else:
line2 = fp.readline()
m = PYTHON_MAGIC_COMMENT_re.match(line2)
if has_bom:
if m:
raise SyntaxError(
"python refuses to compile code with both a UTF8 "
"byte-order-mark and a magic encoding comment")
return 'utf_8'
elif m:
return m.group(1)
else:
return None
finally:
fp.seek(pos)
def pathmatch(pattern, filename):
"""Extended pathname pattern matching.
This function is similar to what is provided by the ``fnmatch`` module in
the Python standard library, but:
* can match complete (relative or absolute) path names, and not just file
names, and
* also supports a convenience pattern ("**") to match files at any
directory level.
Examples:
>>> pathmatch('**.py', 'bar.py')
True
>>> pathmatch('**.py', 'foo/bar/baz.py')
True
>>> pathmatch('**.py', 'templates/index.html')
False
>>> pathmatch('**/templates/*.html', 'templates/index.html')
True
>>> pathmatch('**/templates/*.html', 'templates/foo/bar.html')
False
:param pattern: the glob pattern
:param filename: the path name of the file to match against
:return: `True` if the path name matches the pattern, `False` otherwise
:rtype: `bool`
"""
symbols = {
'?': '[^/]',
'?/': '[^/]/',
'*': '[^/]+',
'*/': '[^/]+/',
'**/': '(?:.+/)*?',
'**': '(?:.+/)*?[^/]+',
}
buf = []
for idx, part in enumerate(re.split('([?*]+/?)', pattern)):
if idx % 2:
buf.append(symbols[part])
elif part:
buf.append(re.escape(part))
match = re.match(''.join(buf) + '$', filename.replace(os.sep, '/'))
return match is not None
class TextWrapper(textwrap.TextWrapper):
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))' # em-dash
)
def wraptext(text, width=70, initial_indent='', subsequent_indent=''):
"""Simple wrapper around the ``textwrap.wrap`` function in the standard
library. This version does not wrap lines on hyphens in words.
:param text: the text to wrap
:param width: the maximum line width
:param initial_indent: string that will be prepended to the first line of
wrapped output
:param subsequent_indent: string that will be prepended to all lines save
the first of wrapped output
:return: a list of lines
:rtype: `list`
"""
wrapper = TextWrapper(width=width, initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
break_long_words=False)
return wrapper.wrap(text)
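# Usage sketch (added for illustration, not part of Babel): wraptext() behaves
# like textwrap.wrap() but never breaks words on hyphens.
if __name__ == '__main__':
    print(wraptext('The quick brown fox jumps', width=10))
    # -> ['The quick', 'brown fox', 'jumps']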
class odict(dict):
"""Ordered dict implementation.
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
"""
def __init__(self, data=None):
dict.__init__(self, data or {})
self._keys = dict.keys(self)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
dict.__setitem__(self, key, item)
if key not in self._keys:
self._keys.append(key)
def __iter__(self):
return iter(self._keys)
iterkeys = __iter__
def clear(self):
dict.clear(self)
self._keys = []
def copy(self):
d = odict()
d.update(self)
return d
def items(self):
return zip(self._keys, self.values())
def iteritems(self):
return izip(self._keys, self.itervalues())
def keys(self):
return self._keys[:]
def pop(self, key, default=missing):
if default is missing:
return dict.pop(self, key)
elif key not in self:
return default
self._keys.remove(key)
return dict.pop(self, key, default)
def popitem(self, key):
self._keys.remove(key)
return dict.popitem(key)
def setdefault(self, key, failobj = None):
dict.setdefault(self, key, failobj)
if key not in self._keys:
self._keys.append(key)
def update(self, dict):
for (key, val) in dict.items():
self[key] = val
def values(self):
return map(self.get, self._keys)
def itervalues(self):
return imap(self.get, self._keys)
try:
relpath = os.path.relpath
except AttributeError:
def relpath(path, start='.'):
"""Compute the relative path to one path from another.
>>> relpath('foo/bar.txt', '').replace(os.sep, '/')
'foo/bar.txt'
>>> relpath('foo/bar.txt', 'foo').replace(os.sep, '/')
'bar.txt'
>>> relpath('foo/bar.txt', 'baz').replace(os.sep, '/')
'../foo/bar.txt'
:return: the relative path
:rtype: `basestring`
"""
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list) - i) + path_list[i:]
return os.path.join(*rel_list)
try:
from operator import attrgetter, itemgetter
except ImportError:
def itemgetter(name):
def _getitem(obj):
return obj[name]
return _getitem
try:
''.rsplit
def rsplit(a_string, sep=None, maxsplit=None):
return a_string.rsplit(sep, maxsplit)
except AttributeError:
def rsplit(a_string, sep=None, maxsplit=None):
parts = a_string.split(sep)
if maxsplit is None or len(parts) <= maxsplit:
return parts
maxsplit_index = len(parts) - maxsplit
non_splitted_part = sep.join(parts[:maxsplit_index])
splitted = parts[maxsplit_index:]
return [non_splitted_part] + splitted
ZERO = timedelta(0)
class FixedOffsetTimezone(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name=None):
self._offset = timedelta(minutes=offset)
if name is None:
name = 'Etc/GMT+%d' % offset
self.zone = name
def __str__(self):
return self.zone
def __repr__(self):
return '<FixedOffset "%s" %s>' % (self.zone, self._offset)
def utcoffset(self, dt):
return self._offset
def tzname(self, dt):
return self.zone
def dst(self, dt):
return ZERO
try:
from pytz import UTC
except ImportError:
UTC = FixedOffsetTimezone(0, 'UTC')
"""`tzinfo` object for UTC (Universal Time).
:type: `tzinfo`
"""
STDOFFSET = timedelta(seconds = -time.timezone)
if time.daylight:
DSTOFFSET = timedelta(seconds = -time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
LOCALTZ = LocalTimezone()
"""`tzinfo` object for local time-zone.
:type: `tzinfo`
"""
| apache-2.0 | -5,452,820,674,669,644,000 | 27.137838 | 79 | 0.573336 | false |
dturner-tw/pants | src/python/pants/backend/graph_info/tasks/list_owners.py | 2 | 1221 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.exceptions import TaskError
from pants.build_graph.source_mapper import LazySourceMapper
from pants.task.console_task import ConsoleTask
class ListOwners(ConsoleTask):
"""Print targets that own a source file.
$ pants targets -- path/to/my/source.java
path/to/my:target1
another/path:target2
"""
@classmethod
def supports_passthru_args(cls):
return True
def console_output(self, targets):
sources = self.get_passthru_args()
if not sources:
raise TaskError('No source was specified')
elif len(sources) > 1:
raise TaskError('Too many sources specified.')
lazy_source_mapper = LazySourceMapper(self.context.address_mapper, self.context.build_graph)
for source in sources:
target_addresses_for_source = lazy_source_mapper.target_addresses_for_source(source)
for address in target_addresses_for_source:
yield address.spec
| apache-2.0 | -3,135,101,541,443,977,000 | 33.885714 | 96 | 0.719902 | false |
ABaldwinHunter/django-clone-classic | tests/template_tests/tests.py | 7 | 5075 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from django.contrib.auth.models import Group
from django.template import Context, Engine, TemplateSyntaxError
from django.template.base import UNKNOWN_SOURCE
from django.test import SimpleTestCase, override_settings
from django.urls import NoReverseMatch
class TemplateTests(SimpleTestCase):
def test_string_origin(self):
template = Engine().from_string('string template')
self.assertEqual(template.origin.name, UNKNOWN_SOURCE)
self.assertEqual(template.origin.loader_name, None)
self.assertEqual(template.source, 'string template')
@override_settings(SETTINGS_MODULE=None)
def test_url_reverse_no_settings_module(self):
"""
#9005 -- url tag shouldn't require settings.SETTINGS_MODULE to
be set.
"""
t = Engine(debug=True).from_string('{% url will_not_match %}')
c = Context()
with self.assertRaises(NoReverseMatch):
t.render(c)
def test_url_reverse_view_name(self):
"""
        #19827 -- url tag should keep original stack trace when reraising
exception.
"""
t = Engine().from_string('{% url will_not_match %}')
c = Context()
try:
t.render(c)
except NoReverseMatch:
tb = sys.exc_info()[2]
depth = 0
while tb.tb_next is not None:
tb = tb.tb_next
depth += 1
self.assertGreater(depth, 5,
"The traceback context was lost when reraising the traceback. See #19827")
def test_no_wrapped_exception(self):
"""
# 16770 -- The template system doesn't wrap exceptions, but annotates
them.
"""
engine = Engine(debug=True)
c = Context({"coconuts": lambda: 42 / 0})
t = engine.from_string("{{ coconuts }}")
with self.assertRaises(ZeroDivisionError) as e:
t.render(c)
debug = e.exception.template_debug
self.assertEqual(debug['start'], 0)
self.assertEqual(debug['end'], 14)
def test_invalid_block_suggestion(self):
"""
#7876 -- Error messages should include the unexpected block name.
"""
engine = Engine()
msg = (
"Invalid block tag on line 1: 'endblock', expected 'elif', 'else' "
"or 'endif'. Did you forget to register or load this tag?"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
engine.from_string("{% if 1 %}lala{% endblock %}{% endif %}")
def test_unknown_block_tag(self):
engine = Engine()
msg = (
"Invalid block tag on line 1: 'foobar'. Did you forget to "
"register or load this tag?"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
engine.from_string("lala{% foobar %}")
def test_compile_filter_expression_error(self):
"""
19819 -- Make sure the correct token is highlighted for
FilterExpression errors.
"""
engine = Engine(debug=True)
msg = "Could not parse the remainder: '@bar' from 'foo@bar'"
with self.assertRaisesMessage(TemplateSyntaxError, msg) as e:
engine.from_string("{% if 1 %}{{ foo@bar }}{% endif %}")
debug = e.exception.template_debug
self.assertEqual((debug['start'], debug['end']), (10, 23))
self.assertEqual((debug['during']), '{{ foo@bar }}')
def test_compile_tag_error(self):
"""
Errors raised while compiling nodes should include the token
information.
"""
engine = Engine(
debug=True,
libraries={'bad_tag': 'template_tests.templatetags.bad_tag'},
)
with self.assertRaises(RuntimeError) as e:
engine.from_string("{% load bad_tag %}{% badtag %}")
self.assertEqual(e.exception.template_debug['during'], '{% badtag %}')
def test_super_errors(self):
"""
        #18169 -- NoReverseMatch should not be silenced in block.super.
"""
engine = Engine(app_dirs=True)
t = engine.get_template('included_content.html')
with self.assertRaises(NoReverseMatch):
t.render(Context())
def test_debug_tag_non_ascii(self):
"""
#23060 -- Test non-ASCII model representation in debug output.
"""
group = Group(name="清風")
c1 = Context({"objs": [group]})
t1 = Engine().from_string('{% debug %}')
self.assertIn("清風", t1.render(c1))
def test_extends_generic_template(self):
"""
#24338 -- Allow extending django.template.backends.django.Template
objects.
"""
engine = Engine()
parent = engine.from_string('{% block content %}parent{% endblock %}')
child = engine.from_string(
'{% extends parent %}{% block content %}child{% endblock %}')
self.assertEqual(child.render(Context({'parent': parent})), 'child')
| bsd-3-clause | 6,495,954,376,303,050,000 | 34.683099 | 90 | 0.585554 | false |
tdautc19841202/wechatpy | wechatpy/client/api/semantic.py | 7 | 1772 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatSemantic(BaseWeChatAPI):
def search(self,
query,
category,
uid=None,
latitude=None,
longitude=None,
city=None,
region=None):
"""
        Send a semantic understanding request.
        For details, see
        http://mp.weixin.qq.com/wiki/0/0ce78b3c9524811fee34aba3e33f3448.html
        :param query: the input text string
        :param category: the service category to use; multiple categories can be passed as a list
        :param uid: optional, a unique user id (not the developer id), used to distinguish
                    different users of the official account (the user's openid is recommended)
        :param latitude: optional, latitude coordinate, passed together with longitude;
                         pass either coordinates or a city
        :param longitude: optional, longitude coordinate, passed together with latitude;
                          pass either coordinates or a city
        :param city: optional, city name; pass either a city or coordinates
        :param region: optional, region name, can be omitted when a city is given;
                       pass either a region/city or coordinates
        :return: the returned JSON data
"""
if isinstance(category, (tuple, list)):
category = ','.join(category)
data = optionaldict()
data['query'] = query
data['category'] = category
data['uid'] = uid
data['latitude'] = latitude
data['longitude'] = longitude
data['city'] = city
data['region'] = region
data['appid'] = self._client.appid
return self._post(
url='https://api.weixin.qq.com/semantic/semproxy/search',
data=data
)
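# Hedged usage sketch (not part of this module): the semantic API is normally
# reached through a WeChatClient instance as `client.semantic`. The appid,
# secret and category values below are placeholders.
if __name__ == '__main__':
    from wechatpy import WeChatClient
    client = WeChatClient('your-appid', 'your-secret')
    result = client.semantic.search(
        query='查一下明天从北京到上海的南航机票',  # "look up tomorrow's CZ flights from Beijing to Shanghai"
        category=['flight', 'hotel'],
        city='北京',  # Beijing
    )
    print(result)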
| mit | -8,640,124,292,685,211,000 | 29.468085 | 76 | 0.571229 | false |
COCS4950G7/COSC4950 | Source/Network/GUI_WXPythonExamples/GUI_FocusEvent.py | 1 | 1980 |
__author__ = 'Chris Hamm'
#GUI_FocusEvent
#The focus indicates the currently selected widget in the application.
#Text entered from the keyboard or pasted from the clipboard is sent to the widget that has focus.
#wx.EVT_SET_FOCUS is generated when a widget receives focus.
#wx.EVT_KILL_FOCUS is generated when the widget loses focus. Focus is changed by clicking or by a keyboard key.
import wx
class MyWindow(wx.Panel):
def __init__(self,parent):
super(MyWindow, self).__init__(parent)
self.color= '#b3b3b3'
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
def OnPaint(self, e):
dc= wx.PaintDC(self)
dc.SetPen(wx.Pen(self.color))
x, y =self.GetSize()
dc.DrawRectangle(0,0,x,y)
def OnSize(self, e):
self.Refresh()
def OnSetFocus(self, e):
self.color= '#0099f7'
self.Refresh()
def OnKillFocus(self, e):
self.color= '#b3b3b3'
self.Refresh()
class example(wx.Frame):
def __init__(self, *args, **kw):
super(example, self).__init__(*args, **kw)
self.InitUI()
def InitUI(self):
grid = wx.GridSizer(2,2,10,10)
grid.AddMany([(MyWindow(self),0, wx.EXPAND|wx.TOP|wx.LEFT, 9),
(MyWindow(self),0, wx.EXPAND|wx.TOP|wx.RIGHT, 9),
(MyWindow(self),0, wx.EXPAND|wx.BOTTOM|wx.LEFT, 9),
(MyWindow(self),0, wx.EXPAND|wx.BOTTOM|wx.RIGHT, 9)])
self.SetSizer(grid)
self.SetSize((350,250))
self.SetTitle('Focus event')
self.Centre()
self.Show(True)
def OnMove(self, e):
print e.GetEventObject()
x, y = e.GetPosition()
self.st1.SetLabel(str(x))
self.st2.SetLabel(str(y))
def main():
ex= wx.App()
example(None)
ex.MainLoop()
if __name__ == '__main__':
main() | gpl-3.0 | -9,159,968,910,904,013,000 | 26.901408 | 111 | 0.601515 | false |
rolando-contrib/scrapy | tests/test_selector.py | 16 | 8970 |
import warnings
import weakref
from twisted.trial import unittest
from scrapy.http import TextResponse, HtmlResponse, XmlResponse
from scrapy.selector import Selector
from scrapy.selector.lxmlsel import XmlXPathSelector, HtmlXPathSelector, XPathSelector
from lxml import etree
class SelectorTestCase(unittest.TestCase):
def test_simple_selection(self):
"""Simple selector tests"""
body = b"<p><input name='a'value='1'/><input name='b'value='2'/></p>"
response = TextResponse(url="http://example.com", body=body, encoding='utf-8')
sel = Selector(response)
xl = sel.xpath('//input')
self.assertEqual(2, len(xl))
for x in xl:
assert isinstance(x, Selector)
self.assertEqual(sel.xpath('//input').extract(),
[x.extract() for x in sel.xpath('//input')])
self.assertEqual([x.extract() for x in sel.xpath("//input[@name='a']/@name")],
[u'a'])
self.assertEqual([x.extract() for x in sel.xpath("number(concat(//input[@name='a']/@value, //input[@name='b']/@value))")],
[u'12.0'])
self.assertEqual(sel.xpath("concat('xpath', 'rules')").extract(),
[u'xpathrules'])
self.assertEqual([x.extract() for x in sel.xpath("concat(//input[@name='a']/@value, //input[@name='b']/@value)")],
[u'12'])
def test_root_base_url(self):
body = b'<html><form action="/path"><input name="a" /></form></html>'
url = "http://example.com"
response = TextResponse(url=url, body=body, encoding='utf-8')
sel = Selector(response)
self.assertEqual(url, sel.root.base)
def test_deprecated_root_argument(self):
with warnings.catch_warnings(record=True) as w:
root = etree.fromstring(u'<html/>')
sel = Selector(_root=root)
self.assertIs(root, sel.root)
self.assertEqual(str(w[-1].message),
'Argument `_root` is deprecated, use `root` instead')
def test_deprecated_root_argument_ambiguous(self):
with warnings.catch_warnings(record=True) as w:
_root = etree.fromstring(u'<xml/>')
root = etree.fromstring(u'<html/>')
sel = Selector(_root=_root, root=root)
self.assertIs(root, sel.root)
self.assertIn('Ignoring deprecated `_root` argument', str(w[-1].message))
def test_flavor_detection(self):
text = b'<div><img src="a.jpg"><p>Hello</div>'
sel = Selector(XmlResponse('http://example.com', body=text, encoding='utf-8'))
self.assertEqual(sel.type, 'xml')
self.assertEqual(sel.xpath("//div").extract(),
[u'<div><img src="a.jpg"><p>Hello</p></img></div>'])
sel = Selector(HtmlResponse('http://example.com', body=text, encoding='utf-8'))
self.assertEqual(sel.type, 'html')
self.assertEqual(sel.xpath("//div").extract(),
[u'<div><img src="a.jpg"><p>Hello</p></div>'])
def test_http_header_encoding_precedence(self):
# u'\xa3' = pound symbol in unicode
# u'\xc2\xa3' = pound symbol in utf-8
# u'\xa3' = pound symbol in latin-1 (iso-8859-1)
meta = u'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">'
head = u'<head>' + meta + u'</head>'
body_content = u'<span id="blank">\xa3</span>'
body = u'<body>' + body_content + u'</body>'
html = u'<html>' + head + body + u'</html>'
encoding = 'utf-8'
html_utf8 = html.encode(encoding)
headers = {'Content-Type': ['text/html; charset=utf-8']}
response = HtmlResponse(url="http://example.com", headers=headers, body=html_utf8)
x = Selector(response)
self.assertEquals(x.xpath("//span[@id='blank']/text()").extract(),
[u'\xa3'])
def test_badly_encoded_body(self):
# \xe9 alone isn't valid utf8 sequence
r1 = TextResponse('http://www.example.com', \
body=b'<html><p>an Jos\xe9 de</p><html>', \
encoding='utf-8')
Selector(r1).xpath('//text()').extract()
def test_weakref_slots(self):
"""Check that classes are using slots and are weak-referenceable"""
x = Selector(text='')
weakref.ref(x)
assert not hasattr(x, '__dict__'), "%s does not use __slots__" % \
x.__class__.__name__
def test_deprecated_selector_methods(self):
sel = Selector(TextResponse(url="http://example.com", body=b'<p>some text</p>'))
with warnings.catch_warnings(record=True) as w:
sel.select('//p')
self.assertSubstring('Use .xpath() instead', str(w[-1].message))
with warnings.catch_warnings(record=True) as w:
sel.extract_unquoted()
self.assertSubstring('Use .extract() instead', str(w[-1].message))
def test_deprecated_selectorlist_methods(self):
sel = Selector(TextResponse(url="http://example.com", body=b'<p>some text</p>'))
with warnings.catch_warnings(record=True) as w:
sel.xpath('//p').select('.')
self.assertSubstring('Use .xpath() instead', str(w[-1].message))
with warnings.catch_warnings(record=True) as w:
sel.xpath('//p').extract_unquoted()
self.assertSubstring('Use .extract() instead', str(w[-1].message))
def test_selector_bad_args(self):
with self.assertRaisesRegexp(ValueError, 'received both response and text'):
Selector(TextResponse(url='http://example.com', body=b''), text=u'')
class DeprecatedXpathSelectorTest(unittest.TestCase):
text = '<div><img src="a.jpg"><p>Hello</div>'
def test_warnings_xpathselector(self):
cls = XPathSelector
with warnings.catch_warnings(record=True) as w:
class UserClass(cls):
pass
# subclassing must issue a warning
self.assertEqual(len(w), 1, str(cls))
self.assertIn('scrapy.Selector', str(w[0].message))
# subclass instance doesn't issue a warning
usel = UserClass(text=self.text)
self.assertEqual(len(w), 1)
# class instance must issue a warning
sel = cls(text=self.text)
self.assertEqual(len(w), 2, str((cls, [x.message for x in w])))
self.assertIn('scrapy.Selector', str(w[1].message))
# subclass and instance checks
self.assertTrue(issubclass(cls, Selector))
self.assertTrue(isinstance(sel, Selector))
self.assertTrue(isinstance(usel, Selector))
def test_warnings_xmlxpathselector(self):
cls = XmlXPathSelector
with warnings.catch_warnings(record=True) as w:
class UserClass(cls):
pass
# subclassing must issue a warning
self.assertEqual(len(w), 1, str(cls))
self.assertIn('scrapy.Selector', str(w[0].message))
# subclass instance doesn't issue a warning
usel = UserClass(text=self.text)
self.assertEqual(len(w), 1)
# class instance must issue a warning
sel = cls(text=self.text)
self.assertEqual(len(w), 2, str((cls, [x.message for x in w])))
self.assertIn('scrapy.Selector', str(w[1].message))
# subclass and instance checks
self.assertTrue(issubclass(cls, Selector))
self.assertTrue(issubclass(cls, XPathSelector))
self.assertTrue(isinstance(sel, Selector))
self.assertTrue(isinstance(usel, Selector))
self.assertTrue(isinstance(sel, XPathSelector))
self.assertTrue(isinstance(usel, XPathSelector))
def test_warnings_htmlxpathselector(self):
cls = HtmlXPathSelector
with warnings.catch_warnings(record=True) as w:
class UserClass(cls):
pass
# subclassing must issue a warning
self.assertEqual(len(w), 1, str(cls))
self.assertIn('scrapy.Selector', str(w[0].message))
# subclass instance doesn't issue a warning
usel = UserClass(text=self.text)
self.assertEqual(len(w), 1)
# class instance must issue a warning
sel = cls(text=self.text)
self.assertEqual(len(w), 2, str((cls, [x.message for x in w])))
self.assertIn('scrapy.Selector', str(w[1].message))
# subclass and instance checks
self.assertTrue(issubclass(cls, Selector))
self.assertTrue(issubclass(cls, XPathSelector))
self.assertTrue(isinstance(sel, Selector))
self.assertTrue(isinstance(usel, Selector))
self.assertTrue(isinstance(sel, XPathSelector))
self.assertTrue(isinstance(usel, XPathSelector))
| bsd-3-clause | 4,693,955,435,050,447,000 | 41.511848 | 130 | 0.581828 | false |
swegener/micropython | tests/extmod/uctypes_sizeof.py | 10 | 1251 |
try:
import uctypes
except ImportError:
print("SKIP")
raise SystemExit
desc = {
# arr is array at offset 0, of UINT8 elements, array size is 2
"arr": (uctypes.ARRAY | 0, uctypes.UINT8 | 2),
# arr2 is array at offset 0, size 2, of structures defined recursively
"arr2": (uctypes.ARRAY | 0, 2, {"b": uctypes.UINT8 | 0}),
"arr3": (uctypes.ARRAY | 2, uctypes.UINT16 | 2),
"arr4": (uctypes.ARRAY | 0, 2, {"b": uctypes.UINT8 | 0, "w": uctypes.UINT16 | 1}),
"sub": (0, {
'b1': uctypes.BFUINT8 | 0 | 4 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
'b2': uctypes.BFUINT8 | 0 | 0 << uctypes.BF_POS | 4 << uctypes.BF_LEN,
}),
}
data = bytearray(b"01234567")
S = uctypes.struct(uctypes.addressof(data), desc, uctypes.LITTLE_ENDIAN)
print(uctypes.sizeof(S.arr))
assert uctypes.sizeof(S.arr) == 2
print(uctypes.sizeof(S.arr2))
assert uctypes.sizeof(S.arr2) == 2
print(uctypes.sizeof(S.arr3))
try:
print(uctypes.sizeof(S.arr3[0]))
except TypeError:
print("TypeError")
print(uctypes.sizeof(S.arr4))
assert uctypes.sizeof(S.arr4) == 6
print(uctypes.sizeof(S.sub))
assert uctypes.sizeof(S.sub) == 1
# invalid descriptor
try:
print(uctypes.sizeof([]))
except TypeError:
print("TypeError")
| mit | 2,361,576,658,775,791,600 | 25.617021 | 86 | 0.642686 | false |
JensTimmerman/radical.pilot | src/radical/pilot/db/database.py | 1 | 28615 |
#pylint: disable=C0301, C0103, W0212
"""
.. module:: radical.pilot.database
:platform: Unix
:synopsis: Database functions.
.. moduleauthor:: Ole Weidner <[email protected]>
"""
__copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import saga
import datetime
import gridfs
import radical.utils as ru
from pymongo import *
from radical.pilot.states import *
from radical.pilot.utils import DBConnectionInfo
COMMAND_CANCEL_PILOT = "Cancel_Pilot"
COMMAND_CANCEL_COMPUTE_UNIT = "Cancel_Compute_Unit"
COMMAND_KEEP_ALIVE = "Keep_Alive"
COMMAND_FIELD = "commands"
COMMAND_TYPE = "type"
COMMAND_ARG = "arg"
COMMAND_TIME = "time"
# -----------------------------------------------------------------------------
#
class DBException(Exception):
# -------------------------------------------------------------------------
#
def __init__(self, msg, obj=None):
"""Le constructeur. Creates a new exception object.
"""
Exception.__init__(self, msg)
self._obj = obj
# -----------------------------------------------------------------------------
#
class DBEntryExistsException(Exception):
# -------------------------------------------------------------------------
#
def __init__(self, msg, obj=None):
"""Le constructeur. Creates a new exception object.
"""
Exception.__init__(self, msg)
self._obj = obj
#-----------------------------------------------------------------------------
#
class Session():
#--------------------------------------------------------------------------
#
def __init__(self, db_url, db_name="radicalpilot"):
""" Le constructeur. Should not be called directrly, but rather
via the static methods new() or reconnect().
"""
url = ru.Url (db_url)
if db_name :
url.path = db_name
mongo, db, dbname, pname, cname = ru.mongodb_connect (url)
self._client = mongo
self._db = db
self._dburl = str(url)
self._dbname = dbname
if url.username and url.password:
self._dbauth = "%s:%s" % (url.username, url.password)
else:
self._dbauth = None
self._session_id = None
self._s = None
self._w = None
self._um = None
self._p = None
self._pm = None
#--------------------------------------------------------------------------
#
@staticmethod
def new(sid, name, db_url, db_name="radicalpilot"):
""" Creates a new session (factory method).
"""
creation_time = datetime.datetime.utcnow()
dbs = Session(db_url, db_name)
dbs.create(sid, name, creation_time)
connection_info = DBConnectionInfo(
session_id=sid,
dbname=dbs._dbname,
dbauth=dbs._dbauth,
dburl=dbs._dburl
)
return (dbs, creation_time, connection_info)
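    # Hedged usage sketch (not part of the original class): a caller is expected
    # to create and later reconnect to a session roughly like this (URL and ids
    # below are placeholders):
    #
    #     dbs, created, info = Session.new(sid="rp.session.0001", name="test",
    #                                      db_url="mongodb://localhost:27017/")
    #     dbs.insert_pilot_manager({"some": "data"}, pilot_launcher_workers=1)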
#--------------------------------------------------------------------------
#
def create(self, sid, name, creation_time):
""" Creates a new session (private).
A session is a distinct collection with three sub-collections
in MongoDB:
radical.pilot.<sid> | Base collection. Holds some metadata. | self._s
radical.pilot.<sid>.cu | Collection holding all compute units. | self._w
radical.pilot.<sid>.um | Collection holding all unit managers. | self._um
radical.pilot.<sid>.p | Collection holding all pilots. | self._p
radical.pilot.<sid>.pm | Collection holding all pilot managers. | self._pm
All collections are created with a new session. Since MongoDB
uses lazy-create, they only appear in the database after the
first insert. That's ok.
"""
# make sure session doesn't exist already
if sid :
if self._db[sid].count() != 0 :
raise DBEntryExistsException ("Session '%s' already exists." % sid)
# remember session id
self._session_id = sid
self._s = self._db["%s" % sid]
self._s.insert({"_id" : sid,
"name" : name,
"created" : creation_time,
"connected" : creation_time})
# Create the collection shortcut:
self._w = self._db["%s.cu" % sid]
self._um = self._db["%s.um" % sid]
self._p = self._db["%s.p" % sid]
self._pm = self._db["%s.pm" % sid]
#--------------------------------------------------------------------------
#
@staticmethod
def reconnect(sid, db_url, db_name="radical.pilot"):
""" Reconnects to an existing session.
Here we simply check if a radical.pilot.<sid> collection exists.
"""
dbs = Session(db_url, db_name)
session_info = dbs._reconnect(sid)
connection_info = DBConnectionInfo(
session_id=sid,
dbname=dbs._dbname,
dbauth=dbs._dbauth,
dburl=dbs._dburl
)
return (dbs, session_info, connection_info)
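    # Hypothetical usage sketch (same caveats as for new() above):
    #
    #     dbs, session_doc, info = Session.reconnect("rp.session.0001",
    #                                                "mongodb://localhost:27017/")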
#--------------------------------------------------------------------------
#
def _reconnect(self, sid):
""" Reconnects to an existing session (private).
"""
# make sure session exists
#if sid not in self._db.collection_names():
# raise DBEntryDoesntExistException("Session with id '%s' doesn't exists." % sid)
self._s = self._db["%s" % sid]
cursor = self._s.find({"_id": sid})
self._s.update({"_id" : sid},
{"$set" : {"connected" : datetime.datetime.utcnow()}}
)
cursor = self._s.find({"_id": sid})
# cursor -> dict
#if len(cursor) != 1:
# raise DBEntryDoesntExistException("Session with id '%s' doesn't exists." % sid)
self._session_id = sid
# Create the collection shortcut:
self._w = self._db["%s.cu" % sid]
self._um = self._db["%s.um" % sid]
self._p = self._db["%s.p" % sid]
self._pm = self._db["%s.pm" % sid]
try:
return cursor[0]
except:
raise Exception("Couldn't find Session UID '%s' in database." % sid)
#--------------------------------------------------------------------------
#
@property
def session_id(self):
""" Returns the session id.
"""
return self._session_id
#--------------------------------------------------------------------------
#
def delete(self):
""" Removes a session and all associated collections from the DB.
"""
if self._s is None:
raise DBException("No active session.")
for collection in [self._s, self._w, self._um, self._p, self._pm]:
collection.drop()
collection = None
#--------------------------------------------------------------------------
#
def insert_pilot_manager(self, pilot_manager_data, pilot_launcher_workers):
""" Adds a pilot managers to the list of pilot managers.
Pilot manager IDs are just kept for book-keeping.
"""
if self._s is None:
raise Exception("No active session.")
pilot_manager_json = {"data": pilot_manager_data,
"pilot_launcher_workers": pilot_launcher_workers}
result = self._pm.insert(pilot_manager_json)
# return the object id as a string
return str(result)
#--------------------------------------------------------------------------
#
def list_pilot_manager_uids(self):
""" Lists all pilot managers.
"""
if self._s is None:
raise Exception("No active session.")
pilot_manager_uids = []
cursor = self._pm.find()
# cursor -> dict
for obj in cursor:
pilot_manager_uids.append(str(obj['_id']))
return pilot_manager_uids
#--------------------------------------------------------------------------
#
def get_compute_unit_stdout(self, unit_uid):
"""Returns the ComputeUnit's unit's stdout.
"""
if self._s is None:
raise Exception("No active session.")
cursor = self._w.find({"_id": unit_uid})
return cursor[0]['stdout']
#--------------------------------------------------------------------------
#
def get_compute_unit_stderr(self, unit_uid):
"""Returns the ComputeUnit's unit's stderr.
"""
if self._s is None:
raise Exception("No active session.")
cursor = self._w.find({"_id": unit_uid})
return cursor[0]['stderr']
#--------------------------------------------------------------------------
#
def update_pilot_state(self, pilot_uid, started=None, finished=None,
submitted=None, state=None, sagajobid=None,
pilot_sandbox=None, global_sandbox=None,
logs=None):
"""Updates the information of a pilot.
"""
if self._s is None:
raise Exception("No active session.")
# construct the update query
set_query = dict()
push_query = dict()
if state :
set_query["state"] = state
push_query["statehistory"] = [{'state': state, 'timestamp': datetime.datetime.utcnow()}]
if logs :
push_query["log"] = logs
if started : set_query["started"] = started
if finished : set_query["finished"] = finished
if submitted : set_query["submitted"] = submitted
if sagajobid : set_query["sagajobid"] = sagajobid
if pilot_sandbox : set_query["sandbox"] = pilot_sandbox
if global_sandbox : set_query["global_sandbox"] = global_sandbox
# update pilot entry.
self._p.update(
{"_id": pilot_uid},
{"$set": set_query, "$pushAll": push_query},
multi=True
)
#--------------------------------------------------------------------------
#
def insert_pilot(self, pilot_uid, pilot_manager_uid, pilot_description,
pilot_sandbox, global_sandbox):
"""Adds a new pilot document to the database.
"""
if self._s is None:
raise Exception("No active session.")
ts = datetime.datetime.utcnow()
# the SAGA attribute interface does not expose private attribs in
# as_dict(). That semantics may change in the future, for now we copy
# private elems directly.
pd_dict = dict()
for k in pilot_description._attributes_i_list (priv=True):
pd_dict[k] = pilot_description[k]
pilot_doc = {
"_id": pilot_uid,
"description": pd_dict,
"submitted": datetime.datetime.utcnow(),
"input_transfer_started": None,
"input_transfer_finished": None,
"started": None,
"finished": None,
"heartbeat": None,
"output_transfer_started": None,
"output_transfer_finished": None,
"nodes": None,
"cores_per_node": None,
"sagajobid": None,
"sandbox": pilot_sandbox,
"global_sandbox": global_sandbox,
"state": PENDING_LAUNCH,
"statehistory": [{"state": PENDING_LAUNCH, "timestamp": ts}],
"log": [],
"pilotmanager": pilot_manager_uid,
"unitmanager": None,
"commands": []
}
self._p.insert(pilot_doc)
return str(pilot_uid), pilot_doc
#--------------------------------------------------------------------------
#
def list_pilot_uids(self, pilot_manager_uid=None):
""" Lists all pilots for a pilot manager.
"""
if self._s is None:
raise Exception("No active session.")
pilot_ids = []
if pilot_manager_uid is not None:
cursor = self._p.find({"pilotmanager": pilot_manager_uid})
else:
cursor = self._p.find()
# cursor -> dict
for obj in cursor:
pilot_ids.append(str(obj['_id']))
return pilot_ids
#--------------------------------------------------------------------------
#
def get_pilots(self, pilot_manager_id=None, pilot_ids=None):
""" Get a pilot
"""
if self._s is None:
raise Exception("No active session.")
if pilot_manager_id is None and pilot_ids is None:
raise Exception(
"pilot_manager_id and pilot_ids can't both be None.")
if pilot_ids is None:
cursor = self._p.find({"pilotmanager": pilot_manager_id})
else:
if not isinstance(pilot_ids, list):
pilot_ids = [pilot_ids]
# convert ids to object ids
pilot_oid = []
for pid in pilot_ids:
pilot_oid.append(pid)
cursor = self._p.find({"_id": {"$in": pilot_oid}})
pilots_json = []
for obj in cursor:
pilots_json.append(obj)
return pilots_json
#--------------------------------------------------------------------------
#
def send_command_to_pilot(self, cmd, arg=None, pilot_manager_id=None, pilot_ids=None):
""" Send a command to one or more pilots.
"""
if self._s is None:
raise Exception("No active session.")
if pilot_manager_id is None and pilot_ids is None:
raise Exception("Either Pilot Manager or Pilot needs to be specified.")
if pilot_manager_id is not None and pilot_ids is not None:
raise Exception("Pilot Manager and Pilot can not be both specified.")
command = {COMMAND_FIELD: {COMMAND_TYPE: cmd,
COMMAND_ARG: arg,
COMMAND_TIME: datetime.datetime.utcnow()
}}
if pilot_ids is None:
# send command to all pilots that are known to the
# pilot manager.
self._p.update(
{"pilotmanager": pilot_manager_id},
{"$push": command},
multi=True
)
else:
if not isinstance(pilot_ids, list):
pilot_ids = [pilot_ids]
# send command to selected pilots if pilot_ids are
# specified convert ids to object ids
for pid in pilot_ids:
self._p.update(
{"_id": pid},
{"$push": command}
)
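    # E.g. (hypothetical pilot id):
    #
    #     dbs.send_command_to_pilot(COMMAND_CANCEL_PILOT, pilot_ids="pilot.0000")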
#--------------------------------------------------------------------------
#
def publish_compute_pilot_callback_history(self, pilot_uid, callback_history):
if self._s is None:
raise Exception("No active session.")
self._p.update({"_id": pilot_uid},
{"$set": {"callbackhistory": callback_history}})
#--------------------------------------------------------------------------
#
def get_compute_units(self, unit_manager_id, unit_ids=None):
""" Get yerself a bunch of compute units.
"""
if self._s is None:
raise Exception("No active session.")
if unit_ids is None:
cursor = self._w.find(
{"unitmanager": unit_manager_id}
)
else:
# convert ids to object ids
unit_oid = []
for wid in unit_ids:
unit_oid.append(wid)
cursor = self._w.find(
{"_id": {"$in": unit_oid},
"unitmanager": unit_manager_id}
)
units_json = []
for obj in cursor:
units_json.append(obj)
return units_json
#--------------------------------------------------------------------------
#
def change_compute_units (self, filter_dict, set_dict, push_dict):
"""Update the state and the log of all compute units belonging to
a specific pilot.
"""
ts = datetime.datetime.utcnow()
if self._s is None:
raise Exception("No active session.")
self._w.update(spec = filter_dict,
document = {"$set" : set_dict,
"$push": push_dict},
multi = True)
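    # Illustrative call (hypothetical values; the state constants come from
    # radical.pilot.states):
    #
    #     dbs.change_compute_units(
    #         filter_dict = {"pilot": pilot_uid, "state": PENDING_LAUNCH},
    #         set_dict    = {"state": CANCELED},
    #         push_dict   = {"log": {"message": "pilot gone", "timestamp": ts}})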
#--------------------------------------------------------------------------
#
def set_compute_unit_state(self, unit_ids, state, log, src_states=None):
"""
Update the state and the log of one or more ComputeUnit(s).
If src_states is given, this will only update units which are currently
in those src states.
"""
ts = datetime.datetime.utcnow()
if not unit_ids :
return
if self._s is None:
raise Exception("No active session.")
# Make sure we work on a list.
if not isinstance(unit_ids, list):
unit_ids = [unit_ids]
if src_states and not isinstance (src_states, list) :
src_states = [src_states]
bulk = self._w.initialize_ordered_bulk_op ()
for uid in unit_ids :
if src_states :
                bulk.find ({"_id"   : uid,
                            "state" : {"$in" : src_states} }) \
                    .update ({"$set"  : {"state": state},
                              "$push" : {"statehistory" : {"state": state, "timestamp": ts},
                                         "log"          : {"message": log, "timestamp": ts}}})
            else :
                bulk.find ({"_id" : uid}) \
                    .update ({"$set"  : {"state": state},
                              "$push" : {"statehistory" : {"state": state, "timestamp": ts},
                                         "log"          : {"message": log, "timestamp": ts}}})
result = bulk.execute()
# TODO: log result.
# WHY DON'T WE HAVE A LOGGER HERE?
#--------------------------------------------------------------------------
#
def get_compute_unit_states(self, unit_manager_id, unit_ids=None):
""" Get yerself a bunch of compute units.
"""
if self._s is None:
raise Exception("No active session.")
if unit_ids is None:
cursor = self._w.find(
{"unitmanager": unit_manager_id},
{"state": 1}
)
else:
# convert ids to object ids
unit_oid = []
for wid in unit_ids:
unit_oid.append(wid)
cursor = self._w.find(
{"_id": {"$in": unit_oid},
"unitmanager": unit_manager_id},
{"state": 1}
)
unit_states = []
for obj in cursor:
unit_states.append(obj['state'])
return unit_states
#--------------------------------------------------------------------------
#
def insert_unit_manager(self, scheduler, input_transfer_workers, output_transfer_workers):
""" Adds a unit managers to the list of unit managers.
Unit manager IDs are just kept for book-keeping.
"""
if self._s is None:
raise Exception("No active session.")
result = self._um.insert(
{"scheduler": scheduler,
"input_transfer_workers": input_transfer_workers,
"output_transfer_workers": output_transfer_workers }
)
# return the object id as a string
return str(result)
#--------------------------------------------------------------------------
#
def get_unit_manager(self, unit_manager_id):
""" Get a unit manager.
"""
if self._s is None:
raise DBException("No active session.")
cursor = self._um.find({"_id": unit_manager_id})
if cursor.count() != 1:
msg = "No unit manager with id %s found in DB." % unit_manager_id
raise DBException(msg=msg)
try:
return cursor[0]
except:
msg = "No UnitManager with id '%s' found in database." % unit_manager_id
raise DBException(msg=msg)
#--------------------------------------------------------------------------
#
def get_pilot_manager(self, pilot_manager_id):
""" Get a unit manager.
"""
if self._s is None:
raise DBException("No active session.")
cursor = self._pm.find({"_id": pilot_manager_id})
try:
return cursor[0]
except:
msg = "No pilot manager with id '%s' found in DB." % pilot_manager_id
raise DBException(msg=msg)
#--------------------------------------------------------------------------
#
def list_unit_manager_uids(self):
""" Lists all pilot managers.
"""
if self._s is None:
raise Exception("No active session.")
unit_manager_uids = []
cursor = self._um.find()
# cursor -> dict
for obj in cursor:
unit_manager_uids.append(str(obj['_id']))
return unit_manager_uids
#--------------------------------------------------------------------------
#
def unit_manager_add_pilots(self, unit_manager_id, pilot_ids):
""" Adds a pilot from a unit manager.
"""
if self._s is None:
raise Exception("No active session.")
for pilot_id in pilot_ids:
self._p.update({"_id": pilot_id},
{"$set": {"unitmanager": unit_manager_id}},
True)
#--------------------------------------------------------------------------
#
def unit_manager_remove_pilots(self, unit_manager_id, pilot_ids):
""" Removes one or more pilots from a unit manager.
"""
if self._s is None:
raise Exception("No active session.")
        # Detach each pilot from the unit manager
for pilot_id in pilot_ids:
self._p.update({"_id": pilot_id},
{"$set": {"unitmanager": None}}, True)
#--------------------------------------------------------------------------
#
def unit_manager_list_pilots(self, unit_manager_uid):
""" Lists all pilots associated with a unit manager.
"""
if self._s is None:
raise Exception("No active session.")
cursor = self._p.find({"unitmanager": unit_manager_uid})
# cursor -> dict
pilot_ids = []
for obj in cursor:
pilot_ids.append(str(obj['_id']))
return pilot_ids
#--------------------------------------------------------------------------
#
def unit_manager_list_compute_units(self, unit_manager_uid, pilot_uid=None):
""" Lists all compute units associated with a unit manager.
"""
# FIXME: why is this call not updating local unit state?
if self._s is None:
raise Exception("No active session.")
if pilot_uid :
cursor = self._w.find({"unitmanager": unit_manager_uid,
"pilot" : pilot_uid})
else :
cursor = self._w.find({"unitmanager": unit_manager_uid})
# cursor -> dict
unit_ids = []
for obj in cursor:
unit_ids.append(str(obj['_id']))
return unit_ids
#--------------------------------------------------------------------------
#
def pilot_list_compute_units(self, pilot_uid):
""" Lists all compute units associated with a unit manager.
"""
# FIXME: why is this call not updating local unit state?
if self._s is None:
raise Exception("No active session.")
cursor = self._w.find({"pilot" : pilot_uid})
# cursor -> dict
unit_ids = []
for obj in cursor:
unit_ids.append(str(obj['_id']))
return unit_ids
#--------------------------------------------------------------------------
#
def assign_compute_units_to_pilot(self, units, pilot_uid, pilot_sandbox):
"""Assigns one or more compute units to a pilot.
"""
if not units :
return
if self._s is None:
raise Exception("No active session.")
# Make sure we work on a list.
if not isinstance(units, list):
units = [units]
bulk = self._w.initialize_ordered_bulk_op ()
for unit in units :
bulk.find ({"_id" : unit.uid}) \
.update ({"$set": {"description" : unit.description.as_dict(),
"pilot" : pilot_uid,
"pilot_sandbox" : pilot_sandbox,
"sandbox" : unit.sandbox,
"FTW_Input_Status": unit.FTW_Input_Status,
"FTW_Input_Directives": unit.FTW_Input_Directives,
"Agent_Input_Status": unit.Agent_Input_Status,
"Agent_Input_Directives": unit.Agent_Input_Directives,
"FTW_Output_Status": unit.FTW_Output_Status,
"FTW_Output_Directives": unit.FTW_Output_Directives,
"Agent_Output_Status": unit.Agent_Output_Status,
"Agent_Output_Directives": unit.Agent_Output_Directives
}})
result = bulk.execute()
# TODO: log result.
# WHY DON'T WE HAVE A LOGGER HERE?
#--------------------------------------------------------------------------
#
def publish_compute_unit_callback_history(self, unit_uid, callback_history):
if self._s is None:
raise Exception("No active session.")
self._w.update({"_id": unit_uid},
{"$set": {"callbackhistory": callback_history}})
#--------------------------------------------------------------------------
#
def insert_compute_units(self, unit_manager_uid, units, unit_log):
""" Adds one or more compute units to the database and sets their state
to 'PENDING'.
"""
if self._s is None:
raise Exception("No active session.")
# Make sure we work on a list.
if not isinstance(units, list):
units = [units]
unit_docs = list()
results = dict()
for unit in units:
ts = datetime.datetime.utcnow()
unit_json = {
"_id": unit.uid,
"description": unit.description.as_dict(),
"restartable": unit.description.restartable,
"unitmanager": unit_manager_uid,
"pilot": None,
"pilot_sandbox": None,
"state": unit._local_state,
"statehistory": [{"state": unit._local_state, "timestamp": ts}],
"submitted": datetime.datetime.utcnow(),
"started": None,
"finished": None,
"exec_locs": None,
"exit_code": None,
"sandbox": None,
"stdout": None,
"stderr": None,
"log": unit_log,
"FTW_Input_Status": None,
"FTW_Input_Directives": None,
"Agent_Input_Status": None,
"Agent_Input_Directives": None,
"FTW_Output_Status": None,
"FTW_Output_Directives": None,
"Agent_Output_Status": None,
"Agent_Output_Directives": None
}
unit_docs.append(unit_json)
results[unit.uid] = unit_json
unit_uids = self._w.insert(unit_docs)
assert len(unit_docs) == len(unit_uids)
assert len(results) == len(unit_uids)
return results
| mit | -2,865,921,344,388,207,000 | 32.546307 | 100 | 0.453329 | false |
maxwward/SCOPEBak | askbot/migrations/0039_populate_tag_filter_strategies.py | 2 | 27664 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from askbot import const
class Migration(DataMigration):
def forwards(self, orm):
"""populate email and display filter strategies"""
for user in orm['auth.User'].objects.all():
old_email_setting = user.tag_filter_setting
if old_email_setting == 'ignored':
user.email_tag_filter_strategy = const.EXCLUDE_IGNORED
elif old_email_setting == 'interesting':
user.email_tag_filter_strategy = const.INCLUDE_INTERESTING
hide_ignored = user.hide_ignored_exercises
if hide_ignored == True:
user.display_tag_filter_strategy = const.EXCLUDE_IGNORED
user.save()
def backwards(self, orm):
"""populate ``User.tag_filter_setting``
and
``User.hide_ignored_exercises
"""
for user in orm['auth.User'].objects.all():
email_strategy = user.email_tag_filter_strategy
if email_strategy == const.EXCLUDE_IGNORED:
user.tag_filter_setting = 'ignored'
elif email_strategy == const.INCLUDE_INTERESTING:
user.tag_filter_setting = 'interesting'
if user.display_tag_filter_strategy == const.EXCLUDE_IGNORED:
user.hide_ignored_exercises = True
user.save()
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'through': "'ActivityAuditStatus'", 'to': "orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousproblem': {
'Meta': {'object_name': 'AnonymousProblem'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Exercise']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousexercise': {
'Meta': {'object_name': 'AnonymousExercise'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "u'problem'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_problems'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['askbot.Exercise']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.problemrevision': {
'Meta': {'ordering': "('-revision',)", 'object_name': 'ProblemRevision', 'db_table': "u'problem_revision'"},
'problem': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Problem']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problemrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'through': "'Award'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoriteexercise': {
'Meta': {'object_name': 'FavoriteExercise', 'db_table': "u'favorite_exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_exercises'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.exercise': {
'Meta': {'object_name': 'Exercise', 'db_table': "u'exercise'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'problem_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'problem_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercises'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_exercises'", 'through': "'FavoriteExercise'", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_exercises'", 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_exercises'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_exercises'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'exercises'", 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.exerciserevision': {
'Meta': {'ordering': "('-revision',)", 'object_name': 'ExerciseRevision', 'db_table': "u'exercise_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exerciserevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Exercise']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'askbot.exerciseview': {
'Meta': {'object_name': 'ExerciseView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Exercise']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'exercise_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'exercise': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Exercise']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'has_custom_avatar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hide_ignored_exercises': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'exercises_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'tag_filter_setting': ('django.db.models.fields.CharField', [], {'default': "'ignored'", 'max_length': '16'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 | -4,055,692,490,258,369,500 | 82.075075 | 185 | 0.557909 | false |
grandmasterchef/WhatManager2 | WhatManager2/settings.example.py | 1 | 9362 | import os
import djcelery
djcelery.setup_loader()
WHAT_USER_ID = 123456
WHAT_USERNAME = 'your what.cd username'
WHAT_PASSWORD = 'your what.cd password'
# How frequently your profile will be stored, in seconds
WHAT_PROFILE_SNAPSHOT_INTERVAL = 10 * 60
# What fraction of your disk is guaranteed to be left empty by WM
MIN_FREE_DISK_SPACE = 0.10
# Below this ratio, queued torrents won't be downloaded.
MIN_WHAT_RATIO = 1.3
# Whether the frequent sync will make sure ReleaseInfo is there. Leave False.
SYNC_SYNCS_FILES = False
# You might set this to ssl.what.cd is what.cd has a long downtime, but ssl is up.
WHAT_CD_DOMAIN = 'what.cd'
WHAT_UPLOAD_URL = 'https://{0}/upload.php'.format(WHAT_CD_DOMAIN)
# Only for uploading
WHAT_ANNOUNCE = 'http://tracker.what.cd:34000/SET THIS TO YOUR ANNOUNCE/announce'
# Set this to something reasonable that only you know.
TRANSMISSION_PASSWORD = '9dqQQ2WW'
# Where Transmission system files will go
TRANSMISSION_FILES_ROOT = '/mnt/tank/Torrent/transmission-daemon'
# Transmission's ipv4 bind address. Leave as is or changed to specific ip.
TRANSMISSION_BIND_HOST = '0.0.0.0'
# You only need these if you are uploading books
WHATIMG_USERNAME = 'whatimg username'
WHATIMG_PASSWORD = 'whatimg password'
# Settings for the emails that WM will send you if there is a freeleech. By default, you don't get
# emails. If you want emails, set FREELEECH_HOSTNAME to your machine's hostname. These settings are
# for gmail, but any provider will work
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'username at gmail'
EMAIL_HOST_PASSWORD = 'password at gmail'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
FREELEECH_EMAIL_FROM = u'your own [email protected]'
FREELEECH_EMAIL_TO = u'wherever you want to receive [email protected]'
# Less than this and you won't get an email.
FREELEECH_EMAIL_THRESHOLD = 2
# The script will only send emails if the current hostname is equals this.
FREELEECH_HOSTNAME = u'NO_EMAILS'
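# For example, to actually receive the freeleech emails on this machine you
# could set it to the real hostname (illustrative snippet, not required):
# import socket
# FREELEECH_HOSTNAME = socket.gethostname()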
# You only need to set this if you'll be using the userscripts. Do not put a trailing slash.
USERSCRIPT_WM_ROOT = 'http://hostname.com'
# You only need to set these if you are running the transcoder
TRANSCODER_ADD_TORRENT_URL = 'http://hostname.com/json/add_torrent'
TRANSCODER_HTTP_USERNAME = 'http username'
TRANSCODER_HTTP_PASSWORD = 'http password'
TRANSCODER_TEMP_DIR = '/mnt/bulk/temp/whatup.celery.{0}'.format(os.getpid())
TRANSCODER_ERROR_OUTPUT = '/mnt/bulk/temp/what_error.html'
TRANSCODER_FORMATS = ['V0', '320'] # You can also add V2
BROKER_URL = 'amqp://guest:guest@localhost:5672/'
# You only need to set these if you are running transmission_files_sync
FILES_SYNC_HTTP_USERNAME = 'username'
FILES_SYNC_HTTP_PASSWORD = 'password'
FILES_SYNC_SSH = '[email protected]'
FILES_SYNC_WM_ROOT = 'https://host.com/'
# Used permissions
# home_view_logentry - Viewing logs
# home_add_whattorrent - Adding torrents
# home_view_whattorrent - Viewing torrents
# what_transcode.add_transcoderequest - Adding transcode requests
# home.run_checks = Running checks
# home.view_transinstance_stats - Realtime stats viewing
# queue.view_queueitem - Viewing the queue
# queue.add_queueitem - Add to the queue
# what_profile.view_whatusersnapshot - Viewing the user profile
# home.download_whattorrent - Downloading torrent zips
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'what_manager2', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': '',
'HOST': '127.0.0.1',
# Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Login URL
LOGIN_URL = '/user/login'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(BASE_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'FvvhvjFKYRxKR9Y7xSt883Ww'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'WhatManager2.middleware.HttpBasicAuthMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'WhatManager2.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'WhatManager2.wsgi.application'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '..', 'templates').replace('\\', '/'),)
INSTALLED_APPS = (
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Library apps
'djcelery',
'bootstrapform',
# WhatManager2 apps
'WhatManager2',
'login',
'home',
'what_json',
'download',
'queue',
'what_profile',
'what_transcode',
'books',
'bibliotik',
'bibliotik_json',
'what_meta',
'whatify',
'myanonamouse',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'WhatManager2.context_processors.context_processor',
)
DATETIME_FORMAT = 'Y-m-d H:i:s'
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'LOCATION': 'wm-cache'
# }
# }
| mit | -1,908,932,392,732,087,800 | 33.546125 | 99 | 0.711173 | false |
jrydberg/moto | tests/test_ec2/test_internet_gateways.py | 19 | 7539 | from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import re
import boto
from boto.exception import EC2ResponseError
import sure # noqa
from moto import mock_ec2
VPC_CIDR="10.0.0.0/16"
BAD_VPC="vpc-deadbeef"
BAD_IGW="igw-deadbeef"
@mock_ec2
def test_igw_create():
""" internet gateway create """
conn = boto.connect_vpc('the_key', 'the_secret')
conn.get_all_internet_gateways().should.have.length_of(0)
igw = conn.create_internet_gateway()
conn.get_all_internet_gateways().should.have.length_of(1)
igw.id.should.match(r'igw-[0-9a-f]+')
igw = conn.get_all_internet_gateways()[0]
igw.attachments.should.have.length_of(0)
@mock_ec2
def test_igw_attach():
""" internet gateway attach """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc.id)
igw = conn.get_all_internet_gateways()[0]
igw.attachments[0].vpc_id.should.be.equal(vpc.id)
@mock_ec2
def test_igw_attach_bad_vpc():
""" internet gateway fail to attach w/ bad vpc """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
with assert_raises(EC2ResponseError) as cm:
conn.attach_internet_gateway(igw.id, BAD_VPC)
cm.exception.code.should.equal('InvalidVpcID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_attach_twice():
""" internet gateway fail to attach twice """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc1 = conn.create_vpc(VPC_CIDR)
vpc2 = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc1.id)
with assert_raises(EC2ResponseError) as cm:
conn.attach_internet_gateway(igw.id, vpc2.id)
cm.exception.code.should.equal('Resource.AlreadyAssociated')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach():
""" internet gateway detach"""
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc.id)
conn.detach_internet_gateway(igw.id, vpc.id)
igw = conn.get_all_internet_gateways()[0]
igw.attachments.should.have.length_of(0)
@mock_ec2
def test_igw_detach_wrong_vpc():
""" internet gateway fail to detach w/ wrong vpc """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc1 = conn.create_vpc(VPC_CIDR)
vpc2 = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc1.id)
with assert_raises(EC2ResponseError) as cm:
conn.detach_internet_gateway(igw.id, vpc2.id)
cm.exception.code.should.equal('Gateway.NotAttached')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach_invalid_vpc():
""" internet gateway fail to detach w/ invalid vpc """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc.id)
with assert_raises(EC2ResponseError) as cm:
conn.detach_internet_gateway(igw.id, BAD_VPC)
cm.exception.code.should.equal('Gateway.NotAttached')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach_unattached():
""" internet gateway fail to detach unattached """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
with assert_raises(EC2ResponseError) as cm:
conn.detach_internet_gateway(igw.id, vpc.id)
cm.exception.code.should.equal('Gateway.NotAttached')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_delete():
""" internet gateway delete"""
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc(VPC_CIDR)
conn.get_all_internet_gateways().should.have.length_of(0)
igw = conn.create_internet_gateway()
conn.get_all_internet_gateways().should.have.length_of(1)
conn.delete_internet_gateway(igw.id)
conn.get_all_internet_gateways().should.have.length_of(0)
@mock_ec2
def test_igw_delete_attached():
""" internet gateway fail to delete attached """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc.id)
with assert_raises(EC2ResponseError) as cm:
conn.delete_internet_gateway(igw.id)
cm.exception.code.should.equal('DependencyViolation')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_describe():
""" internet gateway fetch by id """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
igw_by_search = conn.get_all_internet_gateways([igw.id])[0]
igw.id.should.equal(igw_by_search.id)
@mock_ec2
def test_igw_describe_bad_id():
""" internet gateway fail to fetch by bad id """
conn = boto.connect_vpc('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_all_internet_gateways([BAD_IGW])
cm.exception.code.should.equal('InvalidInternetGatewayID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_filter_by_vpc_id():
""" internet gateway filter by vpc id """
conn = boto.connect_vpc('the_key', 'the_secret')
igw1 = conn.create_internet_gateway()
igw2 = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw1.id, vpc.id)
result = conn.get_all_internet_gateways(filters={"attachment.vpc-id": vpc.id})
result.should.have.length_of(1)
result[0].id.should.equal(igw1.id)
@mock_ec2
def test_igw_filter_by_tags():
""" internet gateway filter by vpc id """
conn = boto.connect_vpc('the_key', 'the_secret')
igw1 = conn.create_internet_gateway()
igw2 = conn.create_internet_gateway()
igw1.add_tag("tests", "yes")
result = conn.get_all_internet_gateways(filters={"tag:tests": "yes"})
result.should.have.length_of(1)
result[0].id.should.equal(igw1.id)
@mock_ec2
def test_igw_filter_by_internet_gateway_id():
""" internet gateway filter by internet gateway id """
conn = boto.connect_vpc('the_key', 'the_secret')
igw1 = conn.create_internet_gateway()
igw2 = conn.create_internet_gateway()
result = conn.get_all_internet_gateways(filters={"internet-gateway-id": igw1.id})
result.should.have.length_of(1)
result[0].id.should.equal(igw1.id)
@mock_ec2
def test_igw_filter_by_attachment_state():
""" internet gateway filter by attachment state """
conn = boto.connect_vpc('the_key', 'the_secret')
igw1 = conn.create_internet_gateway()
igw2 = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw1.id, vpc.id)
result = conn.get_all_internet_gateways(filters={"attachment.state": "available"})
result.should.have.length_of(1)
result[0].id.should.equal(igw1.id)
| apache-2.0 | 660,586,028,355,534,800 | 32.959459 | 86 | 0.689216 | false |
arborh/tensorflow | tensorflow/python/feature_column/utils.py | 30 | 6134 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines functions common to multiple feature column files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
def sequence_length_from_sparse_tensor(sp_tensor, num_elements=1):
"""Returns a [batch_size] Tensor with per-example sequence length."""
with ops.name_scope(None, 'sequence_length') as name_scope:
row_ids = sp_tensor.indices[:, 0]
column_ids = sp_tensor.indices[:, 1]
# Add one to convert column indices to element length
column_ids += array_ops.ones_like(column_ids)
# Get the number of elements we will have per example/row
seq_length = math_ops.segment_max(column_ids, segment_ids=row_ids)
# The raw values are grouped according to num_elements;
# how many entities will we have after grouping?
# Example: orig tensor [[1, 2], [3]], col_ids = (0, 1, 1),
# row_ids = (0, 0, 1), seq_length = [2, 1]. If num_elements = 2,
# these will get grouped, and the final seq_length is [1, 1]
seq_length = math_ops.cast(
math_ops.ceil(seq_length / num_elements), dtypes.int64)
# If the last n rows do not have ids, seq_length will have shape
# [batch_size - n]. Pad the remaining values with zeros.
n_pad = array_ops.shape(sp_tensor)[:1] - array_ops.shape(seq_length)[:1]
padding = array_ops.zeros(n_pad, dtype=seq_length.dtype)
return array_ops.concat([seq_length, padding], axis=0, name=name_scope)
def assert_string_or_int(dtype, prefix):
if (dtype != dtypes.string) and (not dtype.is_integer):
raise ValueError(
'{} dtype must be string or integer. dtype: {}.'.format(prefix, dtype))
def assert_key_is_string(key):
if not isinstance(key, six.string_types):
raise ValueError(
'key must be a string. Got: type {}. Given key: {}.'.format(
type(key), key))
def check_default_value(shape, default_value, dtype, key):
"""Returns default value as tuple if it's valid, otherwise raises errors.
This function verifies that `default_value` is compatible with both `shape`
and `dtype`. If it is not compatible, it raises an error. If it is compatible,
it casts default_value to a tuple and returns it. `key` is used only
for error message.
Args:
shape: An iterable of integers specifies the shape of the `Tensor`.
default_value: If a single value is provided, the same value will be applied
as the default value for every item. If an iterable of values is
provided, the shape of the `default_value` should be equal to the given
`shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
key: Column name, used only for error messages.
Returns:
A tuple which will be used as default value.
Raises:
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
if default_value is None:
return None
if isinstance(default_value, int):
return _create_tuple(shape, default_value)
if isinstance(default_value, float) and dtype.is_floating:
return _create_tuple(shape, default_value)
if callable(getattr(default_value, 'tolist', None)): # Handles numpy arrays
default_value = default_value.tolist()
if nest.is_sequence(default_value):
if not _is_shape_and_default_value_compatible(default_value, shape):
raise ValueError(
'The shape of default_value must be equal to given shape. '
'default_value: {}, shape: {}, key: {}'.format(
default_value, shape, key))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = all(
isinstance(v, int) for v in nest.flatten(default_value))
is_list_has_float = any(
isinstance(v, float) for v in nest.flatten(default_value))
if is_list_all_int:
return _as_tuple(default_value)
if is_list_has_float and dtype.is_floating:
return _as_tuple(default_value)
raise TypeError('default_value must be compatible with dtype. '
'default_value: {}, dtype: {}, key: {}'.format(
default_value, dtype, key))
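# Illustrative calls for check_default_value (not part of the original module;
# the shapes and values below are assumptions chosen purely for demonstration):
#
#     check_default_value((2,), 3, dtypes.int64, 'col')           # -> (3, 3)
#     check_default_value((2,), [1, 2], dtypes.int64, 'col')      # -> (1, 2)
#     check_default_value((2,), [1.5, 2.0], dtypes.int64, 'col')  # raises TypeError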
def _create_tuple(shape, value):
"""Returns a tuple with given shape and filled with value."""
if shape:
return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])])
return value
def _as_tuple(value):
if not nest.is_sequence(value):
return value
return tuple([_as_tuple(v) for v in value])
def _is_shape_and_default_value_compatible(default_value, shape):
"""Verifies compatibility of shape and default_value."""
# Invalid condition:
# * if default_value is not a scalar and shape is empty
# * or if default_value is an iterable and shape is not empty
if nest.is_sequence(default_value) != bool(shape):
return False
if not shape:
return True
if len(default_value) != shape[0]:
return False
for i in range(shape[0]):
if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]):
return False
return True
| apache-2.0 | -4,958,732,391,228,748,000 | 38.831169 | 80 | 0.682263 | false |
hasgeek/lastuser | lastuserapp/__init__.py | 1 | 3065 | # -*- coding: utf-8 -*-
from flask import Flask
from flask_migrate import Migrate
from baseframe import Version, assets, baseframe
import coaster.app
from ._version import __version__
import lastuser_core # isort:skip
import lastuser_oauth # isort:skip
import lastuser_ui # isort:skip
from lastuser_core import login_registry # isort:skip
from lastuser_core.models import db # isort:skip
from lastuser_oauth import providers, rq # isort:skip
version = Version(__version__)
app = Flask(__name__, instance_relative_config=True)
app.register_blueprint(lastuser_core.lastuser_core)
app.register_blueprint(lastuser_oauth.lastuser_oauth)
app.register_blueprint(lastuser_ui.lastuser_ui)
from . import views # NOQA # isort:skip
assets['lastuser-oauth.js'][version] = (lastuser_oauth.lastuser_oauth_js,)
assets['lastuser-oauth.css'][version] = lastuser_oauth.lastuser_oauth_css
# Configure the app
coaster.app.init_app(app)
db.init_app(app)
db.app = app # To make it work without an app context
migrate = Migrate(app, db)
rq.init_app(app) # Pick up RQ configuration from the app
baseframe.init_app(
app,
requires=['lastuser-oauth'],
ext_requires=['baseframe-mui', 'jquery.cookie', 'timezone'],
theme='mui',
asset_modules=('baseframe_private_assets',),
)
lastuser_oauth.lastuser_oauth.init_app(app)
lastuser_oauth.mailclient.mail.init_app(app)
lastuser_oauth.views.login.oid.init_app(app)
# Register some login providers
if app.config.get('OAUTH_TWITTER_KEY') and app.config.get('OAUTH_TWITTER_SECRET'):
login_registry['twitter'] = providers.TwitterProvider(
'twitter',
'Twitter',
at_login=True,
priority=True,
icon='twitter',
key=app.config['OAUTH_TWITTER_KEY'],
secret=app.config['OAUTH_TWITTER_SECRET'],
access_key=app.config.get('OAUTH_TWITTER_ACCESS_KEY'),
access_secret=app.config.get('OAUTH_TWITTER_ACCESS_SECRET'),
)
if app.config.get('OAUTH_GOOGLE_KEY') and app.config.get('OAUTH_GOOGLE_SECRET'):
login_registry['google'] = providers.GoogleProvider(
'google',
'Google',
client_id=app.config['OAUTH_GOOGLE_KEY'],
secret=app.config['OAUTH_GOOGLE_SECRET'],
scope=app.config.get('OAUTH_GOOGLE_SCOPE', ['email', 'profile']),
at_login=True,
priority=True,
icon='google',
)
if app.config.get('OAUTH_LINKEDIN_KEY') and app.config.get('OAUTH_LINKEDIN_SECRET'):
login_registry['linkedin'] = providers.LinkedInProvider(
'linkedin',
'LinkedIn',
at_login=True,
priority=False,
icon='linkedin',
key=app.config['OAUTH_LINKEDIN_KEY'],
secret=app.config['OAUTH_LINKEDIN_SECRET'],
)
if app.config.get('OAUTH_GITHUB_KEY') and app.config.get('OAUTH_GITHUB_SECRET'):
login_registry['github'] = providers.GitHubProvider(
'github',
'GitHub',
at_login=True,
priority=False,
icon='github',
key=app.config['OAUTH_GITHUB_KEY'],
secret=app.config['OAUTH_GITHUB_SECRET'],
)
| bsd-2-clause | -6,568,382,015,279,695,000 | 32.681319 | 84 | 0.676672 | false |
plotly/plotly.py | packages/python/plotly/plotly/io/_utils.py | 1 | 1472 | from __future__ import absolute_import
import plotly
import plotly.graph_objs as go
from plotly.offline import get_plotlyjs_version
def validate_coerce_fig_to_dict(fig, validate):
from plotly.basedatatypes import BaseFigure
if isinstance(fig, BaseFigure):
fig_dict = fig.to_dict()
elif isinstance(fig, dict):
if validate:
# This will raise an exception if fig is not a valid plotly figure
fig_dict = plotly.graph_objs.Figure(fig).to_plotly_json()
else:
fig_dict = fig
elif hasattr(fig, "to_plotly_json"):
fig_dict = fig.to_plotly_json()
else:
raise ValueError(
"""
The fig parameter must be a dict or Figure.
Received value of type {typ}: {v}""".format(
typ=type(fig), v=fig
)
)
return fig_dict
def validate_coerce_output_type(output_type):
if output_type == "Figure" or output_type == go.Figure:
cls = go.Figure
elif output_type == "FigureWidget" or (
hasattr(go, "FigureWidget") and output_type == go.FigureWidget
):
cls = go.FigureWidget
else:
        raise ValueError(
            """
    Invalid output type: {output_type}
        Must be one of: 'Figure', 'FigureWidget'""".format(output_type=output_type)
        )
return cls
def plotly_cdn_url(cdn_ver=get_plotlyjs_version()):
"""Return a valid plotly CDN url."""
return "https://cdn.plot.ly/plotly-{cdn_ver}.min.js".format(cdn_ver=cdn_ver,)
| mit | -6,869,936,403,315,884,000 | 28.44 | 81 | 0.613451 | false |
WASPACDC/hmdsm.repository | plugin.program.addon_activador-1.0.0/activador.py | 3 | 1296 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import xbmc, xbmcgui, os
from sqlite3 import dbapi2 as db_lib
conn = db_lib.connect(os.path.join(xbmc.translatePath('special://profile/Database'),'Addons27.db'))
conn.text_factory = str
def get_kodi_version():
try:
return float(xbmc.getInfoLabel('System.BuildVersion').split('-')[0])
except:
return float(xbmc.getInfoLabel('System.BuildVersion').split('.')[0])
def check_updates():
xbmc.executebuiltin('XBMC.UpdateLocalAddons()')
xbmc.executebuiltin('XBMC.UpdateAddonRepos()')
def set_all_enable():
conn.executemany('update installed set enabled=1 WHERE addonID = (?)',((val,) for val in os.listdir(xbmc.translatePath(os.path.join('special://home','addons')))))
conn.commit()
if get_kodi_version() > 16.9 :
dp = xbmcgui.DialogProgress()
    dp.create('Enable the add-ons!','Please wait ...','')
dp.update(0)
check_updates()
xbmc.sleep(2000)
dp.update(30)
set_all_enable()
xbmc.sleep(2000)
dp.update(60)
check_updates()
xbmc.sleep(2000)
dp.update(100)
xbmc.sleep(2000)
dp.close()
    xbmcgui.Dialog().ok('DONE!', 'All add-ons found have been enabled!' )
else:
    xbmcgui.Dialog().ok('WARNING!', 'For use only with Kodi versions 17.0+!' ) | gpl-2.0 | 5,846,601,725,793,439,000 | 27.173913 | 166 | 0.659459 | false |
pombreda/formalchemy | formalchemy/helpers.py | 2 | 6308 | """
A small module to wrap WebHelpers in FormAlchemy.
"""
from webhelpers.html.tags import text
from webhelpers.html.tags import hidden
from webhelpers.html.tags import password
from webhelpers.html.tags import textarea
from webhelpers.html.tags import checkbox
from webhelpers.html.tags import radio
from webhelpers.html import tags
from webhelpers.html import HTML, literal
def html_escape(s):
return HTML(s)
escape_once = html_escape
def content_tag(name, content, **options):
"""
Create a tag with content
Takes the same keyword args as ``tag``
Examples::
>>> print content_tag("p", "Hello world!")
<p>Hello world!</p>
>>> print content_tag("div", content_tag("p", "Hello world!"), class_="strong")
<div class="strong"><p>Hello world!</p></div>
"""
if content is None:
content = ''
tag = HTML.tag(name, _closed=False, **options) + HTML(content) + literal('</%s>' % name)
return tag
def text_field(name, value=None, **options):
"""
Creates a standard text field.
``value`` is a string, the content of the text field
Options:
* ``disabled`` - If set to True, the user will not be able to use this input.
* ``size`` - The number of visible characters that will fit in the input.
* ``maxlength`` - The maximum number of characters that the browser will allow the user to enter.
Remaining keyword options will be standard HTML options for the tag.
"""
_update_fa(options, name)
return text(name, value=value, **options)
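# Illustrative usage for text_field (an addition for documentation purposes; the
# exact markup is an assumption based on webhelpers' alphabetical attribute order):
#
#     text_field("title", value="Hello", size=30)
#     # -> <input id="title" name="title" size="30" type="text" value="Hello" />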
def password_field(name="password", value=None, **options):
"""
Creates a password field
Takes the same options as text_field
"""
_update_fa(options, name)
return password(name, value=value, **options)
def text_area(name, content='', **options):
"""
Creates a text input area.
Options:
* ``size`` - A string specifying the dimensions of the textarea.
Example::
>>> print text_area("Body", '', size="25x10")
<textarea cols="25" id="Body" name="Body" rows="10"></textarea>
"""
_update_fa(options, name)
if 'size' in options:
options["cols"], options["rows"] = options["size"].split("x")
del options['size']
return textarea(name, content=content, **options)
def check_box(name, value="1", checked=False, **options):
"""
Creates a check box.
"""
_update_fa(options, name)
if checked:
options["checked"] = "checked"
return tags.checkbox(name, value=value, **options)
def hidden_field(name, value=None, **options):
"""
Creates a hidden field.
Takes the same options as text_field
"""
_update_fa(options, name)
return tags.hidden(name, value=value, **options)
def file_field(name, value=None, **options):
"""
Creates a file upload field.
If you are using file uploads then you will also need to set the multipart option for the form.
Example::
>>> print file_field('myfile')
<input id="myfile" name="myfile" type="file" />
"""
_update_fa(options, name)
return tags.file(name, value=value, type="file", **options)
def radio_button(name, *args, **options):
_update_fa(options, name)
return radio(name, *args, **options)
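# The tag_options() helper below calls strip_unders() and convert_booleans(),
# which are neither defined nor imported in the original module. The minimal
# implementations here are assumptions about their intended behaviour (modelled
# on the old webhelpers.rails utilities) so that tag_options() is runnable.
def strip_unders(options):
    """Strip trailing underscores from attribute names (e.g. ``class_`` -> ``class``)."""
    for name in list(options.keys()):
        if name.endswith('_'):
            options[name[:-1]] = options.pop(name)
def convert_booleans(options):
    """Render truthy boolean attributes as ``attr="attr"`` and drop falsy ones."""
    for name in ('disabled', 'readonly', 'multiple', 'checked', 'selected'):
        if name in options:
            if options[name]:
                options[name] = name
            else:
                del options[name]
    return options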
def tag_options(**options):
strip_unders(options)
if 'options' in options:
del options['options']
cleaned_options = convert_booleans(dict([(x, y) for x, y in options.iteritems() if y is not None]))
optionlist = ['%s="%s"' % (x, escape_once(y)) for x, y in cleaned_options.iteritems()]
optionlist.sort()
if optionlist:
return ' ' + ' '.join(optionlist)
else:
return ''
def tag(name, open=False, **options):
"""
Returns an XHTML compliant tag of type ``name``.
``open``
Set to True if the tag should remain open
All additional keyword args become attribute/value's for the tag. To pass in Python
reserved words, append _ to the name of the key. For attributes with no value (such as
disabled and readonly), a value of True is permitted.
Examples::
>>> print tag("br")
<br />
>>> print tag("br", True)
<br>
>>> print tag("input", type="text")
<input type="text" />
>>> print tag("input", type='text', disabled='disabled')
<input disabled="disabled" type="text" />
"""
return HTML.tag(name, _closed=not open, **options)
def label(value, **kwargs):
"""
Return a label tag
>>> print label('My label', for_='fieldname')
<label for="fieldname">My label</label>
"""
if 'for_' in kwargs:
kwargs['for'] = kwargs.pop('for_')
return tag('label', open=True, **kwargs) + literal(value) + literal('</label>')
def select(name, selected, select_options, **attrs):
"""
Creates a dropdown selection box::
<select id="people" name="people">
<option value="George">George</option>
</select>
"""
if 'options' in attrs:
del attrs['options']
if select_options and isinstance(select_options[0], (list, tuple)):
select_options = [(v, k) for k, v in select_options]
_update_fa(attrs, name)
return tags.select(name, selected, select_options, **attrs)
def options_for_select(container, selected=None):
import warnings
warnings.warn(DeprecationWarning('options_for_select will be removed in FormAlchemy 2.5'))
if hasattr(container, 'values'):
container = container.items()
if not isinstance(selected, (list, tuple)):
selected = (selected,)
options = []
for elem in container:
if isinstance(elem, (list, tuple)):
name, value = elem
n = html_escape(name)
v = html_escape(value)
else :
name = value = elem
n = v = html_escape(elem)
if value in selected:
options.append('<option value="%s" selected="selected">%s</option>' % (v, n))
else :
options.append('<option value="%s">%s</option>' % (v, n))
return "\n".join(options)
def _update_fa(attrs, name):
if 'id' not in attrs:
attrs['id'] = name
if 'options' in attrs:
del attrs['options']
if __name__=="__main__":
import doctest
doctest.testmod()
| mit | -129,308,596,076,430,930 | 28.339535 | 103 | 0.613348 | false |
NeCTAR-RC/external_naginator | external_naginator/__init__.py | 1 | 28225 | """
Generate all the nagios configuration files based on puppetdb information.
"""
import os
import sys
import grp
import pdb
import stat
import logging
import configparser
import filecmp
import shutil
import tempfile
import subprocess
import traceback
from os import path
from io import StringIO
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
from pypuppetdb import connect
LOG = logging.getLogger(__name__)
@contextmanager
def temporary_dir(*args, **kwds):
name = tempfile.mkdtemp(*args, **kwds)
set_permissions(name, stat.S_IRGRP + stat.S_IXGRP)
try:
yield name
finally:
shutil.rmtree(name)
@contextmanager
def nagios_config(config_dirs):
"""
.. function:: nagios_config(config_dirs)
Combine the config_dirs with builtin nagios commands and nagios-plugins
commands as a temporary file.
:param config_dirs: name(s) of directory/ies to be tested
:type config_dirs: list
:rtype: str
"""
temp_dir = tempfile.mkdtemp()
set_permissions(temp_dir, stat.S_IRGRP + stat.S_IWGRP + stat.S_IXGRP)
with tempfile.NamedTemporaryFile(mode="w") as config:
set_permissions(config.name, stat.S_IRGRP)
config_lines = ["cfg_file=/etc/nagios4/commands.cfg",
"cfg_dir=/etc/nagios-plugins/config",
"check_result_path=%s" % temp_dir]
config_lines.extend(["cfg_dir=%s" % s for s in config_dirs])
config.write("\n".join(config_lines))
config.flush()
try:
yield config.name
finally:
shutil.rmtree(temp_dir)
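# Illustrative usage sketch (the directory below is an assumption; the helper
# simply yields a temporary main-config path that includes the given cfg dirs):
#
#     with nagios_config(['/etc/nagios4/conf.d']) as cfg_file:
#         subprocess.check_call(['/usr/sbin/nagios4', '-v', cfg_file])
#
# nagios_verify() below wraps exactly this pattern and raises on failure.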
def nagios_verify(config_dirs, config_file=None):
with nagios_config(config_dirs) as tmp_config_file:
LOG.info("Validating Nagios config %s" % ', '.join(config_dirs))
p = subprocess.Popen(['/usr/sbin/nagios4', '-v',
config_file or tmp_config_file],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='utf8')
output, err = p.communicate()
return_code = p.returncode
for line in output.split('\n'):
LOG.debug(line)
for line in err.split('\n'):
LOG.debug(line)
if return_code > 0:
print(output)
raise Exception("Nagios validation failed.")
def nagios_restart():
"""Restart Nagios"""
LOG.info("Restarting Nagios")
p = subprocess.Popen(['/usr/sbin/service', 'nagios4', 'restart'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='utf8')
output, err = p.communicate()
return_code = p.returncode
if return_code > 0:
print(output)
raise Exception("Failed to restart Nagios.")
def nagios_gid():
return grp.getgrnam('nagios').gr_gid
def set_permissions(path, mode):
if os.getuid() == 0:
os.chmod(path, mode)
os.chown(path, -1, nagios_gid())
class NagiosType(object):
directives = None
def __init__(self, db, output_dir,
nodefacts=None,
query=None,
environment=None,
nagios_hosts={}):
self.db = db
self.output_dir = output_dir
self.environment = environment
self.nodefacts = nodefacts
self.query = query
self.nagios_hosts = nagios_hosts
def query_string(self, nagios_type=None):
if not nagios_type:
nagios_type = 'Nagios_' + self.nagios_type
if not self.query:
return '["=", "type", "%s"]' % (nagios_type)
query_parts = ['["=", "%s", "%s"]' % q for q in self.query]
query_parts.append('["=", "type", "%s"]' % (nagios_type))
return '["and", %s]' % ", ".join(query_parts)
def file_name(self):
return "{0}/auto_{1}.cfg".format(self.output_dir, self.nagios_type)
def generate_name(self, resource, stream):
stream.write(" %-30s %s\n" % (self.nagios_type + '_name',
resource.name))
def generate_parameters(self, resource, stream):
for param_name, param_value in resource.parameters.items():
if not param_value:
continue
if param_name in set(['target', 'require', 'tag', 'notify',
'ensure', 'mode']):
continue
if self.directives and param_name not in self.directives:
continue
# Convert all lists into csv values
if isinstance(param_value, list):
param_value = ",".join(param_value)
stream.write(" %-30s %s\n" % (param_name, param_value))
def generate_resource(self, resource, stream):
stream.write("define %s {\n" % self.nagios_type)
self.generate_name(resource, stream)
self.generate_parameters(resource, stream)
stream.write("}\n")
def generate(self):
"""
Generate a nagios configuration for a single type
The output of this will be a single file for each type.
eg.
auto_hosts.cfg
auto_checks.cfg
"""
stream = open(self.file_name(), 'w')
# Query puppetdb only throwing back the resource that match
# the Nagios type.
unique_list = set([])
for r in self.db.resources(query=self.query_string()):
# Make sure we do not try and make more than one resource
# for each one.
if r.name in unique_list:
LOG.info("duplicate: %s" % r.name)
continue
unique_list.add(r.name)
if 'host_name' in r.parameters:
hostname = r.parameters.get('host_name')
if hostname not in self.nagios_hosts:
LOG.info("Can't find host %s skipping %s, %s" % (
r.parameters['host_name'],
self.nagios_type,
r.name))
else:
s = StringIO()
self.generate_resource(r, s)
s.seek(0)
self.nagios_hosts[hostname].append(s.read())
continue
self.generate_resource(r, stream)
class NagiosHost(NagiosType):
nagios_type = 'host'
directives = set(['host_name', 'alias', 'display_name', 'address',
'parents', 'hostgroups', 'check_command',
'initial_state', 'max_check_attempts',
'check_interval', 'retry_interval',
'active_checks_enabled', 'passive_checks_enabled',
'check_period', 'obsess_over_host', 'check_freshness',
'freshness_threshold', 'event_handler',
'event_handler_enabled', 'low_flap_threshold',
'high_flap_threshold', 'flap_detection_enabled',
'flap_detection_options', 'process_perf_data',
'retain_status_information',
'retain_nonstatus_information',
'contacts', 'contact_groups', 'notification_interval',
'first_notification_delay', 'notification_period',
'notification_options', 'notifications_enabled',
'stalking_options', 'notes', 'notes_url',
'action_url', 'icon_image', 'icon_image_alt',
'vrml_image', 'statusmap_image', '2d_coords',
'3d_coords', 'use'])
def generate_name(self, resource, stream):
if resource.name in self.nodefacts or 'use' in resource.parameters:
stream.write(" %-30s %s\n" % ("host_name", resource.name))
else:
stream.write(" %-30s %s\n" % ("name", resource.name))
def is_host(self, resource):
if resource.name in self.nodefacts or 'use' in resource.parameters:
return True
return False
def generate(self):
unique_list = set([])
stream = open(self.file_name(), 'w')
# Query puppetdb only throwing back the resource that match
# the Nagios type.
for r in self.db.resources(query=self.query_string()):
# Make sure we do not try and make more than one resource
# for each one.
if r.name in unique_list:
LOG.info("duplicate: %s" % r.name)
continue
unique_list.add(r.name)
if self.is_host(r):
tmp_file = ("{0}/host_{1}.cfg"
.format(self.output_dir, r.name))
f = open(tmp_file, 'w')
self.generate_resource(r, f)
for resource in sorted(self.nagios_hosts[r.name]):
f.write(resource)
f.close()
continue
else:
self.generate_resource(r, stream)
class NagiosServiceGroup(NagiosType):
nagios_type = 'servicegroup'
directives = set(['servicegroup_name', 'alias', 'members',
'servicegroup_members', 'notes', 'notes_url',
'action_url'])
class NagiosAutoServiceGroup(NagiosType):
def generate(self):
# Query puppetdb only throwing back the resource that match
# the Nagios type.
unique_list = set([])
# Keep track of sevice to hostname
servicegroups = defaultdict(list)
for r in self.db.resources(query=self.query_string('Nagios_service')):
# Make sure we do not try and make more than one resource
# for each one.
if r.name in unique_list:
continue
unique_list.add(r.name)
if 'host_name' in r.parameters \
and r.parameters['host_name'] not in self.nagios_hosts:
LOG.info("Can't find host %s skipping, %s" % (
r.parameters['host_name'],
r.name))
continue
# Add services to service group
if 'host_name' in r.parameters:
host_name = r.parameters['host_name']
servicegroups[r.parameters['service_description']]\
.append(host_name)
for servicegroup_name, host_list in servicegroups.items():
tmp_file = ("{0}/auto_servicegroup_{1}.cfg"
.format(self.output_dir, servicegroup_name))
members = []
for host in host_list:
members.append("%s,%s" % (host, servicegroup_name))
f = open(tmp_file, 'w')
f.write("define servicegroup {\n")
f.write(" servicegroup_name %s\n" % servicegroup_name)
f.write(" alias %s\n" % servicegroup_name)
f.write(" members %s\n" % ",".join(members))
f.write("}\n")
f.close()
class NagiosService(NagiosType):
nagios_type = 'service'
directives = set(['host_name', 'hostgroup_name',
'service_description', 'display_name',
'servicegroups', 'is_volatile', 'check_command',
'initial_state', 'max_check_attempts',
'check_interval', 'retry_interval',
'active_checks_enabled', 'passive_checks_enabled',
'check_period', 'obsess_over_service',
'check_freshness', 'freshness_threshold',
'event_handler', 'event_handler_enabled',
'low_flap_threshold', 'high_flap_threshold',
'flap_detection_enabled', 'flap_detection_options',
'process_perf_data', 'retain_status_information',
'retain_nonstatus_information',
'notification_interval', 'register',
'first_notification_delay',
'notification_period', 'notification_options',
'notifications_enabled', 'contacts',
'contact_groups', 'stalking_options', 'notes',
'notes_url', 'action_url', 'icon_image',
'icon_image_alt', 'use'])
def generate_name(self, resource, stream):
if 'host_name' not in resource.parameters:
stream.write(" %-30s %s\n" % ("name", resource.name))
class NagiosHostGroup(NagiosType):
nagios_type = 'hostgroup'
directives = set(['hostgroup_name', 'alias', 'members',
'hostgroup_members', 'notes',
'notes_url', 'action_url'])
class NagiosHostEscalation(NagiosType):
nagios_type = 'hostescalation'
class NagiosHostDependency(NagiosType):
nagios_type = 'hostdependency'
class NagiosHostExtInfo(NagiosType):
nagios_type = 'hostextinfo'
class NagiosServiceEscalation(NagiosType):
nagios_type = 'serviceescalation'
class NagiosServiceDependency(NagiosType):
nagios_type = 'servicedependency'
class NagiosServiceExtInfo(NagiosType):
nagios_type = 'serviceextinfo'
class NagiosTimePeriod(NagiosType):
nagios_type = 'timeperiod'
class NagiosCommand(NagiosType):
nagios_type = 'command'
directives = set(['command_name', 'command_line'])
class NagiosContact(NagiosType):
nagios_type = 'contact'
directives = set(['contact_name', 'alias', 'contactgroups',
'host_notifications_enabled',
'service_notifications_enabled',
'host_notification_period',
'service_notification_period',
'host_notification_options',
'service_notification_options',
'host_notification_commands',
'service_notification_commands',
'email', 'pager', 'addressx',
'can_submit_commands',
'retain_status_information',
'retain_nonstatus_information'])
class NagiosContactGroup(NagiosType):
nagios_type = 'contactgroup'
directives = set(['contactgroup_name', 'alias', 'members',
'contactgroup_members'])
class CustomNagiosHostGroup(NagiosType):
def __init__(self, db, output_dir, name,
nodefacts=None,
nodes=None,
query=None,
environment=None,
nagios_hosts={}):
self.nagios_type = name
self.nodes = nodes
        super(CustomNagiosHostGroup, self).__init__(db=db,
                                                    output_dir=output_dir,
                                                    nodefacts=nodefacts,
                                                    query=query,
                                                    environment=environment,
                                                    nagios_hosts=nagios_hosts)
def generate(self, hostgroup_name, traits):
traits = dict(traits)
fact_template = traits.pop('fact_template')
hostgroup_name = hostgroup_name.split('_', 1)[1]
hostgroup_alias = traits.pop('name')
# Gather hosts base on some resource traits.
members = []
for node in self.nodes:
for type_, title in traits.items():
if not len(list(node.resources(type_, title))) > 0:
break
else:
members.append(node)
hostgroup = defaultdict(list)
for node in members or self.nodes:
if node.name not in self.nagios_hosts:
LOG.info("Skipping host with no nagios_host resource %s" %
node.name)
continue
facts = self.nodefacts[node.name]
try:
fact_name = hostgroup_name.format(**facts)
fact_alias = hostgroup_alias.format(**facts)
except KeyError:
LOG.error("Can't find facts for hostgroup %s" % fact_template)
raise
hostgroup[(fact_name, fact_alias)].append(node)
# if there are no hosts in the group then exit
if not hostgroup.items():
return
for hostgroup_name, hosts in hostgroup.items():
tmp_file = "{0}/auto_hostgroup_{1}.cfg".format(self.output_dir,
hostgroup_name[0])
f = open(tmp_file, 'w')
f.write("define hostgroup {\n")
f.write(" hostgroup_name %s\n" % hostgroup_name[0])
f.write(" alias %s\n" % hostgroup_name[1])
f.write(" members %s\n" % ",".join([h.name for h in hosts]))
f.write("}\n")
class NagiosConfig:
def __init__(self, hostname, port, api_version, output_dir,
nodefacts=None, query=None, environment=None,
ssl_verify=None, ssl_key=None, ssl_cert=None, timeout=None):
self.db = connect(host=hostname,
port=port,
ssl_verify=ssl_verify,
ssl_key=ssl_key,
ssl_cert=ssl_cert,
timeout=timeout)
self.db.resources = self.db.resources
self.output_dir = output_dir
self.environment = environment
if not nodefacts:
self.nodefacts = self.get_nodefacts()
else:
self.nodefacts = nodefacts
self.query = query or {}
self.nagios_hosts = defaultdict(list,
[(h, [])
for h in self.get_nagios_hosts()])
def query_string(self, **kwargs):
query_parts = []
for name, value in kwargs.items():
query_parts.append('["=", "%s", "%s"]' % (name, value))
return '["and", %s]' % ", ".join(query_parts)
def resource_query_string(self, **kwargs):
query = dict(self.query)
query.update(kwargs)
return self.query_string(**query)
def node_query_string(self, **kwargs):
if not self.environment:
return None
query = {'catalog_environment': self.environment,
'facts_environment': self.environment}
query.update(kwargs)
return self.query_string(**query)
def get_nodefacts(self):
"""
Get all the nodes & facts from puppetdb.
This can be used to construct hostgroups, etc.
{
'hostname': {
'factname': factvalue,
'factname': factvalue,
}
}
"""
nodefacts = {}
self.nodes = []
for node in self.db.nodes(query=self.node_query_string()):
self.nodes.append(node)
nodefacts[node.name] = {}
for f in node.facts():
nodefacts[node.name][f.name] = f.value
return nodefacts
def get_nagios_hosts(self):
"""This is used during other parts of the generation process to make
sure that there is host consistency.
"""
return set(
[h.name for h in self.db.resources(
query=self.resource_query_string(type='Nagios_host'))])
def generate_all(self, excluded_classes=[]):
for cls in NagiosType.__subclasses__():
if cls.__name__.startswith('Custom'):
continue
if cls.__name__ == 'NagiosHost':
continue
if cls.__name__ in excluded_classes:
continue
inst = cls(db=self.db,
output_dir=self.output_dir,
nodefacts=self.nodefacts,
query=self.query,
environment=self.environment,
nagios_hosts=self.nagios_hosts)
inst.generate()
hosts = NagiosHost(db=self.db,
output_dir=self.output_dir,
nodefacts=self.nodefacts,
query=self.query,
environment=self.environment,
nagios_hosts=self.nagios_hosts)
hosts.generate()
def verify(self, extra_cfg_dirs=[]):
LOG.debug("NagiosConfig.verify got extra_cfg_dirs %s" % extra_cfg_dirs)
return nagios_verify([self.output_dir] + extra_cfg_dirs)
def update_nagios(new_config_dir, updated_config, removed_config,
backup_dir, output_dir, nagios_cfg,
extra_cfg_dirs=[]):
# Backup the existing configuration
shutil.copytree(output_dir, backup_dir)
for filename in updated_config:
LOG.info("Copying changed file: %s" % filename)
shutil.copy(path.join(new_config_dir, filename),
path.join(output_dir, filename))
for filename in removed_config:
LOG.info("Removing files: %s" % filename)
os.remove(path.join(output_dir, filename))
# Verify the config in place.
try:
nagios_verify([output_dir] + extra_cfg_dirs, nagios_cfg)
except Exception:
        # Remove the new config; map() is lazy in Python 3, so iterate explicitly
        for filename in os.listdir(output_dir):
            os.remove(path.join(output_dir, filename))
# Copy the backup back
for filename in os.listdir(backup_dir):
shutil.copy(path.join(backup_dir, filename),
path.join(output_dir, filename))
raise
def config_get(config, section, option, default=None):
try:
return config.get(section, option)
except Exception:
return default
def main():
import argparse
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
self.print_help(sys.stderr)
self.exit(2, '%s: error: %s\n' % (self.prog, message))
parser = ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--output-dir', action='store', required=True, type=path.abspath,
help="The directory to write the Nagios config into.")
parser.add_argument(
'-c', '--config', action='store',
help="The location of the configuration file..")
parser.add_argument(
'--update', action='store_true',
help="Update the Nagios configuration files.")
parser.add_argument(
'--no-restart', action='store_true', default=False,
help="Restart the Nagios service.")
parser.add_argument(
'--host', action='store', default='localhost',
help="The hostname of the puppet DB server.")
parser.add_argument(
'--port', action='store', default=8080, type=int,
help="The port of the puppet DB server.")
parser.add_argument(
'-V', '--api-version', action='store', default=4, type=int,
help="The puppet DB version")
parser.add_argument(
'--pdb', action='store_true', default=False,
help="Unable PDB on error.")
parser.add_argument(
'-v', '--verbose', action='count', default=0,
help="Increase verbosity (specify multiple times for more)")
args = parser.parse_args()
log_level = logging.WARNING
if args.verbose == 1:
log_level = logging.INFO
elif args.verbose >= 2:
log_level = logging.DEBUG
logging.basicConfig(
level=log_level,
stream=sys.stderr,
format='%(asctime)s %(name)s %(levelname)s %(message)s')
config = configparser.ConfigParser()
if args.config:
        config.read_file(open(args.config))
query = {}
if 'query' in config.sections():
query = config.items('query')
# PuppetDB Variables
get_puppet_cfg = partial(config_get, config, 'puppet')
environment = get_puppet_cfg('environment')
ssl_verify = get_puppet_cfg('ca_cert')
ssl_key = get_puppet_cfg('ssl_key')
ssl_cert = get_puppet_cfg('ssl_cert')
timeout = int(get_puppet_cfg('timeout', 20))
# Nagios Variables
get_nagios_cfg = partial(config_get, config, 'nagios')
nagios_cfg = get_nagios_cfg('nagios_cfg', '/etc/nagios4/nagios.cfg')
extra_cfg_dirs = [d.strip()
for d in get_nagios_cfg('extra_cfg_dirs', '').split(',')
if d]
get_naginator_cfg = partial(config_get, config, 'naginator')
excluded_classes = [d.strip()
for d in (get_naginator_cfg('excluded_classes', '')
.split(','))
if d]
hostgroups = {}
for section in config.sections():
if not section.startswith('hostgroup_'):
continue
hostgroups[section] = config.items(section)
try:
with generate_config(hostname=args.host,
port=args.port,
api_version=args.api_version,
query=query,
environment=environment,
ssl_verify=ssl_verify,
ssl_key=ssl_key,
ssl_cert=ssl_cert,
timeout=timeout,
excluded_classes=excluded_classes,
hostgroups=hostgroups) as nagios_config:
if args.update:
update_config(nagios_config, args.output_dir,
nagios_cfg, extra_cfg_dirs)
if not args.no_restart:
nagios_restart()
except Exception:
if args.pdb:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
else:
raise
@contextmanager
def generate_config(hostname, port, api_version, query, environment,
ssl_verify, ssl_key, ssl_cert, timeout,
excluded_classes=[], hostgroups={}):
with temporary_dir() as tmp_dir:
new_config_dir = path.join(tmp_dir, 'new_config')
# Generate new configuration
os.mkdir(new_config_dir)
set_permissions(new_config_dir, stat.S_IRGRP + stat.S_IXGRP)
cfg = NagiosConfig(hostname=hostname,
port=port,
api_version=api_version,
output_dir=new_config_dir,
query=query,
environment=environment,
ssl_verify=ssl_verify,
ssl_key=ssl_key,
ssl_cert=ssl_cert,
timeout=timeout)
cfg.generate_all(excluded_classes=excluded_classes)
        for name, hostgroup_def in hostgroups.items():
            group = CustomNagiosHostGroup(cfg.db,
                                          new_config_dir,
                                          name,
                                          nodefacts=cfg.nodefacts,
                                          nodes=cfg.nodes,
                                          query=query,
                                          environment=environment,
                                          nagios_hosts=cfg.nagios_hosts)
            group.generate(name, hostgroup_def)
try:
yield cfg
finally:
pass
def update_config(config, output_dir, nagios_cfg, extra_cfg_dirs):
with temporary_dir() as tmp_dir:
backup_dir = path.join(tmp_dir, 'backup_config')
# Generate list of changed and added files
diff = filecmp.dircmp(config.output_dir, output_dir)
updated_config = diff.diff_files + diff.left_only
# Only remove the auto files, leaving the old hosts.
removed_config = [f for f in diff.right_only if f.startswith('auto_')]
if not updated_config:
return
# Validate new configuration
config.verify(extra_cfg_dirs=extra_cfg_dirs)
update_nagios(config.output_dir, updated_config, removed_config,
backup_dir, output_dir, nagios_cfg=nagios_cfg,
extra_cfg_dirs=extra_cfg_dirs)
| mit | 1,099,402,090,502,372,200 | 35.466408 | 79 | 0.53357 | false |
shouldmakemusic/yaas | controller/DeviceController.py | 1 | 11127 | # Copyright (C) 2015 Manuel Hirschauer ([email protected])
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Manuel Hirschauer <[email protected]>
"""
Control everything that can happen with or inside a device
"""
from YaasController import *
from ..consts import CURRENT
class DeviceController (YaasController):
"""
Control everything that can happen with or inside a device
"""
_parameter_names_for_device_in_set = {}
def __init__(self, yaas):
YaasController.__init__(self, yaas)
self.log.debug("(DeviceController) init")
def navigate_device_focus(self, params, value):
"""
Selects next or previous device
@param params[0]: track_index
@param params[1]: next? True : False
"""
self.log.verbose("(DeviceController) navigate_device_focus called")
track_index = params[0]
self.log.verbose("(DeviceController) for track " + str(track_index))
next = params[1]
selected_track = self.song_helper().get_selected_track()
target_track = self.song_helper().get_track(track_index)
self.log.debug('target ' + target_track.get_name() + ', selected ' + selected_track.get_name())
# ensures there is an active device
device = self.device_helper().get_currently_selected_device(track_index)
if device is None:
device = target_track.get_device_for_id(0)
if selected_track.get_track_index() == target_track.get_track_index() and self.application().view.is_view_visible("Detail/DeviceChain"):
if next == True:
self.application().view.focus_view("Detail")
self.application().view.scroll_view(3, "Detail/DeviceChain", 0)
else:
self.application().view.focus_view("Detail")
self.application().view.scroll_view(2, "Detail/DeviceChain", 0)
else:
self.view_helper().focus_on_track_helper(target_track)
if device is not None:
self.song().view.select_device(device)
def toggle_device(self, params, value):
"""
Switches defined device on/off
@param params[0]: track_index
@param params[1]: device_index
"""
track_index = params[0]
device_index = params[1]
if track_index == CURRENT:
track_index = self.song_helper().get_selected_track().get_track_index()
if device_index == CURRENT:
device = self.device_helper().get_currently_selected_device(track_index)
else:
device = self.song_helper().get_track(track_index).get_device_for_id(device_index)
self.application().view.focus_view("Detail/DeviceChain")
if device.parameters[0].value == 0.0:
device.parameters[0].value = 1.0
self.log.debug("toogle " + device.name + " on")
else:
device.parameters[0].value = 0.0
self.log.debug("toogle " + device.name + " off")
def trigger_device_chain(self, params, value):
"""
Use the current active hash device and if it is a rack
switch the chain with the given index
exclusive means only one chain is not muted
inclusive means the selected chain gets switched
@param params[0]: chain_index
@param params[1]: True means Exclusive / False means Inclusive
"""
self.log.verbose("(DeviceController) trigger_device_chain called")
chain_index = params[0]
exclusive = params[1]
self.log.verbose("(DeviceController) for chain " + str(chain_index) + ", exclusive: " + str(exclusive))
device = self.device_helper().get_hash_device()
if device is not None:
if exclusive:
if len(device.chains) > chain_index:
#self.log.debug("Trigger chain " + str(chain_index + 1) + " with " + str(len(device.chains)) + " chains")
if device.chains[chain_index].mute == True:
self.log.debug("was muted")
for index in range(len(device.chains)):
if index == chain_index:
device.chains[index].mute = False
else:
device.chains[index].mute = True
else:
self.log.debug("was not muted")
device.chains[chain_index].mute = True
device.chains[0].mute = False
else:
if len(device.chains) > chain_index:
self.log.debug("Trigger chain " + str(chain_index + 1) + " with " + str(len(device.chains)) + " chains")
                    if device.chains[chain_index].mute == True:
                        device.chains[chain_index].mute = False
                    else:
                        device.chains[chain_index].mute = True
def set_chain_selector(self, params, value):
"""
Use the current active hash device and if it is a rack
select the chain with the given chain selector value
You can also define a button that sets the chain selector to a given value.
One button is wired to a certain chain (or multiple if you add them at the
given chain selector position)
When switching between chain positions for each position the values of
        parameters 1-4 are saved (persistently, that means for every device with
this exact name and in a file, so it will be restored when reopening the
set and selecting a chain with this method)
@param params[0]: chain_index
"""
self.log.verbose("(DeviceController) set_chain_selector called")
chain_index = params[0]
self.log.verbose("(DeviceController) for chain " + str(chain_index))
device = self.device_helper().get_hash_device()
if device is not None:
# find chain parameter
chain_parameter = None
if device.parameters[9].is_enabled == True:
self.log.verbose('the chain selector is not bound to a control')
chain_parameter = device.parameters[9]
else:
for i in range(len(device.parameters)):
if device.parameters[i].name == "Chain Selector" and device.parameters[i].is_enabled:
chain_parameter = device.parameters[i]
self.log.verbose('the chain selector is parameter ' + str(i) )
# store old values
chain_name = device.name + '_' + str(int(chain_parameter.value)) + '_'
self.log.verbose('chain_name: ' + str(chain_name))
self.yaas._value_container.set_value(chain_name + device.parameters[1].name, device.parameters[1].value)
self.yaas._value_container.set_value(chain_name + device.parameters[2].name, device.parameters[2].value)
self.yaas._value_container.set_value(chain_name + device.parameters[3].name, device.parameters[3].value)
self.yaas._value_container.set_value(chain_name + device.parameters[4].name, device.parameters[4].value)
debug_message = 'set chain activator to ' + str(chain_index) + ' from ' + str(len(device.chains) - 1) + ' for ' + device.name
self.log.debug(str(debug_message))
if len(device.chains) > chain_index:
# set selector
value = chain_index
#self.log.debug("max " + str(chain_parameter.max))
#self.log.debug("min " + str(chain_parameter.min))
#if CHAIN_MODE_SHORTENED:
# value = 127 / 7 * value
#self.log.debug("new value " + str(value))
chain_parameter.value = value
#self.log.debug("done for " + chain_parameter.name)
# restore values of first four parameters
# only if new chain is not 0 (=normal)
if (chain_index > 0):
chain_name = device.name + '_' + str(chain_index) + '_'
if self.yaas._value_container.has_value(chain_name + device.parameters[1].name):
device.parameters[1].value = self.yaas._value_container.get_single_value(chain_name + device.parameters[1].name)
if self.yaas._value_container.has_value(chain_name + device.parameters[2].name):
device.parameters[2].value = self.yaas._value_container.get_single_value(chain_name + device.parameters[2].name)
if self.yaas._value_container.has_value(chain_name + device.parameters[3].name):
device.parameters[3].value = self.yaas._value_container.get_single_value(chain_name + device.parameters[3].name)
if self.yaas._value_container.has_value(chain_name + device.parameters[4].name):
device.parameters[4].value = self.yaas._value_container.get_single_value(chain_name + device.parameters[4].name)
else:
self.log.verbose('hash device was none')
def select_current_then_select_next_hash_device(self, params, value):
"""
        On the first call, select the first device whose name starts with '#'.
        If the currently appointed device already starts with '#', find the next '#' device.
        Store this device - from now on the first call selects this one.
@param params[0]: track_index to start search from (optional)
"""
self.log.verbose("(DeviceController) set_chain_selector called")
if len(params) == 0:
track_index = 0
else:
track_index = params[0]
if track_index == '':
track_index = 0
self.log.verbose("(DeviceController) for track " + str(track_index))
self.device_helper().select_current_then_select_next_hash_device(track_index)
def connect_to_rack_parameter(self, params, value):
"""
Use the current active hash device and connects to the given parameter
0 -> enable/disable device
1-8 -> device params
9 -> chain selector if not mapped
@param params[0]: parameter_id
"""
self.log.verbose("(DeviceController) connect_to_rack_parameter called")
parameter_id = params[0]
device = self.device_helper().get_hash_device()
self.log.verbose("(DeviceController) for device " + device.name + ", parameter " + str(parameter_id))
if device is not None:
set_name = 'default'
name = set_name + '_' + device.name
parameter = device.parameters[parameter_id]
if not(name in self._parameter_names_for_device_in_set.keys()):
parameter_names = {}
for index in range(len(device.parameters)):
parameter_name = device.parameters[index].name
parameter_names[parameter_name] = index
self.log.verbose("added param " + parameter_name + " with index " + str(index))
self._parameter_names_for_device_in_set[name] = parameter_names
self.log.debug("stored parameters for " + name)
min = parameter.min
max = parameter.max
max_name = "Max " + parameter.name
self.log.verbose("max name " + max_name)
if max_name in self._parameter_names_for_device_in_set[name]:
#self.log.debug("found")
index = self._parameter_names_for_device_in_set[name][max_name]
#self.log.debug("index " + str(index))
max = device.parameters[index].value + 1
#self.log.debug("max value " + str(max))
# TODO same fix as in trackconroller (use rangeutil)
#value = self.get_normalized_value(min, max, value)
self.range_util.set_target_min_max(min, max)
new_value = self.range_util.get_target_value(value);
parameter.value = new_value
| gpl-2.0 | -8,390,716,330,668,589,000 | 37.368966 | 138 | 0.681495 | false |
hoodie/libavg | src/python/app/settings.py | 2 | 8384 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# libavg - Media Playback Engine.
# Copyright (C) 2003-2014 Ulrich von Zadow
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Current versions can be found at www.libavg.de
#
# Original author of this file is OXullo Interecans <x at brainrapers dot org>
import sys
import re
import optparse
import libavg
class Option(object):
def __init__(self, key, value, help=None):
if not isinstance(key, str):
raise ValueError('The type of %s key is not string (value=%s)' % (key, value))
self.__key = key
self.value = value
self.__help = help
def __repr__(self):
return '<%s key=%s value=%s help=%s>' % (self.__class__.__name__,
self.key, self.value, self.help)
@property
def key(self):
return self.__key
@property
def value(self):
return self.__value
@value.setter
def value(self, value):
if not isinstance(value, str):
raise ValueError('The type of %s value (%s) '
'must be string instead of %s' % (self.__key, value, type(value)))
self.__value = value
@property
def group(self):
components = self.__getComponents()
if len(components) == 1:
return 'DEFAULT'
else:
return components[0]
@property
def tail(self):
components = self.__getComponents()
if len(components) == 1:
return self.key
else:
return components[1]
@property
def help(self):
return self.__help
def __getComponents(self):
return self.key.split('_', 1)
class KargsExtender(object):
def __init__(self, optionsKargs):
self.__optionsKargs = optionsKargs
def __call__(self, optionsList):
optionsKeyset = set([option.key for option in optionsList])
kaKeyset = set(self.__optionsKargs.keys())
if not optionsKeyset.issuperset(kaKeyset):
raise libavg.Exception(
'No such option/s: %s' % list(kaKeyset - optionsKeyset))
for option in optionsList:
if option.key in self.__optionsKargs:
option.value = self.__optionsKargs[option.key]
return optionsList
class HelpPrintingOptionParser(optparse.OptionParser):
def error(self, *args, **kargs):
self.print_help()
optparse.OptionParser.error(self, *args, **kargs)
def print_help(self):
optparse.OptionParser.print_help(self)
print
print "All options can also be set using the App.run() method."
class ArgvExtender(object):
def __init__(self, appVersionInfo, args=None):
self.__appVersionInfo = appVersionInfo
self.__parser = HelpPrintingOptionParser()
self.__args = args
self.__parsedArgs = None
def __call__(self, optionsList):
self.__parser.add_option('-v', '--version', dest='version', action='store_true',
help='print libavg and application version information')
groups = self.__groupOptionsKeys(optionsList)
for group in sorted(groups):
parserGroup = optparse.OptionGroup(self.__parser,
'%s section' % group.title())
keys = sorted(groups[group])
for option in [option for option in optionsList if option.key in keys]:
cliKey = '--%s' % option.key.replace('_', '-').lower()
currentValue = option.value if option.value else '<undefined>'
help = '[Default: %s]' % currentValue
if option.help:
help = '%s %s' % (option.help, help)
parserGroup.add_option(cliKey, help=help)
self.__parser.add_option_group(parserGroup)
if self.__args is None:
self.__args = sys.argv[1:]
self.__parsedArgs = self.__parser.parse_args(args=self.__args)
parsedOptions = self.__parsedArgs[0]
if parsedOptions.version:
print 'libavg'
vi = libavg.VersionInfo()
print ' version : %s' % vi.full
print ' builder : %s (%s)' % (vi.builder, vi.buildtime)
print
print 'application'
print ' version: %s' % self.__appVersionInfo
sys.exit(0)
for key, value in parsedOptions.__dict__.iteritems():
if value is not None:
for option in optionsList:
if option.key == key:
option.value = value
return optionsList
@property
def parsedArgs(self):
if self.__parsedArgs is None:
raise libavg.Exception(
'Cannot provide parsedArgs before applying the extender')
return self.__parsedArgs
@property
def parser(self):
return self.__parser
def __groupOptionsKeys(self, optionsList):
groups = {}
for option in optionsList:
if not option.group in groups:
groups[option.group] = []
groups[option.group].append(option.key)
return groups
class Settings(object):
def __init__(self, defaults=[]):
if (type(defaults) not in (tuple, list) or
not all([isinstance(opt, Option) for opt in defaults])):
raise ValueError('Settings must be initialized with a list '
'of Option instances')
self.__options = []
for option in defaults:
self.addOption(option)
def __iter__(self):
return self.__options.__iter__()
def applyExtender(self, extender):
self.__options = extender(self.__options)
def hasOption(self, key):
return self.__getOptionOrNone(key) is not None
def getOption(self, key):
option = self.__getOptionOrNone(key)
if option is None:
raise libavg.Exception('Cannot find key %s in the settings' % key)
return option
def get(self, key, convertFunc=lambda v: v):
option = self.getOption(key)
try:
return convertFunc(option.value)
except (TypeError, ValueError), e:
raise ValueError('%s (option=%s)' % (e, option))
def getJson(self, key):
import json
return self.get(key, json.loads)
def getPoint2D(self, key):
value = self.get(key)
maybeTuple = re.split(r'\s*[,xX]\s*', value)
if len(maybeTuple) != 2:
raise ValueError('Cannot convert key %s value %s to Point2D' % (key, value))
return libavg.Point2D(map(float, maybeTuple))
def getInt(self, key):
return self.get(key, int)
def getFloat(self, key):
return self.get(key, float)
def getBoolean(self, key):
value = self.get(key).lower()
if value in ('yes', 'true'):
return True
elif value in ('no', 'false'):
return False
else:
raise ValueError('Cannot convert %s to boolean' % value)
def set(self, key, value):
option = self.getOption(key)
option.value = value
def addOption(self, option):
if not isinstance(option, Option):
raise TypeError('Must be an instance of Option')
if self.__getOptionOrNone(option.key):
raise libavg.Exception('Option %s has been already defined' % option.key)
self.__options.append(option)
def __getOptionOrNone(self, key):
for option in self.__options:
if option.key == key:
return option
return None
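# Illustrative usage sketch (option keys and values are assumptions and not part
# of the original module):
#
#     settings = Settings([
#             Option('app_resolution', '640x480', help='Initial window size'),
#             Option('app_fullscreen', 'false', help='Run fullscreen')])
#     settings.applyExtender(ArgvExtender('1.0.0'))
#     size = settings.getPoint2D('app_resolution')        # -> Point2D(640, 480)
#     fullscreen = settings.getBoolean('app_fullscreen')  # -> False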
| lgpl-2.1 | -5,335,792,898,479,195,000 | 28.730496 | 90 | 0.579914 | false |
blaze225/zulip | zerver/webhooks/stash/tests.py | 26 | 1101 | # -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class StashHookTests(WebhookTestCase):
STREAM_NAME = 'stash'
URL_TEMPLATE = u"/api/v1/external/stash?stream={stream}"
def test_stash_message(self):
# type: () -> None
"""
Messages are generated by Stash on a `git push`.
The subject describes the repo and Stash "project". The
content describes the commits pushed.
"""
expected_subject = u"Secret project/Operation unicorn: master"
expected_message = """`f259e90` was pushed to **master** in **Secret project/Operation unicorn** with:
* `f259e90`: Updating poms ..."""
self.send_and_test_stream_message('push', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded",
**self.api_auth(self.TEST_USER_EMAIL))
def get_body(self, fixture_name):
# type: (Text) -> Text
return self.fixture_data("stash", fixture_name, file_type="json")
| apache-2.0 | -2,553,588,233,211,449,300 | 39.777778 | 110 | 0.608538 | false |
h1ds/h1ds | h1ds/h1ds_summary/migrations/0010_auto__del_field_summaryattribute_source__add_field_summaryattribute_so.py | 1 | 3957 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SummaryAttribute.source'
db.delete_column('h1ds_summary_summaryattribute', 'source')
# Adding field 'SummaryAttribute.source_url'
db.add_column('h1ds_summary_summaryattribute', 'source_url', self.gf('django.db.models.fields.URLField')(default='http://example.com', max_length=1000), keep_default=False)
def backwards(self, orm):
# We cannot add back in field 'SummaryAttribute.source'
raise RuntimeError(
"Cannot reverse this migration. 'SummaryAttribute.source' and its values cannot be restored.")
# Deleting field 'SummaryAttribute.source_url'
db.delete_column('h1ds_summary_summaryattribute', 'source_url')
models = {
'h1ds_summary.datetimeattributeinstance': {
'Meta': {'object_name': 'DateTimeAttributeInstance'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['h1ds_summary.SummaryAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['h1ds_summary.Shot']"}),
'value': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'h1ds_summary.floatattributeinstance': {
'Meta': {'object_name': 'FloatAttributeInstance'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['h1ds_summary.SummaryAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['h1ds_summary.Shot']"}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'h1ds_summary.integerattributeinstance': {
'Meta': {'object_name': 'IntegerAttributeInstance'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['h1ds_summary.SummaryAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['h1ds_summary.Shot']"}),
'value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'h1ds_summary.shot': {
'Meta': {'object_name': 'Shot'},
'shot': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'h1ds_summary.summaryattribute': {
'Meta': {'object_name': 'SummaryAttribute'},
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'default_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'default_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'display_format': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'full_description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'short_description': ('django.db.models.fields.TextField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'})
}
}
complete_apps = ['h1ds_summary']
| mit | -2,852,872,805,563,367,400 | 55.528571 | 180 | 0.583523 | false |
coderjames/pascal | quex-0.63.1/quex/engine/state_machine/setup_pre_context.py | 1 | 3271 | # (C) Frank-Rene Schaefer
# ABSOLUTELY NO WARRANTY
import quex.engine.state_machine.algorithm.beautifier as beautifier
import quex.engine.state_machine.algorithm.acceptance_pruning as acceptance_pruning
from quex.blackboard import E_PreContextIDs, setup as Setup
def do(the_state_machine, pre_context_sm, BeginOfLinePreContextF):
"""Sets up a pre-condition to the given state machine. This process
is entirely different from any sequentializing or parallelization
of state machines. Here, the state machine representing the pre-
condition is **not** webbed into the original state machine!
Instead, the following happens:
-- the pre-condition state machine is inverted, because
it is to be walked through backwards.
-- the inverted state machine is marked with the state machine id
of the_state_machine.
-- the original state machine will refer to the inverse
state machine of the pre-condition.
-- the initial state origins and the origins of the acceptance
states are marked as 'pre-conditioned' indicating the id
of the inverted state machine of the pre-condition.
"""
#___________________________________________________________________________________________
# (*) do some consistency checking
# -- state machines with no states are senseless here.
assert not the_state_machine.is_empty()
assert pre_context_sm is None or not pre_context_sm.is_empty()
# -- trivial pre-conditions should be added last, for simplicity
#___________________________________________________________________________________________
if pre_context_sm is None:
if BeginOfLinePreContextF:
# Mark all acceptance states with the 'trivial pre-context BeginOfLine' flag
for state in the_state_machine.get_acceptance_state_list():
state.set_pre_context_id(E_PreContextIDs.BEGIN_OF_LINE)
return None
# (*) invert the state machine of the pre-condition
inverse_pre_context = pre_context_sm.get_inverse()
if BeginOfLinePreContextF:
# Extend the existing pre-context with a preceeding 'begin-of-line'.
inverse_pre_context.mount_newline_to_acceptance_states(Setup.dos_carriage_return_newline_f, InverseF=True)
# (*) Clean up what has been done by inversion (and optionally 'BeginOfLinePreContextF')
inverse_pre_context = beautifier.do(inverse_pre_context)
# (*) Once an acceptance state is reached no further analysis is necessary.
acceptance_pruning.do(inverse_pre_context)
# (*) let the state machine refer to it
# [Is this necessary? Is it not enough that the acceptance origins point to it? <fschaef>]
pre_context_sm_id = inverse_pre_context.get_id()
# (*) create origin data, in case where there is none yet create new one.
# (do not delete, otherwise existing information gets lost)
for state in the_state_machine.states.values():
if not state.is_acceptance(): continue
state.set_pre_context_id(pre_context_sm_id)
return inverse_pre_context
| bsd-2-clause | 775,866,475,204,237,000 | 48.560606 | 114 | 0.633445 | false |
mjhoy/dotfiles | offlineimap.py | 1 | 1082 | #!/usr/bin/python
import re, subprocess, os
def get_password(path):
return os.popen("pass %s" %(path)).read()
def get_authinfo_password(machine, login, port):
s = "machine %s login %s port %s password ([^\s]*)" % (machine, login, port)
p = re.compile(s)
authinfo = os.popen("gpg -q --no-tty -d ~/.authinfo.gpg").read()
return p.search(authinfo).group(1)
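# Example line in ~/.authinfo.gpg that the regex above expects (hypothetical values):
#   machine imap.example.com login user@example.com port 993 password hunter2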
# # OSX keychain; not sure if this still works.
# def get_keychain_pass(account=None, server=None):
# params = {
# 'security': '/usr/bin/security',
# 'command': 'find-internet-password',
# 'account': account,
# 'server': server,
# 'keychain': '/Users/mjhoy/Library/Keychains/login.keychain',
# }
# command = "sudo -u mjhoy %(security)s -v %(command)s -g -a %(account)s -s %(server)s %(keychain)s" % params
# output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
# outtext = [l for l in output.splitlines()
# if l.startswith('password: ')][0]
# return re.match(r'password: "(.*)"', outtext).group(1)
| mit | 6,887,556,220,862,612,000 | 39.074074 | 113 | 0.608133 | false |
timpalpant/KaggleTSTextClassification | scripts/csv_to_npz.py | 1 | 1146 | #!/usr/bin/env python
'''
Convert data to npz file for easy loading
'''
import argparse
import sys
from common import *
def opts():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('train',
help='Training data (csv)')
parser.add_argument('train_labels',
help='Training data labels (csv)')
parser.add_argument('test',
help='Test data (csv)')
parser.add_argument('train_out',
help='Training data output file (npz)')
parser.add_argument('train_labels_out',
help='Training data labels output file (npz)')
parser.add_argument('test_out',
help='Test data output file (npz)')
return parser
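# Typical invocation (hypothetical file names):
#   python csv_to_npz.py train.csv trainLabels.csv test.csv train.npz trainLabels.npz test.npz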
if __name__ == "__main__":
args = opts().parse_args()
for input, output in ((args.train, args.train_out),
(args.train_labels, args.train_labels_out),
(args.test, args.test_out)):
loader = guess_loader(input)
print >>sys.stderr, "Loading data from %s" % input
data = loader(input)
print >>sys.stderr, "Saving to %s" % output
save_npz(output, **data)
del data | gpl-3.0 | 4,213,318,312,893,817,000 | 30.861111 | 69 | 0.593368 | false |
wangkangcheng/ccc | qap/functional_preproc.py | 3 | 14228 |
def get_idx(in_files, stop_idx=None, start_idx=None):
"""
Method to get the first and the last volume for
the functional run. It verifies the user specified
first and last volume. If the values are not valid, it
    calculates and returns the very first and the last volume
Parameters
----------
    in_files : string (nifti file)
        Path to input functional run
    stop_idx : int
        Last volume to be considered, specified by user
        in the configuration file
    start_idx : int
        First volume to be considered, specified by user
        in the configuration file
    Returns
    -------
    stop_idx : int
        Value of the last volume to consider for the functional run
    start_idx : int
        Value of the first volume to consider for the functional run
"""
#stopidx = None
#startidx = None
from nibabel import load
nvols = load(in_files).shape[3]
if (start_idx == None) or (start_idx < 0) or (start_idx > (nvols - 1)):
startidx = 0
else:
startidx = start_idx
if (stop_idx == None) or (stop_idx > (nvols - 1)):
stopidx = nvols - 1
else:
stopidx = stop_idx
return stopidx, startidx
def func_motion_correct_workflow(workflow, resource_pool, config):
# resource pool should have:
# functional_scan
import os
import sys
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl.maths as fsl
from nipype.interfaces.afni import preprocess
from workflow_utils import check_input_resources, \
check_config_settings
check_input_resources(resource_pool, "functional_scan")
check_config_settings(config, "start_idx")
check_config_settings(config, "stop_idx")
check_config_settings(config, "slice_timing_correction")
func_get_idx = pe.Node(util.Function(input_names=['in_files',
'stop_idx',
'start_idx'],
output_names=['stopidx',
'startidx'],
function=get_idx),
name='func_get_idx')
func_get_idx.inputs.in_files = resource_pool["functional_scan"]
func_get_idx.inputs.start_idx = config["start_idx"]
func_get_idx.inputs.stop_idx = config["stop_idx"]
func_drop_trs = pe.Node(interface=preprocess.Calc(),
name='func_drop_trs')
func_drop_trs.inputs.in_file_a = resource_pool["functional_scan"]
func_drop_trs.inputs.expr = 'a'
func_drop_trs.inputs.outputtype = 'NIFTI_GZ'
workflow.connect(func_get_idx, 'startidx',
func_drop_trs, 'start_idx')
workflow.connect(func_get_idx, 'stopidx',
func_drop_trs, 'stop_idx')
#workflow.connect(func_drop_trs, 'out_file',
# outputNode, 'drop_tr')
func_slice_timing_correction = pe.Node(interface=preprocess.TShift(),
name='func_slice_time_correction')
func_slice_timing_correction.inputs.outputtype = 'NIFTI_GZ'
func_deoblique = pe.Node(interface=preprocess.Refit(),
name='func_deoblique')
func_deoblique.inputs.deoblique = True
if config["slice_timing_correction"] == True:
workflow.connect(func_drop_trs, 'out_file',
func_slice_timing_correction,'in_file')
workflow.connect(func_slice_timing_correction, 'out_file',
func_deoblique, 'in_file')
else:
workflow.connect(func_drop_trs, 'out_file',
func_deoblique, 'in_file')
func_reorient = pe.Node(interface=preprocess.Resample(),
name='func_reorient')
func_reorient.inputs.orientation = 'RPI'
func_reorient.inputs.outputtype = 'NIFTI_GZ'
workflow.connect(func_deoblique, 'out_file',
func_reorient, 'in_file')
func_get_mean_RPI = pe.Node(interface=preprocess.TStat(),
name='func_get_mean_RPI')
func_get_mean_RPI.inputs.options = '-mean'
func_get_mean_RPI.inputs.outputtype = 'NIFTI_GZ'
workflow.connect(func_reorient, 'out_file',
func_get_mean_RPI, 'in_file')
# calculate motion parameters
func_motion_correct = pe.Node(interface=preprocess.Volreg(),
name='func_motion_correct')
func_motion_correct.inputs.args = '-Fourier -twopass'
func_motion_correct.inputs.zpad = 4
func_motion_correct.inputs.outputtype = 'NIFTI_GZ'
workflow.connect(func_reorient, 'out_file',
func_motion_correct, 'in_file')
workflow.connect(func_get_mean_RPI, 'out_file',
func_motion_correct, 'basefile')
func_get_mean_motion = func_get_mean_RPI.clone('func_get_mean_motion')
workflow.connect(func_motion_correct, 'out_file',
func_get_mean_motion, 'in_file')
func_motion_correct_A = func_motion_correct.clone('func_motion_correct_A')
func_motion_correct_A.inputs.md1d_file = 'max_displacement.1D'
workflow.connect(func_reorient, 'out_file',
func_motion_correct_A, 'in_file')
workflow.connect(func_get_mean_motion, 'out_file',
func_motion_correct_A, 'basefile')
resource_pool["func_motion_correct"] = (func_motion_correct_A, 'out_file')
resource_pool["coordinate_transformation"] = \
(func_motion_correct_A, 'oned_matrix_save')
return workflow, resource_pool
def run_func_motion_correct(functional_scan, start_idx, stop_idx,
slice_timing_correction=False, run=True):
# stand-alone runner for functional motion correct workflow
import os
import sys
import glob
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
workflow = pe.Workflow(name='func_motion_correct_workflow')
current_dir = os.getcwd()
workflow_dir = os.path.join(current_dir, "func_motion_correct")
workflow.base_dir = workflow_dir
resource_pool = {}
config = {}
num_cores_per_subject = 1
resource_pool["functional_scan"] = functional_scan
config["start_idx"] = start_idx
config["stop_idx"] = stop_idx
config["slice_timing_correction"] = slice_timing_correction
workflow, resource_pool = \
func_motion_correct_workflow(workflow, resource_pool, config)
ds = pe.Node(nio.DataSink(), name='datasink_func_motion_correct')
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool["func_motion_correct"]
workflow.connect(node, out_file, ds, 'func_motion_correct')
ds = pe.Node(nio.DataSink(), name='datasink_coordinate_transformation')
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool["coordinate_transformation"]
workflow.connect(node, out_file, ds, 'coordinate_transformation')
if run == True:
workflow.run(plugin='MultiProc', plugin_args= \
{'n_procs': num_cores_per_subject})
outpath = glob.glob(os.path.join(workflow_dir, "func_motion_correct",\
"*"))[0]
return outpath
else:
return workflow, workflow.base_dir
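# Example stand-alone use (hypothetical path; indices as they would appear in the config):
#   outpath = run_func_motion_correct("/data/sub01_func.nii.gz", start_idx=0,
#                                     stop_idx=None, slice_timing_correction=False)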
def functional_brain_mask_workflow(workflow, resource_pool, config):
# resource pool should have:
# func_motion_correct
import os
import sys
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
from nipype.interfaces.afni import preprocess
#check_input_resources(resource_pool, "func_motion_correct")
if "use_bet" not in config.keys():
config["use_bet"] = False
if "func_motion_correct" not in resource_pool.keys():
from functional_preproc import func_motion_correct_workflow
workflow, resource_pool = \
func_motion_correct_workflow(workflow, resource_pool, config)
if config["use_bet"] == False:
func_get_brain_mask = pe.Node(interface=preprocess.Automask(),
name='func_get_brain_mask')
func_get_brain_mask.inputs.outputtype = 'NIFTI_GZ'
else:
func_get_brain_mask = pe.Node(interface=fsl.BET(),
name='func_get_brain_mask_BET')
func_get_brain_mask.inputs.mask = True
func_get_brain_mask.inputs.functional = True
erode_one_voxel = pe.Node(interface=fsl.ErodeImage(),
name='erode_one_voxel')
erode_one_voxel.inputs.kernel_shape = 'box'
erode_one_voxel.inputs.kernel_size = 1.0
#if isinstance(tuple, resource_pool["func_motion_correct"]):
if len(resource_pool["func_motion_correct"]) == 2:
node, out_file = resource_pool["func_motion_correct"]
workflow.connect(node, out_file, func_get_brain_mask, 'in_file')
else:
func_get_brain_mask.inputs.in_file = \
resource_pool["func_motion_correct"]
if config["use_bet"] == False:
resource_pool["functional_brain_mask"] = (func_get_brain_mask, \
'out_file')
else:
workflow.connect(func_get_brain_mask, 'mask_file',
erode_one_voxel, 'in_file')
resource_pool["functional_brain_mask"] = (erode_one_voxel, 'out_file')
return workflow, resource_pool
def run_functional_brain_mask(func_motion_correct, use_bet=False, run=True):
# stand-alone runner for functional brain mask workflow
import os
import sys
import glob
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
output = "functional_brain_mask"
workflow = pe.Workflow(name='%s_workflow' % output)
current_dir = os.getcwd()
workflow_dir = os.path.join(current_dir, output)
workflow.base_dir = workflow_dir
resource_pool = {}
config = {}
num_cores_per_subject = 1
resource_pool["func_motion_correct"] = func_motion_correct
config["use_bet"] = use_bet
workflow, resource_pool = \
functional_brain_mask_workflow(workflow, resource_pool, config)
ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool[output]
workflow.connect(node, out_file, ds, output)
if run == True:
workflow.run(plugin='MultiProc', plugin_args= \
{'n_procs': num_cores_per_subject})
outpath = glob.glob(os.path.join(workflow_dir, "functional_brain" \
"_mask", "*"))[0]
return outpath
else:
return workflow, workflow.base_dir
def mean_functional_workflow(workflow, resource_pool, config):
# resource pool should have:
# func_motion_correct
''' this version does NOT remove background noise '''
import os
import sys
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl.maths as fsl
from nipype.interfaces.afni import preprocess
from workflow_utils import check_input_resources
#check_input_resources(resource_pool, "func_motion_correct")
#check_input_resources(resource_pool, "functional_brain_mask")
if "func_motion_correct" not in resource_pool.keys():
from functional_preproc import func_motion_correct_workflow
workflow, resource_pool = \
func_motion_correct_workflow(workflow, resource_pool, config)
func_mean_skullstrip = pe.Node(interface=preprocess.TStat(),
name='func_mean_skullstrip')
func_mean_skullstrip.inputs.options = '-mean'
func_mean_skullstrip.inputs.outputtype = 'NIFTI_GZ'
if len(resource_pool["func_motion_correct"]) == 2:
node, out_file = resource_pool["func_motion_correct"]
workflow.connect(node, out_file, func_mean_skullstrip, 'in_file')#func_edge_detect, 'in_file_a')
else:
func_mean_skullstrip.inputs.in_file = \
resource_pool["func_motion_correct"]
resource_pool["mean_functional"] = (func_mean_skullstrip, 'out_file')
return workflow, resource_pool
def run_mean_functional(func_motion_correct, run=True):
# stand-alone runner for mean functional workflow
''' this version does NOT remove background noise '''
import os
import sys
import glob
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe
output = "mean_functional"
workflow = pe.Workflow(name='%s_workflow' % output)
current_dir = os.getcwd()
workflow_dir = os.path.join(current_dir, output)
workflow.base_dir = workflow_dir
resource_pool = {}
config = {}
num_cores_per_subject = 1
resource_pool["func_motion_correct"] = func_motion_correct
workflow, resource_pool = \
mean_functional_workflow(workflow, resource_pool, config)
ds = pe.Node(nio.DataSink(), name='datasink_%s' % output)
ds.inputs.base_directory = workflow_dir
node, out_file = resource_pool[output]
workflow.connect(node, out_file, ds, output)
if run == True:
workflow.run(plugin='MultiProc', plugin_args= \
{'n_procs': num_cores_per_subject})
outpath = glob.glob(os.path.join(workflow_dir, "mean_functional", \
"*"))[0]
return outpath
else:
return workflow, workflow.base_dir
| bsd-3-clause | -8,111,842,691,503,999,000 | 26.680934 | 104 | 0.599733 | false |
balloob/home-assistant | homeassistant/components/lutron/cover.py | 6 | 2050 | """Support for Lutron shades."""
import logging
from homeassistant.components.cover import (
ATTR_POSITION,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
CoverEntity,
)
from . import LUTRON_CONTROLLER, LUTRON_DEVICES, LutronDevice
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Lutron shades."""
devs = []
for (area_name, device) in hass.data[LUTRON_DEVICES]["cover"]:
dev = LutronCover(area_name, device, hass.data[LUTRON_CONTROLLER])
devs.append(dev)
add_entities(devs, True)
return True
class LutronCover(LutronDevice, CoverEntity):
"""Representation of a Lutron shade."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._lutron_device.last_level() < 1
@property
def current_cover_position(self):
"""Return the current position of cover."""
return self._lutron_device.last_level()
def close_cover(self, **kwargs):
"""Close the cover."""
self._lutron_device.level = 0
def open_cover(self, **kwargs):
"""Open the cover."""
self._lutron_device.level = 100
def set_cover_position(self, **kwargs):
"""Move the shade to a specific position."""
if ATTR_POSITION in kwargs:
position = kwargs[ATTR_POSITION]
self._lutron_device.level = position
def update(self):
"""Call when forcing a refresh of the device."""
# Reading the property (rather than last_level()) fetches value
level = self._lutron_device.level
_LOGGER.debug("Lutron ID: %d updated to %f", self._lutron_device.id, level)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"Lutron Integration ID": self._lutron_device.id}
| apache-2.0 | -1,934,956,601,237,655,600 | 28.710145 | 83 | 0.636585 | false |
jruiperezv/ANALYSE | common/djangoapps/embargo/tests/test_middleware.py | 12 | 15449 | """
Tests for EmbargoMiddleware
"""
import mock
import pygeoip
import unittest
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db import connection, transaction
from django.test.utils import override_settings
import ddt
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase, mixed_store_config
)
# Explicitly import the cache from ConfigurationModel so we can reset it after each test
from config_models.models import cache
from embargo.models import EmbargoedCourse, EmbargoedState, IPFilter
# Since we don't need any XML course fixtures, use a modulestore configuration
# that disables the XML modulestore.
MODULESTORE_CONFIG = mixed_store_config(settings.COMMON_TEST_DATA_ROOT, {}, include_xml=False)
@ddt.ddt
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class EmbargoMiddlewareTests(ModuleStoreTestCase):
"""
Tests of EmbargoMiddleware
"""
def setUp(self):
self.user = UserFactory(username='fred', password='secret')
self.client.login(username='fred', password='secret')
self.embargo_course = CourseFactory.create()
self.embargo_course.save()
self.regular_course = CourseFactory.create(org="Regular")
self.regular_course.save()
self.embargoed_page = '/courses/' + self.embargo_course.id.to_deprecated_string() + '/info'
self.regular_page = '/courses/' + self.regular_course.id.to_deprecated_string() + '/info'
EmbargoedCourse(course_id=self.embargo_course.id, embargoed=True).save()
EmbargoedState(
embargoed_countries="cu, ir, Sy, SD",
changed_by=self.user,
enabled=True
).save()
CourseEnrollment.enroll(self.user, self.regular_course.id)
CourseEnrollment.enroll(self.user, self.embargo_course.id)
# Text from lms/templates/static_templates/embargo.html
self.embargo_text = "Unfortunately, at this time edX must comply with export controls, and we cannot allow you to access this particular course."
self.patcher = mock.patch.object(pygeoip.GeoIP, 'country_code_by_addr', self.mock_country_code_by_addr)
self.patcher.start()
def tearDown(self):
# Explicitly clear ConfigurationModel's cache so tests have a clear cache
# and don't interfere with each other
cache.clear()
self.patcher.stop()
def mock_country_code_by_addr(self, ip_addr):
"""
Gives us a fake set of IPs
"""
ip_dict = {
'1.0.0.0': 'CU',
'2.0.0.0': 'IR',
'3.0.0.0': 'SY',
'4.0.0.0': 'SD',
            '5.0.0.0': 'AQ',  # Antarctica
'2001:250::': 'CN',
'2001:1340::': 'CU',
}
return ip_dict.get(ip_addr, 'US')
def test_countries(self):
# Accessing an embargoed page from a blocked IP should cause a redirect
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 302)
# Following the redirect should give us the embargo page
response = self.client.get(
self.embargoed_page,
HTTP_X_FORWARDED_FOR='1.0.0.0',
REMOTE_ADDR='1.0.0.0',
follow=True
)
self.assertIn(self.embargo_text, response.content)
# Accessing a regular page from a blocked IP should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 200)
# Accessing an embargoed page from a non-embargoed IP should succeed
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='5.0.0.0', REMOTE_ADDR='5.0.0.0')
self.assertEqual(response.status_code, 200)
# Accessing a regular page from a non-embargoed IP should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='5.0.0.0', REMOTE_ADDR='5.0.0.0')
self.assertEqual(response.status_code, 200)
def test_countries_ipv6(self):
# Accessing an embargoed page from a blocked IP should cause a redirect
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='2001:1340::', REMOTE_ADDR='2001:1340::')
self.assertEqual(response.status_code, 302)
# Following the redirect should give us the embargo page
response = self.client.get(
self.embargoed_page,
HTTP_X_FORWARDED_FOR='2001:1340::',
REMOTE_ADDR='2001:1340::',
follow=True
)
self.assertIn(self.embargo_text, response.content)
# Accessing a regular page from a blocked IP should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='2001:1340::', REMOTE_ADDR='2001:1340::')
self.assertEqual(response.status_code, 200)
# Accessing an embargoed page from a non-embargoed IP should succeed
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='2001:250::', REMOTE_ADDR='2001:250::')
self.assertEqual(response.status_code, 200)
# Accessing a regular page from a non-embargoed IP should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='2001:250::', REMOTE_ADDR='2001:250::')
self.assertEqual(response.status_code, 200)
def test_ip_exceptions(self):
# Explicitly whitelist/blacklist some IPs
IPFilter(
whitelist='1.0.0.0',
blacklist='5.0.0.0',
changed_by=self.user,
enabled=True
).save()
# Accessing an embargoed page from a blocked IP that's been whitelisted
# should succeed
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 200)
# Accessing a regular course from a blocked IP that's been whitelisted should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 200)
# Accessing an embargoed course from non-embargoed IP that's been blacklisted
# should cause a redirect
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='5.0.0.0', REMOTE_ADDR='5.0.0.0')
self.assertEqual(response.status_code, 302)
# Following the redirect should give us the embargo page
response = self.client.get(
self.embargoed_page,
HTTP_X_FORWARDED_FOR='5.0.0.0',
REMOTE_ADDR='1.0.0.0',
follow=True
)
self.assertIn(self.embargo_text, response.content)
# Accessing a regular course from a non-embargoed IP that's been blacklisted should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='5.0.0.0', REMOTE_ADDR='5.0.0.0')
self.assertEqual(response.status_code, 200)
def test_ip_network_exceptions(self):
# Explicitly whitelist/blacklist some IP networks
IPFilter(
whitelist='1.0.0.1/24',
blacklist='5.0.0.0/16,1.1.0.0/24',
changed_by=self.user,
enabled=True
).save()
# Accessing an embargoed page from a blocked IP that's been whitelisted with a network
# should succeed
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 200)
# Accessing a regular course from a blocked IP that's been whitelisted with a network
# should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 200)
# Accessing an embargoed course from non-embargoed IP that's been blacklisted with a network
# should cause a redirect
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='5.0.0.100', REMOTE_ADDR='5.0.0.100')
self.assertEqual(response.status_code, 302)
# Following the redirect should give us the embargo page
response = self.client.get(
self.embargoed_page,
HTTP_X_FORWARDED_FOR='5.0.0.100',
REMOTE_ADDR='5.0.0.100',
follow=True
)
self.assertIn(self.embargo_text, response.content)
        # Accessing an embargoed course from non-embargoed IP that's been blacklisted with a network
# should cause a redirect
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='1.1.0.1', REMOTE_ADDR='1.1.0.1')
self.assertEqual(response.status_code, 302)
# Following the redirect should give us the embargo page
response = self.client.get(
self.embargoed_page,
HTTP_X_FORWARDED_FOR='1.1.0.0',
REMOTE_ADDR='1.1.0.0',
follow=True
)
self.assertIn(self.embargo_text, response.content)
        # Accessing an embargoed course from an IP that's not covered by the blacklisted network rule
# should succeed
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='1.1.1.0', REMOTE_ADDR='1.1.1.0')
self.assertEqual(response.status_code, 200)
# Accessing a regular course from a non-embargoed IP that's been blacklisted
# should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='5.0.0.0', REMOTE_ADDR='5.0.0.0')
self.assertEqual(response.status_code, 200)
@ddt.data(
(None, False),
("", False),
("us", False),
("CU", True),
("Ir", True),
("sy", True),
("sd", True)
)
@ddt.unpack
def test_embargo_profile_country(self, profile_country, is_embargoed):
# Set the country in the user's profile
profile = self.user.profile
profile.country = profile_country
profile.save()
# Attempt to access an embargoed course
response = self.client.get(self.embargoed_page)
# If the user is from an embargoed country, verify that
# they are redirected to the embargo page.
if is_embargoed:
embargo_url = reverse('embargo')
self.assertRedirects(response, embargo_url)
# Otherwise, verify that the student can access the page
else:
self.assertEqual(response.status_code, 200)
# For non-embargoed courses, the student should be able to access
# the page, even if he/she is from an embargoed country.
response = self.client.get(self.regular_page)
self.assertEqual(response.status_code, 200)
def test_embargo_profile_country_cache(self):
# Set the country in the user's profile
profile = self.user.profile
profile.country = "us"
profile.save()
# Warm the cache
with self.assertNumQueries(16):
self.client.get(self.embargoed_page)
# Access the page multiple times, but expect that we hit
# the database to check the user's profile only once
with self.assertNumQueries(10):
self.client.get(self.embargoed_page)
def test_embargo_profile_country_db_null(self):
# Django country fields treat NULL values inconsistently.
# When saving a profile with country set to None, Django saves an empty string to the database.
# However, when the country field loads a NULL value from the database, it sets
# `country.code` to `None`. This caused a bug in which country values created by
# the original South schema migration -- which defaulted to NULL -- caused a runtime
# exception when the embargo middleware treated the value as a string.
# In order to simulate this behavior, we can't simply set `profile.country = None`.
# (because when we save it, it will set the database field to an empty string instead of NULL)
query = "UPDATE auth_userprofile SET country = NULL WHERE id = %s"
connection.cursor().execute(query, [str(self.user.profile.id)])
transaction.commit_unless_managed()
# Attempt to access an embargoed course
# Verify that the student can access the page without an error
response = self.client.get(self.embargoed_page)
self.assertEqual(response.status_code, 200)
@mock.patch.dict(settings.FEATURES, {'EMBARGO': False})
def test_countries_embargo_off(self):
# When the middleware is turned off, all requests should go through
        # Accessing an embargoed page from a blocked IP should be OK
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 200)
# Accessing a regular page from a blocked IP should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 200)
# Explicitly whitelist/blacklist some IPs
IPFilter(
whitelist='1.0.0.0',
blacklist='5.0.0.0',
changed_by=self.user,
enabled=True
).save()
# Accessing an embargoed course from non-embargoed IP that's been blacklisted
# should be OK
response = self.client.get(self.embargoed_page, HTTP_X_FORWARDED_FOR='5.0.0.0', REMOTE_ADDR='5.0.0.0')
self.assertEqual(response.status_code, 200)
# Accessing a regular course from a non-embargoed IP that's been blacklisted should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='5.0.0.0', REMOTE_ADDR='5.0.0.0')
self.assertEqual(response.status_code, 200)
@mock.patch.dict(settings.FEATURES, {'EMBARGO': False, 'SITE_EMBARGOED': True})
def test_embargo_off_embargo_site_on(self):
        # When the middleware is turned on with SITE_EMBARGOED, main site access should be restricted
# Accessing a regular page from a blocked IP is denied.
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 403)
# Accessing a regular page from a non blocked IP should succeed
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='5.0.0.0', REMOTE_ADDR='5.0.0.0')
self.assertEqual(response.status_code, 200)
@mock.patch.dict(settings.FEATURES, {'EMBARGO': False, 'SITE_EMBARGOED': True})
@override_settings(EMBARGO_SITE_REDIRECT_URL='https://www.edx.org/')
def test_embargo_off_embargo_site_on_with_redirect_url(self):
# When the middleware is turned on with SITE_EMBARGOED, main site access
# should be restricted. Accessing a regular page from a blocked IP is
# denied, and redirected to EMBARGO_SITE_REDIRECT_URL rather than returning a 403.
response = self.client.get(self.regular_page, HTTP_X_FORWARDED_FOR='1.0.0.0', REMOTE_ADDR='1.0.0.0')
self.assertEqual(response.status_code, 302)
| agpl-3.0 | 6,012,280,184,676,277,000 | 44.979167 | 153 | 0.653246 | false |
paulocsanz/graphs.c | tsp/unused/tsp.py | 1 | 2205 | from itertools import combinations
from math import sqrt
from sys import float_info
from time import time
class Graph:
def __init__(self, vertices):
self.vertex_map = {}
self.vertex_list = []
self.vertices = vertices
self.path = []
def distance(self, first, second):
if isinstance(first, int):
first = self.vertex_list[first]
if isinstance(second, int):
second = self.vertex_list[second]
return sqrt(((first[0] - second[0])**2) + ((first[1] - second[1])**2))
def from_file(path):
with open(path, "r") as f:
for line in f:
graph = Graph(int(line))
break
for line in f:
            x, y = line.split(" ")
            node = (int(x), int(y))
graph.vertex_map[node] = len(graph.vertex_map)
graph.vertex_list += [node]
return graph
def B(start, subset, end, graph, path):
if subset == set():
return graph.distance(start, end), path + [graph.vertex_list[end]]
else:
min_dist = float_info.max
final_path = path
for el in subset:
dist, _path = B(start, subset.difference({el}), el, graph, path)
dist += graph.distance(el, end)
_path += [graph.vertex_list[end]]
if dist < min_dist:
min_dist = dist
final_path = _path
return min_dist, final_path
def held_karp(graph):
start = 0
C = set(range(graph.vertices))
min_dist = float_info.max
for t in C.difference({start}):
dist, path = B(start, C.difference({start, t}), t, graph, [])
dist += graph.distance(t, start)
if dist < min_dist:
min_dist = dist
graph.path = path
graph.path = [graph.vertex_list[start]] + graph.path
return min_dist
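# A memoized variant of the same recursion (a sketch, not part of the original
# script): caching results on (frozenset(subset), end) turns the plain recursion
# above into the usual O(n^2 * 2^n) Held-Karp dynamic program. `held_karp_memo`
# is a hypothetical helper name; it assumes the same Graph interface defined above.
def held_karp_memo(graph):
    start = 0
    memo = {}
    def B_memo(subset, end):
        # subset: frozenset of vertex indices still to visit before reaching `end`
        if not subset:
            return graph.distance(start, end), [graph.vertex_list[end]]
        if (subset, end) not in memo:
            best_dist, best_path = float_info.max, []
            for el in subset:
                dist, path = B_memo(subset - frozenset({el}), el)
                dist += graph.distance(el, end)
                if dist < best_dist:
                    best_dist, best_path = dist, path + [graph.vertex_list[end]]
            memo[(subset, end)] = (best_dist, best_path)
        return memo[(subset, end)]
    others = frozenset(range(graph.vertices)) - frozenset({start})
    min_dist = float_info.max
    for t in others:
        dist, path = B_memo(others - frozenset({t}), t)
        dist += graph.distance(t, start)
        if dist < min_dist:
            min_dist = dist
            graph.path = [graph.vertex_list[start]] + path
    return min_dist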
def main():
for i in [5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 7500]:
g = from_file("points-{}.txt".format(i))
start_time = time()
size = held_karp(g)
execution_time = time() - start_time
print(execution_time)
print(*list(map(lambda x: str(g.vertex_map[x]+1), g.path)))
if __name__ == "__main__":
main()
| agpl-3.0 | -991,842,888,075,741,600 | 30.056338 | 78 | 0.545578 | false |
google-code-export/photivo | mm extern photivo.py | 7 | 4118 | #!/usr/bin/env python
'''
mm extern photivo.py
Passes an image to Photivo
Author:
Michael Munzert (mike photivo org)
Bernd Schoeler (brother.john photivo org)
Version:
2011.08.28 Brother John: Adjust Photivo cli
2011.01.29 Brother John: Ask user for photivo.exe and store in gimprc
2011.01.27 Brother John: Fixed failing execution of Photivo on Windows.
2011.01.02 mike: Initial version.
modelled after the trace plugin (lloyd konneker, lkk, bootch at nc.rr.com)
License:
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
The GNU Public License is available at
http://www.gnu.org/copyleft/gpl.html
'''
from gimpfu import *
from platform import system
import os
import subprocess
import Tkinter, tkFileDialog
def plugin_main(image, drawable, visible):
# Copy so the save operations doesn't affect the original
tempimage = pdb.gimp_image_duplicate(image)
if not tempimage:
raise RuntimeError
# Use temp file names from gimp, it reflects the user's choices in gimp.rc
tempfilename = pdb.gimp_temp_name("tif")
if visible == 0:
# Save in temporary. Note: empty user entered file name
tempdrawable = pdb.gimp_image_get_active_drawable(tempimage)
else:
# Get the current visible
tempdrawable = pdb.gimp_layer_new_from_visible(image, tempimage, "visible")
# !!! Note no run-mode first parameter, and user entered filename is empty string
pdb.gimp_progress_set_text ("Saving a copy")
pdb.gimp_file_save(tempimage, tempdrawable, tempfilename, "")
# cleanup
gimp.delete(tempimage) # delete the temporary image
# Platform dependent full command string for Photivo.
if system() == "Linux":
        # We can assume Photivo can be called with a plain "photivo" command.
command = 'photivo --load-and-delete "%s"' % (tempfilename)
elif system() == "Windows":
# There is no way to call Photivo without knowing exactly where it is installed.
# So we ask the user for the path to photivo.exe and store it in the user's gimprc.
cmdWindows = ""
try:
cmdWindows = pdb.gimp_gimprc_query("photivo-executable")
except RuntimeError: # Catch ExecutionError when the key is not found in gimprc
pass
if not os.path.exists(cmdWindows):
root = Tkinter.Tk()
root.withdraw() # Hide the Tkinter main window so only the file dialog shows
cmdWindows = tkFileDialog.askopenfilename(
parent = None,
title = "Where is photivo.exe located?",
filetypes = [('photivo.exe','photivo.exe')],
initialdir = "C:\\"
)
dummy = pdb.gimp_gimprc_set("photivo-executable", cmdWindows)
command = '"%s" --load-and-delete "%s"' % (cmdWindows, tempfilename)
# Invoke Photivo.
pdb.gimp_progress_set_text(command)
pdb.gimp_progress_pulse()
if system() == "Windows":
child = subprocess.Popen(command)
elif system() == "Linux":
child = subprocess.Popen(command, shell = True)
register(
"python_fu_mm_extern_photivo",
"Pass the image to Photivo.",
"Pass the image to Photivo.",
"Michael Munzert (mike photivo org)",
"Copyright 2011 Michael Munzert",
"2011",
"<Image>/Filters/MM-Filters/_Export to Photivo ...",
"*",
[ (PF_RADIO, "visible", "Layer:", 1, (("new from visible", 1),("current layer",0)))
],
[],
plugin_main,
)
main()
| gpl-3.0 | 8,150,004,020,216,476,000 | 33.196581 | 94 | 0.635503 | false |
dorant/home-assistant | homeassistant/components/device_tracker/nmap_tracker.py | 6 | 4567 | """
homeassistant.components.device_tracker.nmap
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a network with nmap.
Configuration:
To use the nmap tracker you will need to add something like the following
to your configuration.yaml file.
device_tracker:
platform: nmap_tracker
hosts: 192.168.1.1/24
Variables:
hosts
*Required
The IP addresses to scan in the network-prefix notation (192.168.1.1/24) or
the range notation (192.168.1.1-255).
home_interval
*Optional
Number of minutes during which devices found in previous results will not be scanned again.
This is to save battery.
"""
import logging
from datetime import timedelta
from collections import namedtuple
import subprocess
import re
import homeassistant.util.dt as dt_util
from homeassistant.const import CONF_HOSTS
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle, convert
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
# interval in minutes to exclude devices from a scan while they are home
CONF_HOME_INTERVAL = "home_interval"
REQUIREMENTS = ['python-nmap==0.4.3']
def get_scanner(hass, config):
""" Validates config and returns a Nmap scanner. """
if not validate_config(config, {DOMAIN: [CONF_HOSTS]},
_LOGGER):
return None
scanner = NmapDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple("Device", ["mac", "name", "ip", "last_update"])
def _arp(ip_address):
""" Get the MAC address for a given IP. """
cmd = ['arp', '-n', ip_address]
arp = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out, _ = arp.communicate()
match = re.search(r'(([0-9A-Fa-f]{1,2}\:){5}[0-9A-Fa-f]{1,2})', str(out))
if match:
return match.group(0)
_LOGGER.info("No MAC address found for %s", ip_address)
return None
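# Example `arp -n` output line that the regex above matches (hypothetical values):
#   ? (192.168.1.10) at a4:5e:60:f2:12:34 [ether] on wlan0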
class NmapDeviceScanner(object):
""" This class scans for devices using nmap. """
def __init__(self, config):
self.last_results = []
self.hosts = config[CONF_HOSTS]
minutes = convert(config.get(CONF_HOME_INTERVAL), int, 0)
self.home_interval = timedelta(minutes=minutes)
self.success_init = self._update_info()
_LOGGER.info("nmap scanner initialized")
def scan_devices(self):
"""
Scans for new devices and return a list containing found device ids.
"""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, mac):
""" Returns the name of the given device or None if we don't know. """
filter_named = [device.name for device in self.last_results
if device.mac == mac]
if filter_named:
return filter_named[0]
else:
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
Scans the network for devices.
Returns boolean if scanning successful.
"""
_LOGGER.info("Scanning")
from nmap import PortScanner, PortScannerError
scanner = PortScanner()
options = "-F --host-timeout 5"
exclude_targets = set()
if self.home_interval:
now = dt_util.now()
for host in self.last_results:
if host.last_update + self.home_interval > now:
exclude_targets.add(host)
if len(exclude_targets) > 0:
target_list = [t.ip for t in exclude_targets]
options += " --exclude {}".format(",".join(target_list))
try:
result = scanner.scan(hosts=self.hosts, arguments=options)
except PortScannerError:
return False
now = dt_util.now()
self.last_results = []
for ipv4, info in result['scan'].items():
if info['status']['state'] != 'up':
continue
name = info['hostnames'][0] if info['hostnames'] else ipv4
# Mac address only returned if nmap ran as root
mac = info['addresses'].get('mac') or _arp(ipv4)
if mac is None:
continue
device = Device(mac.upper(), name, ipv4, now)
self.last_results.append(device)
self.last_results.extend(exclude_targets)
_LOGGER.info("nmap scan successful")
return True
| mit | -638,606,654,089,353,200 | 29.446667 | 78 | 0.622947 | false |
robbi/pyload | module/plugins/hoster/MegaCoNz.py | 5 | 15848 | # -*- coding: utf-8 -*-
import base64
import os
import random
import re
import struct
import Crypto.Cipher.AES
import Crypto.Util.Counter
from module.network.HTTPRequest import BadHeader
from ..internal.Hoster import Hoster
from ..internal.misc import decode, encode, exists, fsjoin, json
############################ General errors ###################################
# EINTERNAL (-1): An internal error has occurred. Please submit a bug report, detailing the exact circumstances in which this error occurred
# EARGS (-2): You have passed invalid arguments to this command
# EAGAIN (-3): (always at the request level) A temporary congestion or server malfunction prevented your request from being processed. No data was altered. Retry. Retries must be spaced with exponential backoff
# ERATELIMIT (-4): You have exceeded your command weight per time quota. Please wait a few seconds, then try again (this should never happen in sane real-life applications)
#
############################ Upload errors ####################################
# EFAILED (-5): The upload failed. Please restart it from scratch
# ETOOMANY (-6): Too many concurrent IP addresses are accessing this upload target URL
# ERANGE (-7): The upload file packet is out of range or not starting and ending on a chunk boundary
# EEXPIRED (-8): The upload target URL you are trying to access has expired. Please request a fresh one
#
############################ Stream/System errors #############################
# ENOENT (-9): Object (typically, node or user) not found
# ECIRCULAR (-10): Circular linkage attempted
# EACCESS (-11): Access violation (e.g., trying to write to a read-only share)
# EEXIST (-12): Trying to create an object that already exists
# EINCOMPLETE (-13): Trying to access an incomplete resource
# EKEY (-14): A decryption operation failed (never returned by the API)
# ESID (-15): Invalid or expired user session, please relogin
# EBLOCKED (-16): User blocked
# EOVERQUOTA (-17): Request over quota
# ETEMPUNAVAIL (-18): Resource temporarily not available, please try again later
# ETOOMANYCONNECTIONS (-19): Too many connections on this resource
# EWRITE (-20): Write failed
# EREAD (-21): Read failed
# EAPPKEY (-22): Invalid application key; request not processed
# ESSL (-23): SSL verification failed
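# Note: the API returns these codes as negative integers; MegaClient.check_error()
# below works on their absolute values (e.g. -9/ENOENT marks the file offline,
# -3/EAGAIN marks it temporarily offline).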
class MegaCrypto(object):
@staticmethod
def base64_decode(data):
#: Add padding, we need a string with a length multiple of 4
data += '=' * (-len(data) % 4)
return base64.b64decode(str(data), "-_")
@staticmethod
def base64_encode(data):
return base64.b64encode(data, "-_")
@staticmethod
def a32_to_str(a):
return struct.pack(">%dI" % len(a), *a) #: big-endian, unsigned int
@staticmethod
def str_to_a32(s):
# Add padding, we need a string with a length multiple of 4
s += '\0' * (-len(s) % 4)
#: big-endian, unsigned int
return struct.unpack(">%dI" % (len(s) / 4), s)
@staticmethod
def a32_to_base64(a):
return MegaCrypto.base64_encode(MegaCrypto.a32_to_str(a))
@staticmethod
def base64_to_a32(s):
return MegaCrypto.str_to_a32(MegaCrypto.base64_decode(s))
@staticmethod
def cbc_decrypt(data, key):
cbc = Crypto.Cipher.AES.new(MegaCrypto.a32_to_str(key), Crypto.Cipher.AES.MODE_CBC, "\0" * 16)
return cbc.decrypt(data)
@staticmethod
def cbc_encrypt(data, key):
cbc = Crypto.Cipher.AES.new(MegaCrypto.a32_to_str(key), Crypto.Cipher.AES.MODE_CBC, "\0" * 16)
return cbc.encrypt(data)
@staticmethod
def get_cipher_key(key):
"""
Construct the cipher key from the given data
"""
k = (key[0] ^ key[4],
key[1] ^ key[5],
key[2] ^ key[6],
key[3] ^ key[7])
iv = key[4:6] + (0, 0)
meta_mac = key[6:8]
return k, iv, meta_mac
@staticmethod
def decrypt_attr(data, key):
"""
Decrypt an encrypted attribute (usually 'a' or 'at' member of a node)
"""
data = MegaCrypto.base64_decode(data)
k, iv, meta_mac = MegaCrypto.get_cipher_key(key)
attr = MegaCrypto.cbc_decrypt(data, k)
#: Data is padded, 0-bytes must be stripped
return json.loads(re.search(r'{.+}', attr).group(0)) if attr[:6] == 'MEGA{"' else False
@staticmethod
def decrypt_key(data, key):
"""
Decrypt an encrypted key ('k' member of a node)
"""
data = MegaCrypto.base64_decode(data)
return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))
for _i in range(0, len(data), 16)), ())
@staticmethod
def encrypt_key(data, key):
"""
Encrypt a decrypted key
"""
data = MegaCrypto.base64_decode(data)
return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_encrypt(data[_i:_i + 16], key))
for _i in range(0, len(data), 16)), ())
@staticmethod
def get_chunks(size):
"""
Calculate chunks for a given encrypted file size
"""
chunk_start = 0
chunk_size = 0x20000
while chunk_start + chunk_size < size:
yield (chunk_start, chunk_size)
chunk_start += chunk_size
if chunk_size < 0x100000:
chunk_size += 0x20000
if chunk_start < size:
yield (chunk_start, size - chunk_start)
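    # Worked example (assumed encrypted size of 0x50000 bytes):
    #   list(MegaCrypto.get_chunks(0x50000)) == [(0x00000, 0x20000), (0x20000, 0x30000)]
    # i.e. chunk sizes grow by 0x20000 (up to 0x100000) and the final chunk covers the rest.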
class Checksum(object):
"""
interface for checking CBC-MAC checksum
"""
def __init__(self, key):
k, iv, meta_mac = MegaCrypto.get_cipher_key(key)
self.hash = '\0' * 16
self.key = MegaCrypto.a32_to_str(k)
self.iv = MegaCrypto.a32_to_str(iv[0:2] * 2)
self.AES = Crypto.Cipher.AES.new(self.key, mode=Crypto.Cipher.AES.MODE_CBC, IV=self.hash)
def update(self, chunk):
cbc = Crypto.Cipher.AES.new(self.key, mode=Crypto.Cipher.AES.MODE_CBC, IV=self.iv)
for j in range(0, len(chunk), 16):
block = chunk[j:j + 16].ljust(16, '\0')
hash = cbc.encrypt(block)
self.hash = self.AES.encrypt(hash)
def digest(self):
"""
Return the **binary** (non-printable) CBC-MAC of the message that has been authenticated so far.
"""
d = MegaCrypto.str_to_a32(self.hash)
return (d[0] ^ d[1], d[2] ^ d[3])
def hexdigest(self):
"""
Return the **printable** CBC-MAC of the message that has been authenticated so far.
"""
return "".join("%02x" % ord(x)
for x in MegaCrypto.a32_to_str(self.digest()))
@staticmethod
def new(key):
return MegaCrypto.Checksum(key)
class MegaClient(object):
API_URL = "https://eu.api.mega.co.nz/cs"
def __init__(self, plugin, node_id):
self.plugin = plugin
self.node_id = node_id
def api_response(self, **kwargs):
"""
Dispatch a call to the api, see https://mega.co.nz/#developers
"""
uid = random.randint(10 << 9, 10 ** 10) #: Generate a session id, no idea where to obtain elsewhere
get_params = {'id': uid}
if self.node_id:
get_params['n'] = self.node_id
if hasattr(self.plugin, 'account'):
if self.plugin.account:
mega_session_id = self.plugin.account.info[
'data'].get('mega_session_id', None)
else:
mega_session_id = None
else:
mega_session_id = self.plugin.info[
'data'].get('mega_session_id', None)
if mega_session_id:
get_params['sid'] = mega_session_id
try:
res = self.plugin.load(self.API_URL,
get=get_params,
post=json.dumps([kwargs]))
except BadHeader, e:
if e.code == 500:
self.plugin.retry(wait_time=60, reason=_("Server busy"))
else:
raise
self.plugin.log_debug("Api Response: " + res)
res = json.loads(res)
if isinstance(res, list):
res = res[0]
return res
def check_error(self, code):
ecode = abs(code)
if ecode in (9, 16, 21):
self.plugin.offline()
elif ecode in (3, 13, 17, 18, 19):
self.plugin.temp_offline()
elif ecode in (1, 4, 6, 10, 15, 21):
self.plugin.retry(max_tries=5, wait_time=30, reason=_("Error code: [%s]") % -ecode)
else:
self.plugin.fail(_("Error code: [%s]") % -ecode)
class MegaCoNz(Hoster):
__name__ = "MegaCoNz"
__type__ = "hoster"
__version__ = "0.52"
__status__ = "testing"
__pattern__ = r'(https?://(?:www\.)?mega(\.co)?\.nz/|mega:|chrome:.+?)#(?P<TYPE>N|)!(?P<ID>[\w^_]+)!(?P<KEY>[\w\-,=]+)(?:###n=(?P<OWNER>[\w^_]+))?'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """Mega.co.nz hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "[email protected]"),
("Walter Purcaro", "[email protected]"),
("GammaC0de", "nitzo2001[AT}yahoo[DOT]com")]
FILE_SUFFIX = ".crypted"
def decrypt_file(self, key):
"""
        Decrypts the file at 'last_download' and verifies its checksum
"""
k, iv, meta_mac = MegaCrypto.get_cipher_key(key)
ctr = Crypto.Util.Counter.new(128, initial_value=((iv[0] << 32) + iv[1]) << 64)
cipher = Crypto.Cipher.AES.new(MegaCrypto.a32_to_str(k), Crypto.Cipher.AES.MODE_CTR, counter=ctr)
self.pyfile.setStatus("decrypting")
self.pyfile.setProgress(0)
file_crypted = encode(self.last_download)
file_decrypted = file_crypted.rsplit(self.FILE_SUFFIX)[0]
try:
f = open(file_crypted, "rb")
df = open(file_decrypted, "wb")
except IOError, e:
self.fail(e.message)
encrypted_size = os.path.getsize(file_crypted)
checksum_activated = self.config.get("activated", default=False, plugin="Checksum")
check_checksum = self.config.get("check_checksum", default=True, plugin="Checksum")
cbc_mac = MegaCrypto.Checksum(key) if checksum_activated and check_checksum else None
progress = 0
for chunk_start, chunk_size in MegaCrypto.get_chunks(encrypted_size):
buf = f.read(chunk_size)
if not buf:
break
chunk = cipher.decrypt(buf)
df.write(chunk)
progress += chunk_size
self.pyfile.setProgress(int((100.0 / encrypted_size) * progress))
if checksum_activated and check_checksum:
cbc_mac.update(chunk)
self.pyfile.setProgress(100)
f.close()
df.close()
self.log_info(_("File decrypted"))
os.remove(file_crypted)
if checksum_activated and check_checksum:
file_mac = cbc_mac.digest()
if file_mac == meta_mac:
self.log_info(_('File integrity of "%s" verified by CBC-MAC checksum (%s)') %
(self.pyfile.name.rsplit(self.FILE_SUFFIX)[0], meta_mac))
else:
self.log_warning(_('CBC-MAC checksum for file "%s" does not match (%s != %s)') %
(self.pyfile.name.rsplit(self.FILE_SUFFIX)[0], file_mac, meta_mac))
self.checksum_failed(file_decrypted, _("Checksums do not match"))
self.last_download = decode(file_decrypted)
def checksum_failed(self, local_file, msg):
check_action = self.config.get("check_action", default="retry", plugin="Checksum")
if check_action == "retry":
max_tries = self.config.get("max_tries", default=2, plugin="Checksum")
retry_action = self.config.get("retry_action", default="fail", plugin="Checksum")
if all(_r < max_tries for _id, _r in self.retries.items()):
os.remove(local_file)
wait_time = self.config.get("wait_time", default=1, plugin="Checksum")
self.retry(max_tries, wait_time, msg)
elif retry_action == "nothing":
return
elif check_action == "nothing":
return
os.remove(local_file)
self.fail(msg)
def check_exists(self, name):
"""
        Because Mega downloads are first saved to a temporary encrypted file with the
        '.crypted' extension, pyLoad cannot correctly detect if the file exists before downloading.
This function corrects this.
Raises Skip() if file exists and 'skip_existing' configuration option is set to True.
"""
if self.pyload.config.get("download", "skip_existing"):
download_folder = self.pyload.config.get('general', 'download_folder')
dest_file = fsjoin(download_folder,
self.pyfile.package().folder if self.pyload.config.get("general", "folder_per_package") else "",
name)
if exists(dest_file):
self.pyfile.name = name
self.skip(_("File exists."))
def process(self, pyfile):
id = self.info['pattern']['ID']
key = self.info['pattern']['KEY']
public = self.info['pattern']['TYPE'] == ""
owner = self.info['pattern']['OWNER']
if not public and not owner:
self.log_error(_("Missing owner in URL"))
self.fail(_("Missing owner in URL"))
self.log_debug("ID: %s" % id,
_("Key: %s") % key,
_("Type: %s") % ("public" if public else "node"),
_("Owner: %s") % owner)
key = MegaCrypto.base64_to_a32(key)
if len(key) != 8:
self.log_error(_("Invalid key length"))
self.fail(_("Invalid key length"))
mega = MegaClient(self, self.info['pattern'][
'OWNER'] or self.info['pattern']['ID'])
#: G is for requesting a download url
#: This is similar to the calls in the mega js app, documentation is very bad
if public:
res = mega.api_response(a="g", g=1, p=id, ssl=1)
else:
res = mega.api_response(a="g", g=1, n=id, ssl=1)
if isinstance(res, int):
mega.check_error(res)
elif isinstance(res, dict) and 'e' in res:
mega.check_error(res['e'])
attr = MegaCrypto.decrypt_attr(res['at'], key)
if not attr:
self.fail(_("Decryption failed"))
self.log_debug("Decrypted Attr: %s" % decode(attr))
name = attr['n']
self.check_exists(name)
pyfile.name = name + self.FILE_SUFFIX
pyfile.size = res['s']
time_left = res.get('tl', 0)
if time_left:
self.log_warning(_("Free download limit reached"))
self.retry(wait=time_left, msg=_("Free download limit reached"))
# self.req.http.c.setopt(pycurl.SSL_CIPHER_LIST, "RC4-MD5:DEFAULT")
try:
self.download(res['g'])
except BadHeader, e:
if e.code == 509:
self.fail(_("Bandwidth Limit Exceeded"))
else:
raise
self.decrypt_file(key)
#: Everything is finished and final name can be set
pyfile.name = name
| gpl-3.0 | -735,684,338,417,736,400 | 34.936508 | 224 | 0.549849 | false |
thast/EOSC513 | DC/SparseGN/Identity_withoutW/Id_withoutW.py | 1 | 7654 | from SimPEG import Mesh, Regularization, Maps, Utils, EM
from SimPEG.EM.Static import DC
import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import copy
import pandas as pd
from scipy.sparse import csr_matrix, spdiags, dia_matrix,diags
from scipy.sparse.linalg import spsolve
from scipy.stats import norm,multivariate_normal
import sys
path ="../pymatsolver/"
path = "../../../Documents/pymatsolver/"
sys.path.append(path)
from pymatsolver import PardisoSolver
from scipy.interpolate import LinearNDInterpolator, interp1d
from sklearn.mixture import GaussianMixture
from SimPEG import DataMisfit, Regularization, Optimization, InvProblem, Directives, Inversion
#2D model
csx, csy, csz = 0.25,0.25,0.25
# Number of core cells in each direction
ncx, ncz = 123,41
# Number of padding cells to add in each direction
npad = 12
# Vectors of cell lengths in each direction
hx = [(csx,npad, -1.5),(csx,ncx),(csx,npad, 1.5)]
hz= [(csz,npad,-1.5),(csz,ncz)]
# Create mesh
mesh = Mesh.TensorMesh([hx, hz],x0="CN")
# Map mesh coordinates from local to UTM coordiantes
#mesh.x0[2] = mesh.x0[2]-mesh.vectorCCz[-npad-1]
mesh.x0[1] = mesh.x0[1]+csz/2.
#mesh.x0[0] = mesh.x0[0]+csx/2.
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#mesh.plotImage(np.ones(mesh.nC)*np.nan, grid=True)
#plt.gca().set_xlim([-20,20])
#plt.gca().set_ylim([-15,0])
#mesh.plotGrid()
#plt.gca().set_aspect('equal')
#plt.show()
print "Mesh Size: ", mesh.nC
#Model Creation
lnsig_air = 1e-8;
x0,z0, r0 = -6., -4., 3.
x1,z1, r1 = 6., -4., 3.
ln_sigback = -5.
ln_sigc = -3.
ln_sigr = -7.
noisemean = 0.
noisevar = 0.0
overburden_extent = 0.
ln_over = -4.
#m = (lnsig_background)*np.ones(mesh.nC);
#mu =np.ones(mesh.nC);
mtrue = ln_sigback*np.ones(mesh.nC) + norm(noisemean,noisevar).rvs(mesh.nC)
overb = (mesh.gridCC[:,1] >-overburden_extent) & (mesh.gridCC[:,1]<=0)
mtrue[overb] = ln_over*np.ones_like(mtrue[overb])+ norm(noisemean,noisevar).rvs(np.prod((mtrue[overb]).shape))
csph = (np.sqrt((mesh.gridCC[:,1]-z0)**2.+(mesh.gridCC[:,0]-x0)**2.))< r0
mtrue[csph] = ln_sigc*np.ones_like(mtrue[csph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[csph]).shape))
#Define the sphere limit
rsph = (np.sqrt((mesh.gridCC[:,1]-z1)**2.+(mesh.gridCC[:,0]-x1)**2.))< r1
mtrue[rsph] = ln_sigr*np.ones_like(mtrue[rsph]) + norm(noisemean,noisevar).rvs(np.prod((mtrue[rsph]).shape))
mtrue = Utils.mkvc(mtrue);
mesh.plotGrid()
plt.gca().set_xlim([-10,10])
plt.gca().set_ylim([-10,0])
xyzlim = np.r_[[[-10.,10.],[-10.,1.]]]
actind, meshCore = Utils.meshutils.ExtractCoreMesh(xyzlim,mesh)
plt.hist(mtrue[actind],bins =50,normed=True);
fig0 = plt.figure()
ax0 = fig0.add_subplot(111)
mm = meshCore.plotImage(mtrue[actind],ax = ax0)
plt.colorbar(mm[0])
ax0.set_aspect("equal")
#plt.show()
def getCylinderPoints(xc,zc,r):
xLocOrig1 = np.arange(-r,r+r/10.,r/10.)
xLocOrig2 = np.arange(r,-r-r/10.,-r/10.)
# Top half of cylinder
zLoc1 = np.sqrt(-xLocOrig1**2.+r**2.)+zc
# Bottom half of cylinder
zLoc2 = -np.sqrt(-xLocOrig2**2.+r**2.)+zc
# Shift from x = 0 to xc
xLoc1 = xLocOrig1 + xc*np.ones_like(xLocOrig1)
xLoc2 = xLocOrig2 + xc*np.ones_like(xLocOrig2)
topHalf = np.vstack([xLoc1,zLoc1]).T
topHalf = topHalf[0:-1,:]
bottomHalf = np.vstack([xLoc2,zLoc2]).T
bottomHalf = bottomHalf[0:-1,:]
cylinderPoints = np.vstack([topHalf,bottomHalf])
cylinderPoints = np.vstack([cylinderPoints,topHalf[0,:]])
return cylinderPoints
cylinderPoints0 = getCylinderPoints(x0,z1,r0)
cylinderPoints1 = getCylinderPoints(x1,z1,r1)
#Gradient array 1 2D
srclist = []
nSrc = 23
lines = 1
ylines = np.r_[0.]
xlines = np.r_[0.]
z = 0.
#xline
for k in range(lines):
for i in range(nSrc):
if i<=11:
locA = np.r_[-14.+1., z]
locB = np.r_[-8.+2.*i-1., z]
#M = np.c_[np.arange(-12.,-12+2*(i+1),2),np.ones(i+1)*z]
#N = np.c_[np.arange(-10.,-10+2*(i+1),2),np.ones(i+1)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
#print -locA,-locB,"\n",[-M,-N],"\n"
else:
locA = np.r_[-14.+2*(i-11)+1., z]
locB = np.r_[14.-1.,z]
#M = np.c_[np.arange(locA[0]+1.,12.,2),np.ones(nSrc-i)*z]
#N = np.c_[np.arange(locA[0]+3.,14.,2),np.ones(nSrc-i)*z]
M = np.c_[np.arange(-12.,10+1,2),np.ones(12)*z]
N = np.c_[np.arange(-10.,12+1,2),np.ones(12)*z]
rx = DC.Rx.Dipole(M,N)
src= DC.Src.Dipole([rx],locA,locB)
srclist.append(src)
#print "line2",locA,locB,"\n",[M,N],"\n"
#rx = DC.Rx.Dipole(-M,-N)
#src= DC.Src.Dipole([rx],-locA,-locB)
#srclist.append(src)
mapping = Maps.ExpMap(mesh)
survey = DC.Survey(srclist)
problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
problem.pair(survey)
problem.Solver = PardisoSolver
dmis = DataMisfit.l2_DataMisfit(survey)
survey.dpred(mtrue)
survey.makeSyntheticData(mtrue,std=0.05,force=True)
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
print '# of data: ', survey.dobs.shape
import spgl1
#Parameter for SPGL1 iterations
nits = 10
mID = (-5.)*np.ones_like(mtrue)
it = 0
phi_d_normal = np.load('../../NormalInversion/NormalInversion/phid_normal.npy')
ratio = np.r_[6.5,phi_d_normal[0:-1]/phi_d_normal[1:]]
#ratio = 10.*np.ones(nits)
min_progress = 1.2
xlist = []
print 'ratio: ',ratio
#Parameters for W
#nsubSrc = 5
#InnerIt = 1
#dmisfitsub = []
#Initialize Random Source
#W = np.random.randn(survey.nSrc,nsubSrc)
#problem.unpair()
#problem.pair(survey)
#Q = problem.getRHS()
#sub = problem.getRHS().dot(W)
#rx_r = SimultaneousRx(locs=P)
#srcList_r = []
#for isrc in range(sub.shape[1]):
# src_r = SimultaneousSrc([rx_r], Q=Q[:,isrc],W=W[:,isrc],QW =Q.dot(W)[:,isrc])
# srcList_r.append(src_r)
#survey_r = DC.Survey(srcList_r)
#problem.unpair()
#problem.pair(survey_r)
d = survey.dpred(mtrue)
survey.dobs = d
survey.std = np.ones_like(d)*0.05
survey.eps = 1e-5*np.linalg.norm(survey.dobs)
dmisfitall = []
dmisfitall.append(dmis.eval(mID)/survey.nD)
print "end iteration: ",it, '; Overall Normalized Misfit: ', dmis.eval(mID)/survey.nD
while (dmis.eval(mID)/survey.nD)>0.5 and it<nits:
def JS(x,mode):
if mode == 1:
return problem.Jvec(mID,x)
else:
return problem.Jtvec(mID,x)
b = survey.dpred(mID)-survey.dpred(mtrue)
opts = spgl1.spgSetParms({'iterations':100, 'verbosity':2})
sigtol = np.linalg.norm(b)/np.maximum(ratio[it],min_progress)
#tautol = 20000.
x,resid,grad,info = spgl1.spg_bpdn(JS, b, sigma = sigtol,options=opts)
#x,resid,grad,info = spgl1.spg_lasso(JS,b,tautol,opts)
#assert dmis.eval(mID) > dmis.eval(mID - x)
mID = mID - x
it +=1
print "end iteration: ",it, '; Normalized Misfit: ', dmis.eval(mID)/survey.nD
dmisfitall.append(dmis.eval(mID)/survey.nD)
xlist.append(x)
np.save('./dmisfitall.npy',dmisfitall)
np.save('./mfinal.npy',mID)
np.savez('./xlist.npz',xlist)
mm = mesh.plotImage(mID)
plt.colorbar(mm[0])
plt.gca().set_xlim([-10.,10.])
plt.gca().set_ylim([-10.,0.])
plt.plot(cylinderPoints0[:,0],cylinderPoints0[:,1], linestyle = 'dashed', color='k')
plt.plot(cylinderPoints1[:,0],cylinderPoints1[:,1], linestyle = 'dashed', color='k')
plt.show() | mit | -6,036,011,230,578,902,000 | 29.742972 | 110 | 0.629736 | false |
mezz64/home-assistant | tests/components/speedtestdotnet/test_init.py | 7 | 2310 | """Tests for SpeedTest integration."""
import speedtest
from homeassistant import config_entries
from homeassistant.components import speedtestdotnet
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_setup_with_config(hass):
"""Test that we import the config and setup the integration."""
config = {
speedtestdotnet.DOMAIN: {
speedtestdotnet.CONF_SERVER_ID: "1",
speedtestdotnet.CONF_MANUAL: True,
speedtestdotnet.CONF_SCAN_INTERVAL: "00:01:00",
}
}
with patch("speedtest.Speedtest"):
assert await async_setup_component(hass, speedtestdotnet.DOMAIN, config)
async def test_successful_config_entry(hass):
"""Test that SpeedTestDotNet is configured successfully."""
entry = MockConfigEntry(
domain=speedtestdotnet.DOMAIN,
data={},
)
entry.add_to_hass(hass)
with patch("speedtest.Speedtest"), patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == config_entries.ENTRY_STATE_LOADED
assert forward_entry_setup.mock_calls[0][1] == (
entry,
"sensor",
)
async def test_setup_failed(hass):
"""Test SpeedTestDotNet failed due to an error."""
entry = MockConfigEntry(
domain=speedtestdotnet.DOMAIN,
data={},
)
entry.add_to_hass(hass)
with patch("speedtest.Speedtest", side_effect=speedtest.ConfigRetrievalError):
await hass.config_entries.async_setup(entry.entry_id)
assert entry.state == config_entries.ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass):
"""Test removing SpeedTestDotNet."""
entry = MockConfigEntry(
domain=speedtestdotnet.DOMAIN,
data={},
)
entry.add_to_hass(hass)
with patch("speedtest.Speedtest"):
await hass.config_entries.async_setup(entry.entry_id)
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ENTRY_STATE_NOT_LOADED
assert speedtestdotnet.DOMAIN not in hass.data
| apache-2.0 | 4,729,427,388,357,643,000 | 28.615385 | 82 | 0.689177 | false |
erwilan/ansible | lib/ansible/modules/cloud/amazon/efs_facts.py | 31 | 11554 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: efs_facts
short_description: Get information about Amazon EFS file systems
description:
- Module searches Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
options:
name:
description:
- Creation Token of Amazon EFS file system.
required: false
default: None
id:
description:
- ID of Amazon EFS.
required: false
default: None
tags:
description:
- List of tags of Amazon EFS. Should be defined as dictionary
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- SubnetId - Mandatory. The ID of the subnet to add the mount target in.
- IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
- SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified."
required: false
default: None
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
# find all existing efs
- efs_facts:
register: result
- efs_facts:
name: myTestNameTag
- efs_facts:
id: fs-1234abcd
# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
- efs_facts:
tags:
name: myTestNameTag
targets:
- subnet-1a2b3c4d
- sg-4d3c2b1a
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: str
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: str
sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
file_system_id:
description: ID of the file system
returned: always
type: str
sample: fs-xxxxxxxx
life_cycle_state:
description: state of the EFS file system
returned: always
type: str
sample: creating, available, deleting, deleted
mount_point:
description: url of file system
returned: always
type: str
sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: str
sample: my-efs
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: str
sample: XXXXXXXXXXXX
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: str
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from time import sleep
from collections import defaultdict
try:
from botocore.exceptions import ClientError
import boto3
HAS_BOTO3 = True
except ImportError as e:
HAS_BOTO3 = False
class EFSConnection(object):
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
except Exception as e:
module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
self.region = region
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = iterate_all(
'Tags',
self.connection.describe_tags,
**kwargs
)
return dict((tag['Key'], tag['Value']) for tag in tags)
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def iterate_all(attr, map_method, **kwargs):
"""
Method creates iterator from boto result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
                args['Marker'] = data['NextMarker']
continue
break
except ClientError as e:
            if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
                sleep(wait)
                wait = wait * 2
                continue
            # Re-raise anything that is not a throttling error instead of retrying forever
            raise
def prefix_to_attr(attr_id):
"""
Helper method to convert ID prefix to mount target attribute
"""
attr_by_prefix = {
'fsmt-': 'MountTargetId',
'subnet-': 'SubnetId',
'eni-': 'NetworkInterfaceId',
'sg-': 'SecurityGroups'
}
prefix = first_or_default(filter(
lambda pref: str(attr_id).startswith(pref),
attr_by_prefix.keys()
))
if prefix:
return attr_by_prefix[prefix]
return 'IpAddress'
def first_or_default(items, default=None):
"""
Helper method to fetch first element of list (if exists)
"""
for item in items:
return item
return default
def has_tags(available, required):
"""
    Helper method to determine if the requested tags already exist
"""
for key, value in required.items():
if key not in available or value != available[key]:
return False
return True
def has_targets(available, required):
"""
    Helper method to determine if the requested mount targets already exist
"""
grouped = group_list_of_dict(available)
for (value, field) in required:
if field not in grouped or value not in grouped[field]:
return False
return True
def group_list_of_dict(array):
"""
Helper method to group list of dict to dict with all possible values
"""
result = defaultdict(list)
for item in array:
for key, value in item.items():
result[key] += value if isinstance(value, list) else [value]
return result
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
id=dict(required=False, type='str', default=None),
name=dict(required=False, type='str', default=None),
tags=dict(required=False, type="dict", default={}),
targets=dict(required=False, type="list", default=[])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
targets = module.params.get('targets')
file_systems_info = connection.get_file_systems(FileSystemId=fs_id, CreationToken=name)
if tags:
file_systems_info = filter(lambda item: has_tags(item['Tags'], tags), file_systems_info)
if targets:
targets = [(item, prefix_to_attr(item)) for item in targets]
file_systems_info = filter(lambda item:
has_targets(item['MountTargets'], targets), file_systems_info)
file_systems_info = [camel_dict_to_snake_dict(x) for x in file_systems_info]
module.exit_json(changed=False, ansible_facts={'efs': file_systems_info})
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | -5,584,986,281,401,199,000 | 29.405263 | 156 | 0.595638 | false |
Dramac/TriViSiJu | modules/__init__.py | 1 | 1074 | # To get sub-modules
""" TriViSiJu: Graphical interface for the AstroJeune Festival
Copyright (C) 2012 Jules DAVID, Tristan GREGOIRE, Simon NICOLAS and Vincent PRAT
This file is part of TriViSiJu.
TriViSiJu is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TriViSiJu is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TriViSiJu. If not, see <http://www.gnu.org/licenses/>.
"""
from countdown import *
from movieplayer import *
from prompt import *
from teams import *
from scrolltext import *
from decrypt import *
from caract import *
from gsplayer import SongPlayer
from enigme import PopupWindow
| gpl-3.0 | 7,324,211,841,122,520,000 | 33.645161 | 82 | 0.758845 | false |
airportmarc/the416life | src/apps/users/forms.py | 1 | 3834 | """
This is to supply the new signup page over AllAuth's default pages
"""
from django import forms
from django.forms.widgets import CheckboxInput
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML, Field, Button, Div
from django.conf import settings
from django.contrib.auth import get_user_model
from django.urls import reverse
import logging
#from src.apps.Profile.models import Profile, Agent
from src.apps.program.models import ProgramUsers
class SignupForm(forms.Form):
first_name = forms.CharField(max_length=30)
last_name = forms.CharField(max_length=30)
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__( *args, **kwargs)
self.helper = FormHelper()
self.helper.form_show_labels = False
self.helper.field_class = ''
self.helper.layout = Layout(
Field('first_name', placeholder='First Name', autocomplete='off'),
Field('last_name', placeholder='Last Name', autocomplete='off'),
Field('email', placeholder='Email', autocomplete='off'),
Field('password1', placeholder='Password', autocomplete='off'),
Div(Submit('Register', 'Register', css_class='btn btn-primary block full-width m-b'), css_class='form-group'),
HTML('<p class="text-muted text-center"><small>Already have an account?</small></p>'),
Div(HTML('<a class ="btn btn-sm btn-white btn-block" href="' + reverse('account_login') + ' " > Login </a>'),css_class='form-group' )
)
def signup(self, request, user):
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.save()
class ClientSignupForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput())
def __init__(self, **kwargs):
super(ClientSignupForm, self).__init__(**kwargs)
self.helper = FormHelper()
self.helper.form_show_labels = False
self.helper.field_class = ''
self.helper.layout = Layout(
Field('email', placeholder='Email', readonly=True, autocomplete='off'),
Field('password', placeholder='Password', autocomplete='off'),
Submit('Register', 'Register', css_class='btn btn-primary m-b')
)
#
# class ContactDetailsForm(forms.ModelForm):
#
#
# def __init__(self, *args, **kwargs):
# super(ContactDetailsForm, self).__init__(*args, **kwargs)
#
# self.helper = FormHelper(self)
# self.helper.form_class = 'form-inline'
# self.helper.form_id = 'addNewAddress'
# self.helper.disable_csrf = False
# self.helper.label_class = 'col-sm-4'
# self.helper.field_class = 'col-sm-4'
# self.helper.form_tag = False
# self.helper.layout = Layout(
# HTML("<div class='hidden' id='addressDate'>"),
# 'is_current',
# Field('start_date', data_provider='datepicker', css_class='datepicker'),
# Field('end_date', data_provider='datepicker', css_class='datepicker'),
# Field('address_1', data_geo='route'),
# Field('country', data_geo='country'),
# Field('city', data_geo='locality'),
# Field('postal_code', data_geo='postal_code'),
# Field('state', data_geo='administrative_area_level_1'),
# HTML("</div>")
#
# )
#
# class Meta:
# model = ContactDetails
# fields = ['address_1', 'country', 'state', 'city', 'postal_code',
# 'is_current', 'start_date', 'end_date']
# widgets = {
# 'is_current': CheckboxInput()
# }
class ProgramSignUp(forms.ModelForm):
class Meta:
model = ProgramUsers
fields = ['program']
| mit | 7,812,487,095,638,604,000 | 37.727273 | 145 | 0.60746 | false |
JGulbronson/rmc | kittens/add_from_flickr.py | 8 | 3011 | #!/usr/bin/env python
from StringIO import StringIO
import os
import re
import sys
import requests
from PIL import Image
import rmc.kittens.data as kitten_data
import rmc.shared.constants as c
import rmc.shared.secrets as s
def get_photo_info_from_flickr(photo_id):
print >>sys.stderr, 'Getting information about photo id:', photo_id
url = ("http://api.flickr.com/services/rest/?method=flickr.photos.getInfo"
"&api_key=%s"
"&photo_id=%s"
"&format=json"
"&nojsoncallback=1") % (s.FLICKR_API_KEY, photo_id)
return requests.get(url).json["photo"]
COLOR_WIDTH = 150
COLOR_HEIGHT = 150
GREY_WIDTH = 50
GREY_HEIGHT = 50
BASE_OUTDIR = os.path.join(c.RMC_ROOT, 'server', 'static', 'img', 'kittens')
def download_photo(photo_info, index):
photo_url = ('http://farm%(farm_id)s.staticflickr.com/'
'%(server_id)s/%(photo_id)s_%(secret_id)s.jpg') % {
'farm_id': photo_info['farm'],
'server_id': photo_info['server'],
'photo_id': photo_info['id'],
'secret_id': photo_info['secret'],
}
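    # For illustration only (the farm/server/id/secret values here are made up),
    # the formatted URL looks like: http://farm1.staticflickr.com/2/12345_abcdef.jpg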
print >>sys.stderr, 'Downloading', photo_url, '...'
photo_content = requests.get(photo_url).content
color_img = Image.open(StringIO(photo_content))
width, height = color_img.size
min_dim = min(width, height)
# Crop to square
crop_box = (
(width - min_dim) / 2,
(height - min_dim) / 2,
width - (width - min_dim) / 2,
height - (height - min_dim) / 2
)
color_img = color_img.crop(crop_box)
try:
grey_img = color_img.copy().convert('L')
except IOError:
print >>sys.stderr
print >>sys.stderr, 'WARNING! You might be missing libjpeg.'
print >>sys.stderr, 'On OSX: brew install libjpeg'
print >>sys.stderr, 'On Ubuntu: sudo apt-get install libjpeg-dev'
print >>sys.stderr, 'On Fedora: yum install libjpeg-devel'
print >>sys.stderr
print >>sys.stderr, 'After you have libjpeg installed:'
print >>sys.stderr, 'pip uninstall Pillow'
print >>sys.stderr, 'pip install -r requirements.txt'
print >>sys.stderr
raise
color_img.thumbnail((COLOR_WIDTH, COLOR_HEIGHT), Image.ANTIALIAS)
grey_img.thumbnail((GREY_WIDTH, GREY_HEIGHT), Image.ANTIALIAS)
color_img_path = os.path.join(BASE_OUTDIR, 'color', '%d.jpg' % index)
grey_img_path = os.path.join(BASE_OUTDIR, 'grey', '%d.jpg' % index)
color_img.save(color_img_path)
print >>sys.stderr, 'Saved', os.path.normpath(color_img_path)
grey_img.save(grey_img_path)
print >>sys.stderr, 'Saved', os.path.normpath(grey_img_path)
if __name__ == '__main__':
new_flickr_url = sys.argv[1]
new_photo_id = re.compile('\d+').findall(new_flickr_url)[-1]
new_photo_info = get_photo_info_from_flickr(new_photo_id)
index = kitten_data.add_kitten_data(new_photo_info)
download_photo(new_photo_info, index)
| mit | 546,238,919,285,766,660 | 31.728261 | 78 | 0.610096 | false |
reimandlab/Visualistion-Framework-for-Genome-Mutations | website/tests/test_imports/test_kinase.py | 2 | 2076 | from imports.protein_data import kinase_mappings as load_kinase_mappings
from database_testing import DatabaseTest
from miscellaneous import make_named_temp_file
from database import db
from models import Kinase, Protein, Gene
# head data/curated_kinase_IDs.txt
curated_kinases_list = """\
LCK LCK
SRC SRC
FYN FYN
ABL ABL1
CDK2 CDK2
CHK1 CHEK1
CDK1 CDK1
PDK-1 PDK1
"""
class TestImport(DatabaseTest):
def test_mappings(self):
genes = {}
for i, gene_name in enumerate(['LCK', 'SRC', 'FYN', 'PDK', 'PDK1']):
gene = Gene(name=gene_name, preferred_isoform=Protein(refseq='NM_000%s' % i))
genes[gene_name] = gene
db.session.add_all(genes.values())
# create a single pre-defined kinase with wrongly assigned protein (PDK for PDK-1)
pdk_1 = Kinase(name='PDK-1', protein=genes['PDK'].preferred_isoform)
# a kinase without protein assigned
fyn = Kinase(name='FYN')
db.session.add_all([pdk_1, fyn])
filename = make_named_temp_file(curated_kinases_list)
with self.app.app_context():
new_kinases = load_kinase_mappings(filename)
        # new kinases shall be created only for the 5 genes which are
# already present in the database, but out of these 5 genes
# only 4 occur in the curated list of kinase-gene mappings;
# moreover two of these 4 kinases already exists (PDK-1, FYN)
assert len(new_kinases) == 2
db.session.add_all(new_kinases)
# test protein assignment
lck = Kinase.query.filter_by(name='LCK').one()
cases = {
# was the kinase created and a correct protein assigned?
lck: 'LCK',
# was the protein assigned to existing kinase
fyn: 'FYN',
# was the protein of PDK-1 re-assigned to PDK1?
pdk_1: 'PDK1'
}
for kinase, gene_name in cases.items():
assert kinase.protein == genes[gene_name].preferred_isoform
| lgpl-2.1 | -5,855,577,757,118,663,000 | 30.454545 | 90 | 0.610308 | false |
Joergen/zamboni | settings_test.py | 1 | 3729 | import atexit
import tempfile
from django.utils.functional import lazy
_tmpdirs = set()
def _cleanup():
try:
import sys
import shutil
except ImportError:
return
tmp = None
try:
for tmp in _tmpdirs:
shutil.rmtree(tmp)
except Exception, exc:
sys.stderr.write("\n** shutil.rmtree(%r): %s\n" % (tmp, exc))
atexit.register(_cleanup)
def _polite_tmpdir():
tmp = tempfile.mkdtemp()
_tmpdirs.add(tmp)
return tmp
# See settings.py for documentation:
IN_TEST_SUITE = True
NETAPP_STORAGE = _polite_tmpdir()
ADDONS_PATH = _polite_tmpdir()
GUARDED_ADDONS_PATH = _polite_tmpdir()
SIGNED_APPS_PATH = _polite_tmpdir()
SIGNED_APPS_REVIEWER_PATH = _polite_tmpdir()
UPLOADS_PATH = _polite_tmpdir()
MIRROR_STAGE_PATH = _polite_tmpdir()
TMP_PATH = _polite_tmpdir()
COLLECTIONS_ICON_PATH = _polite_tmpdir()
PACKAGER_PATH = _polite_tmpdir()
REVIEWER_ATTACHMENTS_PATH = _polite_tmpdir()
DUMPED_APPS_PATH = _polite_tmpdir()
# We won't actually send an email.
SEND_REAL_EMAIL = True
# Turn off search engine indexing.
USE_ELASTIC = False
# Ensure all validation code runs in tests:
VALIDATE_ADDONS = True
PAYPAL_PERMISSIONS_URL = ''
SITE_URL = 'http://testserver'
STATIC_URL = SITE_URL + '/'
MOBILE_SITE_URL = ''
MEDIA_URL = '/media/'
# Reset these URLs to the defaults so your settings_local doesn't clobber them:
ADDON_ICONS_DEFAULT_URL = MEDIA_URL + '/img/addon-icons'
ADDON_ICON_BASE_URL = MEDIA_URL + 'img/icons/'
ADDON_ICON_URL = (STATIC_URL +
'img/uploads/addon_icons/%s/%s-%s.png?modified=%s')
PREVIEW_THUMBNAIL_URL = (STATIC_URL +
'img/uploads/previews/thumbs/%s/%d.png?modified=%d')
PREVIEW_FULL_URL = (STATIC_URL +
'img/uploads/previews/full/%s/%d.%s?modified=%d')
USERPICS_URL = STATIC_URL + 'img/uploads/userpics/%s/%s/%s.png?modified=%d'
CACHES = {
'default': {
'BACKEND': 'caching.backends.locmem.LocMemCache',
}
}
# COUNT() caching can't be invalidated, it just expires after x seconds. This
# is just too annoying for tests, so disable it.
CACHE_COUNT_TIMEOUT = None
# No more failures!
APP_PREVIEW = False
# Overrides whatever storage you might have put in local settings.
DEFAULT_FILE_STORAGE = 'amo.utils.LocalFileStorage'
VIDEO_LIBRARIES = ['lib.video.dummy']
ALLOW_SELF_REVIEWS = True
# Make sure debug toolbar output is disabled so it doesn't interfere with any
# html tests.
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TOOLBAR_CALLBACK': lambda r: False,
'HIDE_DJANGO_SQL': True,
'TAG': 'div',
'ENABLE_STACKTRACES': False,
}
MOZMARKET_VENDOR_EXCLUDE = []
# These are the default languages. If you want a constrained set for your
# tests, you should add those in the tests.
def lazy_langs(languages):
from product_details import product_details
if not product_details.languages:
return {}
return dict([(i.lower(), product_details.languages[i]['native'])
for i in languages])
AMO_LANGUAGES = (
'af', 'ar', 'bg', 'ca', 'cs', 'da', 'de', 'el', 'en-US', 'es', 'eu', 'fa',
'fi', 'fr', 'ga-IE', 'he', 'hu', 'id', 'it', 'ja', 'ko', 'mn', 'nl', 'pl',
'pt-BR', 'pt-PT', 'ro', 'ru', 'sk', 'sl', 'sq', 'sv-SE', 'uk', 'vi',
'zh-CN', 'zh-TW',
)
LANGUAGES = lazy(lazy_langs, dict)(AMO_LANGUAGES)
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
TASK_USER_ID = '4043307'
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
SQL_RESET_SEQUENCES = False
GEOIP_URL = ''
GEOIP_DEFAULT_VAL = 'worldwide'
GEOIP_DEFAULT_TIMEOUT = .2
ES_DEFAULT_NUM_REPLICAS = 0
ES_DEFAULT_NUM_SHARDS = 3
PAYMENT_LIMITED = False
IARC_MOCK = True
| bsd-3-clause | 7,081,259,407,107,484,000 | 25.446809 | 79 | 0.654867 | false |
Panda3D-google-code-repositories/naith | game/bin/manager.py | 1 | 7278 | # -*- coding: utf-8 -*-
# Copyright Tom SF Haines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import xml.etree.ElementTree as et
import imp
import types
from direct.showbase.ShowBase import ShowBase
class Manager:
"""The simple plugin system - this is documented in the docs directory."""
def __init__(self,baseDir = ''):
    # Basic configuration variables...
self.baseDir = baseDir
self.pluginDir = 'plugins'
self.configDir = self.baseDir+'config/'
self.loadingInvFrameRate = 1.0/20.0
# The plugin database - dictionary of modules...
self.plugin = dict()
# Create the instance database - a list in creation order of (obj,name) where name can be None for nameless objects, plus a dictionary to get at the objects by name...
self.objList = []
self.named = dict()
# The above, but only used during transitions...
self.oldObjList = None
self.oldNamed = None
# For pandaStep...
self.lastTime = 0.0
def transition(self,config):
    'Transitions from the current configuration to a new configuration, letting Panda keep drawing whilst it does so, so any special loading screen plugin can do its stuff. Maintains some variables in this class so such a plugin can also display a loading bar.'
# Step 1 - call stop on all current objects- do this immediatly as we can't have some running whilst others are not...
for obj in self.objList:
stop = getattr(obj[0],'stop',None)
if isinstance(stop,types.MethodType):
stop()
    # Declare the task that is going to make the transition - done this way to keep rendering whilst we make the transition, for a loading screen etc. This is a generator for convenience...
def transTask(task):
# Step 2 - move the database to 'old', make a new one...
self.oldObjList = self.objList
self.oldNamed = self.named
self.objList = []
self.named = dict()
yield task.cont
# Step 3 - load and iterate the config file and add in each instance...
elem = et.parse(self.configDir+config+'.xml')
yield task.cont
for obj in elem.findall('obj'):
for blah in self.addObj(obj):
yield task.cont
# Step 4 - destroy the old database - call destroy methods when it exists...
for obj in self.oldObjList:
inst = obj[0]
name = obj[1]
if (not self.oldNamed.has_key(name)) or self.oldNamed[name]!=True:
        # It needs to die - we let the reference count dropping to zero do the actual deletion but it might have a slow death, so we use the destroy method/generator to make it happen during the progress bar rather than blocking the gc at some random point...
destroy = getattr(inst,'destroy',None)
if isinstance(destroy,types.MethodType):
ret = destroy()
yield task.cont
if isinstance(ret,types.GeneratorType):
for blah in ret:
yield task.cont
self.oldObjList = None
self.oldNamed = None
yield task.cont
# Step 5 - call start on all current objects - done in a single step to avoid problems, so no yields...
for obj in self.objList:
start = getattr(obj[0],'start',None)
if isinstance(start,types.MethodType):
start()
def transFrameLimiter(task):
prevTime = globalClock.getRealTime()
for r in transTask(task):
currTime = globalClock.getRealTime()
if (currTime-prevTime)>(1.0/25.0):
yield task.cont
prevTime = currTime
# Create a task to do the dirty work...
taskMgr.add(transFrameLimiter,'Transition')
def end(self):
    'Ends the program neatly - closes down all the plugins before calling sys.exit(). Effectively a partial transition, though without the framerate maintenance.'
# Stop all the plugins...
for obj in self.objList:
stop = getattr(obj[0],'stop',None)
if isinstance(stop,types.MethodType):
stop()
# Destroy the database...
for obj in self.objList:
inst = obj[0]
name = obj[1]
destroy = getattr(inst,'destroy',None)
if isinstance(destroy,types.MethodType):
ret = destroy()
if isinstance(ret,types.GeneratorType):
for blah in ret:
pass
# Die...
sys.exit()
def addObj(self,element):
    'Given an xml.etree Element of type obj this does the necessary work - can only be called during a transition, exposed like this for the Include class. Note that it is a generator.'
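    # Illustrative sketch of the XML this method consumes, inferred from the
    # element.get() calls below; the root tag and the attribute values are
    # assumptions, not taken from an actual config file:
    #   <config>
    #     <obj type="SomePlugin" name="optionalName"/>
    #   </config>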
# Step 1 - get the details of the plugin we will be making...
plugin = element.get('type')
name = element.get('name')
# Step 2 - get the plugin - load it if it is not already loaded...
if not self.plugin.has_key(plugin):
print 'Loading plugin', plugin
base = self.pluginDir + '.' + plugin.lower()
plug = __import__(base, globals(), locals(),[plugin.lower()])
plug = getattr(plug,plugin.lower())
self.plugin[plugin] = plug
print 'Loaded', plugin
yield None
# Step 3a - check if there is an old object that can be repurposed, otherwise create a new object...
done = False
if self.oldNamed.has_key(name) and isinstance(self.oldNamed[name], getattr(self.plugin[plugin],plugin)) and getattr(self.oldNamed[name],'reload',None)!=None:
print 'Reusing', plugin
inst = self.oldNamed[name]
      self.oldNamed[name] = True # So we know it's been re-used, for use during the deletion phase.
inst.reload(self,element)
yield None
print 'Reused',plugin
if getattr(inst,'postReload',None)!=None:
for blah in inst.postReload():
yield None
print 'post reload',plugin
else:
print 'Making', plugin
inst = getattr(self.plugin[plugin],plugin)(self,element)
yield None
print 'Made', plugin
if getattr(inst,'postInit',None)!=None:
for blah in inst.postInit():
yield None
print 'post init',plugin
# Step 3b - Stick it in the object database...
self.objList.append((inst,name))
if name!=None:
self.named[name] = inst
# One last yield, just to keep things going...
yield None
def get(self,name):
"""Returns the plugin instance associated with the given name, or None if it doesn't exist."""
if self.named.has_key(name):
return self.named[name]
else:
return None
def getPercentage(self):
    'During a transition this will return [0,1] indicating percentage done - for a loading plugin to use. Calling at other times will return 1.0. This is not yet implemented, as it needs to get very clever to compensate for variable loading times and includes.'
return 1.0
| apache-2.0 | -8,112,825,600,628,867,000 | 36.709845 | 283 | 0.658285 | false |
flavour/ssf | controllers/patient.py | 3 | 4894 | # -*- coding: utf-8 -*-
"""
Patient Tracking
"""
module = request.controller
if module not in deployment_settings.modules:
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
"Module's Home Page"
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# -----------------------------------------------------------------------------
def person():
""" Person controller for AddPersonWidget """
def prep(r):
if r.representation != "s3json":
# Do not serve other representations here
return False
else:
s3mgr.show_ids = True
return True
response.s3.prep = prep
return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def patient():
""" RESTful CRUD controller """
resourcename = request.function
tablename = "patient_patient"
# Load Models
s3mgr.load("patient_patient")
# Search method
patient_search = s3base.S3Search(
simple = s3base.S3SearchSimpleWidget(
name="patient_search_simple",
label = T("Search"),
comment=T("To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients."),
field = [ "person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$local_name"]),
advanced = (s3base.S3SearchSimpleWidget(
name="patient_search_simple",
label = T("Search"),
comment=T("To search for a patient, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all patients."),
field = [ "person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"person_id$local_name"]),
s3base.S3SearchOptionsWidget(
name = "patient_search_country",
label = T("Country"),
field = ["country"],
cols = 2
),
s3base.S3SearchOptionsWidget(
name = "patient_search_hospital",
label = T("Hospital"),
field = ["hospital_id"],
cols = 2
),
)
)
s3mgr.configure(tablename,
search_method=patient_search,
create_next = URL(args=["[id]", "relative"]))
# Pre-process
def prep(r):
if r.id:
s3mgr.configure("patient_relative",
create_next = URL(args=[str(r.id), "home"]))
return True
response.s3.prep = prep
# Post-process
def postp(r, output):
# No Delete-button in list view
s3_action_buttons(r, deletable=False)
return output
response.s3.postp = postp
tabs = [(T("Basic Details"), None),
(T("Accompanying Relative"), "relative"),
(T("Home"), "home")]
rheader = lambda r: patient_rheader(r, tabs=tabs)
output = s3_rest_controller(module, resourcename, rheader=rheader)
return output
# -----------------------------------------------------------------------------
def patient_rheader(r, tabs=[]):
""" Resource Page Header """
if r.representation == "html":
if r.record is None:
# List or Create form: rheader makes no sense here
return None
table = db.patient_patient
rheader_tabs = s3_rheader_tabs(r, tabs)
patient = r.record
if patient.person_id:
name = s3_fullname(patient.person_id)
else:
name = None
if patient.country:
country = table.country.represent(patient.country)
else:
country = None
if patient.hospital_id:
hospital = table.hospital_id.represent(patient.hospital_id)
else:
hospital = None
rheader = DIV(TABLE(
TR(
TH("%s: " % T("Patient")),
name,
TH("%s: " % T("Country")),
country),
TR(
TH(),
TH(),
TH("%s: " % T("Hospital")),
hospital,
)
), rheader_tabs)
return rheader
return None
# END =========================================================================
| mit | 5,781,058,041,843,743,000 | 30.779221 | 203 | 0.469146 | false |
WASPACDC/hmdsm.repository | plugin.video.quasar/resources/site-packages/quasar/osarch.py | 4 | 1667 | import xbmc
import sys
import platform
def get_platform():
build = xbmc.getInfoLabel("System.BuildVersion")
kodi_version = int(build.split()[0][:2])
ret = {
"arch": sys.maxsize > 2 ** 32 and "x64" or "x86",
"os": "",
"version": platform.release(),
"kodi": kodi_version,
"build": build
}
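    # Illustrative example only (every value below is an assumption, not real
    # output): after the platform checks that follow, ret can end up like
    #   {"arch": "x64", "os": "linux", "version": "4.15.0", "kodi": 18, "build": "18.9 ..."}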
if xbmc.getCondVisibility("system.platform.android"):
ret["os"] = "android"
if "arm" in platform.machine() or "aarch" in platform.machine():
ret["arch"] = "arm"
elif xbmc.getCondVisibility("system.platform.linux"):
ret["os"] = "linux"
if "aarch" in platform.machine() or "arm64" in platform.machine():
if xbmc.getCondVisibility("system.platform.linux.raspberrypi"):
ret["arch"] = "armv7"
elif platform.architecture()[0].startswith("32"):
ret["arch"] = "arm"
else:
ret["arch"] = "arm64"
elif "armv7" in platform.machine():
ret["arch"] = "armv7"
elif "arm" in platform.machine():
ret["arch"] = "arm"
elif xbmc.getCondVisibility("system.platform.xbox"):
ret["os"] = "windows"
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.windows"):
ret["os"] = "windows"
if platform.machine().endswith('64'):
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.osx"):
ret["os"] = "darwin"
ret["arch"] = "x64"
elif xbmc.getCondVisibility("system.platform.ios"):
ret["os"] = "ios"
ret["arch"] = "arm"
return ret
PLATFORM = get_platform()
| gpl-2.0 | 1,493,729,428,259,158,800 | 33.729167 | 75 | 0.55009 | false |
ediardo/folsom | common/database_handler.py | 1 | 2119 | from models.house_record import HouseRecord
from models.user import User
from models.result import Result
from models.base import Base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class DatabaseHandler():
def __init__(self, connection_string):
        # NOTE: the connection_string argument is currently ignored; a local
        # SQLite file is always used.
        # self.engine = create_engine('sqlite:///:memory:', echo=True)
        self.engine = create_engine('sqlite:///house_record.db', echo=False)
Base.metadata.create_all(self.engine)
self.make_session = sessionmaker(bind=self.engine)
def get_pwd_hash_by_username(self, username):
        session = self.make_session()
        try:
            passwd, = session.query(User.password).filter(User.username == username)
return passwd[0]
except:
return None
def save_records(self, records):
session = self.make_session()
session.add_all(records)
session.commit()
def get_user_id_by_login(self, login):
session = self.make_session()
id, = session.query(User.id).filter(User.username == login)
return id[0]
def get_record_by_id(self, id):
session = self.make_session()
return session.query(HouseRecord).get(id)
def get_data_for_user(self, login):
user_id = self.get_user_id_by_login(login)
session = self.make_session()
results = []
records = session.query(HouseRecord).filter(HouseRecord.user_id == user_id)
for r in records:
for res in r.results:
results.append(res)
return results
def save_user(self, username, pwd_hash):
session = self.make_session()
user = User(username=username, password=pwd_hash)
session.add(user)
session.commit()
def save_result(self, result, record_id, action="default"):
session = self.make_session()
r = Result(id=record_id, result=result, action=action)
session.add(r)
session.commit()
def get_houserecords(self):
session = self.make_session()
results = session.query(HouseRecord)
return results
| apache-2.0 | -113,806,223,733,194,240 | 32.634921 | 83 | 0.629542 | false |
pastewka/lammps | tools/amber2lmp/amber2lammps.py | 8 | 35867 | #! /usr/bin/env python2
# This is amber2lammps, a program written by Keir E. Novik to convert
# Amber files to Lammps files.
#
# Copyright 1999, 2000 Keir E. Novik; all rights reserved.
#
# Modified by Vikas Varshney, U Akron, 5 July 2005, as described in README
# Bug Fixed: Third argument in Dihedral Coeffs section is an integer - Ketan S Khare September 26, 2011
# Modified by Vikas Varshney, Oct 8, 2013 to include additional flags (Atomic_Number, Coulombic and van der Waals 1-4 factors which are included in newer versions of .top and .crd files in amber12.
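#
# Typical workflow (assumed, based on the methods defined below): read the Amber
# coordinate file (<basename>.crd) with Read_CRD and the topology file
# (<basename>.top) with Read_TOP, convert with Coerce_to_Lammps, and write the
# result with Write_Lammps, which produces a LAMMPS data file named
# data.<basename> in the current directory.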
#============================================================
def Pop(S, I=-1):
'Pop item I from list'
X = S[I]
del S[I]
return X
#============================================================
class Lammps:
#--------------------------------------------------------
def Dump(self):
'Write out contents of self (intended for debugging)'
Name_list = self.__dict__.keys()
Name_list.sort()
for Name in Name_list:
print Name + ':', self.__dict__[Name]
#--------------------------------------------------------
def Write_data(self, Basename, Item_list):
'Write the Lammps data to file (used by Write_Lammps)'
import os, sys
Filename = 'data.' + Basename
Dir_list = os.listdir('.')
i = 1
while Filename in Dir_list:
Filename = 'data' + `i` + '.' + Basename
i = i +1
del i
print 'Writing', Filename + '...',
sys.stdout.flush()
try:
F = open(Filename, 'w')
except IOError, Detail:
print '(error:', Detail[1] + '!)'
return
try:
F.writelines(Item_list)
except IOError, Detail:
print '(error:', Detail[1] + '!)'
F.close()
return
F.close()
print 'done.'
#--------------------------------------------------------
def Write_Lammps(self, Basename):
'Write the Lammps data file, ignoring blank sections'
import string
L = []
L.append('LAMMPS data file for ' + self.name + '\n\n')
L.append(`self.atoms` + ' atoms\n')
L.append(`self.bonds` + ' bonds\n')
L.append(`self.angles` + ' angles\n')
L.append(`self.dihedrals` + ' dihedrals\n')
L.append(`self.impropers` + ' impropers\n\n')
L.append(`self.atom_types` + ' atom types\n')
if self.bonds > 0:
L.append(`self.bond_types` + ' bond types\n')
if self.angles > 0:
L.append(`self.angle_types` + ' angle types\n')
if self.dihedrals > 0:
L.append(`self.dihedral_types` + ' dihedral types\n')
L.append('\n')
L.append(`self.xlo` + ' ' + `self.xhi` + ' xlo xhi\n')
L.append(`self.ylo` + ' ' + `self.yhi` + ' ylo yhi\n')
L.append(`self.zlo` + ' ' + `self.zhi` + ' zlo zhi\n\n')
if self.atom_types != 0:
L.append('Masses\n\n')
for i in range(self.atom_types):
L.append(`i+1` + ' ' + `self.Masses[i]` + '\n')
L.append('\n')
L.append('Pair Coeffs\n\n')
for i in range(self.atom_types):
L.append(`i+1`)
for j in range(len(self.Nonbond_Coeffs[0])):
L.append(' ' + `self.Nonbond_Coeffs[i][j]`)
L.append('\n')
L.append('\n')
if self.bonds != 0 and self.bond_types != 0:
L.append('Bond Coeffs\n\n')
for i in range(self.bond_types):
L.append(`i+1`)
for j in range(len(self.Bond_Coeffs[0])):
L.append(' ' + `self.Bond_Coeffs[i][j]`)
L.append('\n')
L.append('\n')
if self.angles != 0 and self.angle_types != 0:
L.append('Angle Coeffs\n\n')
for i in range(self.angle_types):
L.append(`i+1`)
for j in range(len(self.Angle_Coeffs[0])):
L.append(' ' + `self.Angle_Coeffs[i][j]`)
L.append('\n')
L.append('\n')
if self.dihedrals != 0 and self.dihedral_types != 0:
L.append('Dihedral Coeffs\n\n')
for i in range(self.dihedral_types):
L.append(`i+1`)
for j in range(len(self.Dihedral_Coeffs[0])):
L.append(' ' + `self.Dihedral_Coeffs[i][j]`)
L.append('\n')
L.append('\n')
if self.atoms != 0:
L.append('Atoms\n\n')
for i in range(self.atoms):
L.append(`i+1`)
for j in range(len(self.Atoms[0])):
L.append(' ' + `self.Atoms[i][j]`)
L.append('\n')
L.append('\n')
if self.bonds != 0 and self.bond_types != 0:
L.append('Bonds\n\n')
for i in range(self.bonds):
L.append(`i+1`)
for j in range(len(self.Bonds[0])):
L.append(' ' + `self.Bonds[i][j]`)
L.append('\n')
L.append('\n')
if self.angles != 0 and self.angle_types != 0:
L.append('Angles\n\n')
for i in range(self.angles):
L.append(`i+1`)
for j in range(len(self.Angles[0])):
L.append(' ' + `self.Angles[i][j]`)
L.append('\n')
L.append('\n')
if self.dihedrals != 0 and self.dihedral_types != 0:
L.append('Dihedrals\n\n')
for i in range(self.dihedrals):
L.append(`i+1`)
for j in range(len(self.Dihedrals[0])):
L.append(' ' + `self.Dihedrals[i][j]`)
L.append('\n')
L.append('\n')
self.Write_data(Basename, L)
#============================================================
class Amber:
def __init__(self):
'Initialize the Amber class'
self.CRD_is_read = 0
self.TOP_is_read = 0
#--------------------------------------------------------
def Dump(self):
'Write out contents of self (intended for debugging)'
Name_list = self.__dict__.keys()
Name_list.sort()
for Name in Name_list:
print Name + ':', self.__dict__[Name]
#--------------------------------------------------------
def Coerce_to_Lammps(self):
'Return the Amber data converted to Lammps format'
import math
if self.CRD_is_read and self.TOP_is_read:
l = Lammps()
print 'Converting...',
l.name = self.ITITL
l.atoms = self.NATOM
l.bonds = self.NBONH + self.MBONA
l.angles = self.NTHETH + self.MTHETA
l.dihedrals = self.NPHIH + self.MPHIA
l.impropers = 0
l.atom_types = self.NTYPES
l.bond_types = self.NUMBND
l.angle_types = self.NUMANG
l.dihedral_types = self.NPTRA
Shift = 0
if self.__dict__.has_key('BOX'):
l.xlo = 0.0
l.xhi = self.BOX[0]
l.ylo = 0.0
l.yhi = self.BOX[1]
l.zlo = 0.0
l.zhi = self.BOX[2]
if (l.xlo > min(self.X)) or (l.xhi < max(self.X)) or \
(l.ylo > min(self.Y)) or (l.yhi < max(self.Y)) or \
(l.zlo > min(self.Z)) or (l.zhi < max(self.Z)):
                # Vikas Modification: Disabling Shifting. This means I intend to send exact coordinates of each atom and let LAMMPS
# take care of imaging into periodic image cells. If one wants to shift all atoms in the periodic box,
# please uncomment the below 2 lines.
print '(warning: Currently not shifting the atoms to the periodic box)'
#Shift = 1
else:
print '(warning: Guessing at periodic box!)',
l.xlo = min(self.X)
l.xhi = max(self.X)
l.ylo = min(self.Y)
l.yhi = max(self.Y)
l.zlo = min(self.Z)
l.zhi = max(self.Z)
# This doesn't check duplicate values
l.Masses = []
for i in range(l.atom_types):
l.Masses.append(0)
for i in range(self.NATOM):
l.Masses[self.IAC[i] - 1] = self.AMASS[i]
l.Nonbond_Coeffs = []
for i in range(self.NTYPES):
l.Nonbond_Coeffs.append([0,0])
for i in range(self.NTYPES):
j = self.ICO[i * (self.NTYPES + 1)] - 1
if self.CN1[j] == 0.0:
l.Nonbond_Coeffs[i][0] = 0.0
else:
l.Nonbond_Coeffs[i][0] = \
0.25 * (self.CN2[j])**2 / self.CN1[j]
if self.CN2[j] == 0.0:
l.Nonbond_Coeffs[i][1] = 0.0
else:
l.Nonbond_Coeffs[i][1] = \
(self.CN1[j] / self.CN2[j])**(1.0/6.0)
l.Bond_Coeffs = []
for i in range(self.NUMBND):
l.Bond_Coeffs.append([0,0])
for i in range(self.NUMBND):
l.Bond_Coeffs[i][0] = self.RK[i]
l.Bond_Coeffs[i][1] = self.REQ[i]
l.Angle_Coeffs = []
for i in range(self.NUMANG):
l.Angle_Coeffs.append([0,0])
for i in range(self.NUMANG):
l.Angle_Coeffs[i][0] = self.TK[i]
l.Angle_Coeffs[i][1] = (180/math.pi) * self.TEQ[i]
l.Dihedral_Coeffs = []
for i in range(self.NPTRA):
l.Dihedral_Coeffs.append([0,0,0])
for i in range(self.NPTRA):
l.Dihedral_Coeffs[i][0] = self.PK[i]
if self.PHASE[i] == 0:
l.Dihedral_Coeffs[i][1] = 1
else:
l.Dihedral_Coeffs[i][1] = -1
l.Dihedral_Coeffs[i][2] = int(self.PN[i])
l.Atoms = []
for i in range(self.NATOM):
x = self.X[i]
y = self.Y[i]
z = self.Z[i]
if Shift:
while x < l.xlo:
x = x + self.BOX[0]
while x > l.xhi:
x = x - self.BOX[0]
while y < l.ylo:
y = y + self.BOX[1]
while y > l.yhi:
y = y - self.BOX[1]
while z < l.zlo:
z = z + self.BOX[2]
while z > l.zhi:
z = z - self.BOX[2]
l.Atoms.append([0, self.IAC[i], self.CHRG[i]/18.2223, \
x, y, z])
l.Bonds = []
for i in range(l.bonds):
l.Bonds.append([0,0,0])
for i in range(self.NBONH):
l.Bonds[i][0] = self.ICBH[i]
l.Bonds[i][1] = abs(self.IBH[i])/3 + 1
l.Bonds[i][2] = abs(self.JBH[i])/3 + 1
for i in range(self.NBONA):
l.Bonds[self.NBONH + i][0] = self.ICB[i]
l.Bonds[self.NBONH + i][1] = abs(self.IB[i])/3 + 1
l.Bonds[self.NBONH + i][2] = abs(self.JB[i])/3 + 1
l.Angles = []
for i in range(l.angles):
l.Angles.append([0,0,0,0])
for i in range(self.NTHETH):
l.Angles[i][0] = self.ICTH[i]
l.Angles[i][1] = abs(self.ITH[i])/3 + 1
l.Angles[i][2] = abs(self.JTH[i])/3 + 1
l.Angles[i][3] = abs(self.KTH[i])/3 + 1
for i in range(self.NTHETA):
l.Angles[self.NTHETH + i][0] = self.ICT[i]
l.Angles[self.NTHETH + i][1] = abs(self.IT[i])/3 + 1
l.Angles[self.NTHETH + i][2] = abs(self.JT[i])/3 + 1
l.Angles[self.NTHETH + i][3] = abs(self.KT[i])/3 + 1
l.Dihedrals = []
for i in range(l.dihedrals):
l.Dihedrals.append([0,0,0,0,0])
for i in range(self.NPHIH):
l.Dihedrals[i][0] = self.ICPH[i]
l.Dihedrals[i][1] = abs(self.IPH[i])/3 + 1
l.Dihedrals[i][2] = abs(self.JPH[i])/3 + 1
l.Dihedrals[i][3] = abs(self.KPH[i])/3 + 1
l.Dihedrals[i][4] = abs(self.LPH[i])/3 + 1
for i in range(self.NPHIA):
l.Dihedrals[self.NPHIH + i][0] = self.ICP[i]
l.Dihedrals[self.NPHIH + i][1] = abs(self.IP[i])/3 + 1
l.Dihedrals[self.NPHIH + i][2] = abs(self.JP[i])/3 + 1
l.Dihedrals[self.NPHIH + i][3] = abs(self.KP[i])/3 + 1
l.Dihedrals[self.NPHIH + i][4] = abs(self.LP[i])/3 + 1
print 'done.'
return l
else:
print '(Error: Not all the Amber data has been read!)'
#--------------------------------------------------------
def Read_data(self, Filename):
'Read the filename, returning a list of strings'
import string, sys
print 'Reading', Filename + '...',
sys.stdout.flush()
try:
F = open(Filename)
except IOError, Detail:
print '(error:', Detail[1] + '!)'
return
try:
Lines = F.readlines()
except IOError, Detail:
print '(error:', Detail[1] + '!)'
F.close()
return
F.close()
# If the first line is empty, use the Basename
if Filename[-4:] == '.crd':
if string.split(Lines[0]) == []: # This line corresponds to TITLE name in CRD file
Basename = Filename[:string.find(Filename, '.')]
Item_list = [Basename]
print 'Warning: Title not present... Assigning Basename as Title'
else:
Item_list = []
else:
if string.split(Lines[3]) == []: # This line corresponds to TITLE name in TOPOLOGY file
Basename = Filename[:string.find(Filename, '.')]
Item_list = [Basename]
print 'Warning: Title not present... Assigning Basename as Title'
else:
Item_list = []
for Line in Lines:
if Line[0]!='%': #Vikas' Modification: This condition ignores all the lines starting with % in the topology file.
Item_list.extend(string.split(Line))
return Item_list
#--------------------------------------------------------
def Read_CRD(self, Basename):
'Read the Amber coordinate/restart (.crd) file'
# The optional velocities and periodic box size are not yet parsed.
Item_list = self.Read_data(Basename + '.crd')
if Item_list == None:
return
elif len(Item_list) < 2:
print '(error: File too short!)'
return
# Parse the data
if self.__dict__.has_key('ITITL'):
if Pop(Item_list,0) != self.ITITL:
print '(warning: ITITL differs!)',
else:
self.ITITL = Pop(Item_list,0)
            print self.ITITL #Vikas Modification : Printing the Title
if self.__dict__.has_key('NATOM'):
if eval(Pop(Item_list,0)) != self.NATOM:
print '(error: NATOM differs!)'
return
else:
self.NATOM = eval(Pop(Item_list,0))
print self.NATOM # Vikas' Modification: Printing number of atoms just to make sure that the program is reading the correct value.
#if len(Item_list) == 1 + 3 * self.NATOM:
# Vikas' Modification: I changed the condition.
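        # A leftover item count that is not a multiple of 3 means the first item is the simulation time.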
if (len(Item_list)%3) != 0:
self.TIME = eval(Pop(Item_list,0))
else:
self.TIME = 0
        print self.TIME # Vikas' Modification : Printing simulation time, just to make sure that the program is reading the correct value.
if len(Item_list) < 3 * self.NATOM:
print '(error: File too short!)'
return
self.X = []
self.Y = []
self.Z = []
for i in range(self.NATOM):
self.X.append(eval(Pop(Item_list,0)))
self.Y.append(eval(Pop(Item_list,0)))
self.Z.append(eval(Pop(Item_list,0)))
if (self.NATOM == 1) and len(Item_list):
print '(warning: Ambiguity!)',
if len(Item_list) >= 3 * self.NATOM:
self.VX = []
self.VY = []
self.VZ = []
for i in range(self.NATOM):
self.VX.append(eval(Pop(Item_list,0)))
self.VY.append(eval(Pop(Item_list,0)))
self.VZ.append(eval(Pop(Item_list,0)))
if len(Item_list) >= 3:
self.BOX = []
for i in range(3):
self.BOX.append(eval(Pop(Item_list,0)))
if len(Item_list):
print '(warning: File too large!)',
print 'done.'
self.CRD_is_read = 1
#--------------------------------------------------------
def Read_TOP(self, Basename):
'Read the Amber parameter/topology (.top) file'
Item_list = self.Read_data(Basename + '.top')
if Item_list == None:
return
elif len(Item_list) < 31:
print '(error: File too short!)'
return
# Parse the data
if self.__dict__.has_key('ITITL'):
if Pop(Item_list,0) != self.ITITL:
print '(warning: ITITL differs!)'
else:
self.ITITL = Pop(Item_list,0)
print self.ITITL # Printing Self Title
if self.__dict__.has_key('NATOM'):
if eval(Pop(Item_list,0)) != self.NATOM:
print '(error: NATOM differs!)'
return
else:
self.NATOM = eval(Pop(Item_list,0))
            print self.NATOM # Printing total number of atoms just to make sure that things are going right
self.NTYPES = eval(Pop(Item_list,0))
self.NBONH = eval(Pop(Item_list,0))
self.MBONA = eval(Pop(Item_list,0))
self.NTHETH = eval(Pop(Item_list,0))
self.MTHETA = eval(Pop(Item_list,0))
self.NPHIH = eval(Pop(Item_list,0))
self.MPHIA = eval(Pop(Item_list,0))
self.NHPARM = eval(Pop(Item_list,0))
self.NPARM = eval(Pop(Item_list,0))
self.NEXT = eval(Pop(Item_list,0))
self.NRES = eval(Pop(Item_list,0))
self.NBONA = eval(Pop(Item_list,0))
self.NTHETA = eval(Pop(Item_list,0))
self.NPHIA = eval(Pop(Item_list,0))
self.NUMBND = eval(Pop(Item_list,0))
self.NUMANG = eval(Pop(Item_list,0))
self.NPTRA = eval(Pop(Item_list,0))
self.NATYP = eval(Pop(Item_list,0))
self.NPHB = eval(Pop(Item_list,0))
self.IFPERT = eval(Pop(Item_list,0))
self.NBPER = eval(Pop(Item_list,0))
self.NGPER = eval(Pop(Item_list,0))
self.NDPER = eval(Pop(Item_list,0))
self.MBPER = eval(Pop(Item_list,0))
self.MGPER = eval(Pop(Item_list,0))
self.MDPER = eval(Pop(Item_list,0))
self.IFBOX = eval(Pop(Item_list,0))
self.NMXRS = eval(Pop(Item_list,0))
self.IFCAP = eval(Pop(Item_list,0))
#....................................................
if len(Item_list) < 5 * self.NATOM + self.NTYPES**2 + \
2*(self.NRES + self.NUMBND + self.NUMANG) + \
3*self.NPTRA + self.NATYP:
print '(error: File too short!)'
return -1
self.IGRAPH = []
Pop(Item_list,0)
# A little kludge is needed here, since the IGRAPH strings are
# not separated by spaces if 4 characters in length.
for i in range(self.NATOM):
if len(Item_list[0]) > 4:
Item_list.insert(1, Item_list[0][4:])
Item_list.insert(1, Item_list[0][0:4])
del Item_list[0]
self.IGRAPH.append(Pop(Item_list,0))
# Vikas' Modification : In the following section, I am printing out each quantity which is currently being read from the topology file.
print 'Reading Charges...'
self.CHRG = []
for i in range(self.NATOM):
self.CHRG.append(eval(Pop(Item_list,0)))
print 'Reading Atomic Number...'
self.ANUMBER = []
for i in range(self.NATOM):
self.ANUMBER.append(eval(Pop(Item_list,0)))
print 'Reading Atomic Masses...'
self.AMASS = []
for i in range(self.NATOM):
self.AMASS.append(eval(Pop(Item_list,0)))
print 'Reading Atom Types...'
self.IAC = []
for i in range(self.NATOM):
self.IAC.append(eval(Pop(Item_list,0)))
print 'Reading Excluded Atoms...'
self.NUMEX = []
for i in range(self.NATOM):
self.NUMEX.append(eval(Pop(Item_list,0)))
print 'Reading Non-bonded Parameter Index...'
self.ICO = []
for i in range(self.NTYPES**2):
self.ICO.append(eval(Pop(Item_list,0)))
print 'Reading Residue Labels...'
self.LABRES = []
for i in range(self.NRES):
self.LABRES.append(Pop(Item_list,0))
print 'Reading Residues Starting Pointers...'
self.IPRES = []
for i in range(self.NRES):
self.IPRES.append(eval(Pop(Item_list,0)))
print 'Reading Bond Force Constants...'
self.RK = []
for i in range(self.NUMBND):
self.RK.append(eval(Pop(Item_list,0)))
print 'Reading Equilibrium Bond Values...'
self.REQ = []
for i in range(self.NUMBND):
self.REQ.append(eval(Pop(Item_list,0)))
print 'Reading Angle Force Constants...'
self.TK = []
for i in range(self.NUMANG):
self.TK.append(eval(Pop(Item_list,0)))
print 'Reading Equilibrium Angle Values...'
self.TEQ = []
for i in range(self.NUMANG):
self.TEQ.append(eval(Pop(Item_list,0)))
print 'Reading Dihedral Force Constants...'
self.PK = []
for i in range(self.NPTRA):
self.PK.append(eval(Pop(Item_list,0)))
print 'Reading Dihedral Periodicity...'
self.PN = []
for i in range(self.NPTRA):
self.PN.append(eval(Pop(Item_list,0)))
print 'Reading Dihedral Phase...'
self.PHASE = []
for i in range(self.NPTRA):
self.PHASE.append(eval(Pop(Item_list,0)))
print 'Reading 1-4 Electrostatic Scaling Factor...'
self.SCEEFAC = []
for i in range(self.NPTRA):
self.SCEEFAC.append(eval(Pop(Item_list,0)))
print 'Reading 1-4 Van der Waals Scaling Factor...'
self.SCNBFAC = []
for i in range(self.NPTRA):
self.SCNBFAC.append(eval(Pop(Item_list,0)))
print 'Reading Solty...' #I think this is currently not used in AMBER. Check it out, though
self.SOLTY = []
for i in range(self.NATYP):
self.SOLTY.append(eval(Pop(Item_list,0)))
#....................................................
if len(Item_list) < 2 * self.NTYPES * (self.NTYPES + 1) / 2:
print '(error: File too short!)'
return -1
print 'Reading LJ A Coefficient...'
self.CN1 = []
for i in range(self.NTYPES * (self.NTYPES + 1) / 2):
self.CN1.append(eval(Pop(Item_list,0)))
print 'Reading LJ B Coefficient...'
self.CN2 = []
for i in range(self.NTYPES * (self.NTYPES + 1) / 2):
self.CN2.append(eval(Pop(Item_list,0)))
#....................................................
if len(Item_list) < 3 * (self.NBONH + self.NBONA) + \
4 * (self.NTHETH + self.NTHETA) + 5 * (self.NPHIH + self.NPHIA):
print '(error: File too short!)'
return -1
print 'Reading Bonds which include hydrogen...'
self.IBH = []
self.JBH = []
self.ICBH = []
for i in range(self.NBONH):
self.IBH.append(eval(Pop(Item_list,0)))
self.JBH.append(eval(Pop(Item_list,0)))
self.ICBH.append(eval(Pop(Item_list,0)))
print 'Reading Bonds which dont include hydrogen...'
self.IB = []
self.JB = []
self.ICB = []
for i in range(self.NBONA):
self.IB.append(eval(Pop(Item_list,0)))
self.JB.append(eval(Pop(Item_list,0)))
self.ICB.append(eval(Pop(Item_list,0)))
print 'Reading Angles which include hydrogen...'
self.ITH = []
self.JTH = []
self.KTH = []
self.ICTH = []
for i in range(self.NTHETH):
self.ITH.append(eval(Pop(Item_list,0)))
self.JTH.append(eval(Pop(Item_list,0)))
self.KTH.append(eval(Pop(Item_list,0)))
self.ICTH.append(eval(Pop(Item_list,0)))
print 'Reading Angles which dont include hydrogen...'
self.IT = []
self.JT = []
self.KT = []
self.ICT = []
for i in range(self.NTHETA):
self.IT.append(eval(Pop(Item_list,0)))
self.JT.append(eval(Pop(Item_list,0)))
self.KT.append(eval(Pop(Item_list,0)))
self.ICT.append(eval(Pop(Item_list,0)))
print 'Reading Dihedrals which include hydrogen...'
self.IPH = []
self.JPH = []
self.KPH = []
self.LPH = []
self.ICPH = []
for i in range(self.NPHIH):
self.IPH.append(eval(Pop(Item_list,0)))
self.JPH.append(eval(Pop(Item_list,0)))
self.KPH.append(eval(Pop(Item_list,0)))
self.LPH.append(eval(Pop(Item_list,0)))
self.ICPH.append(eval(Pop(Item_list,0)))
print 'Reading Dihedrals which dont include hydrogen...'
self.IP = []
self.JP = []
self.KP = []
self.LP = []
self.ICP = []
for i in range(self.NPHIA):
self.IP.append(eval(Pop(Item_list,0)))
self.JP.append(eval(Pop(Item_list,0)))
self.KP.append(eval(Pop(Item_list,0)))
self.LP.append(eval(Pop(Item_list,0)))
self.ICP.append(eval(Pop(Item_list,0)))
#....................................................
if len(Item_list) < self.NEXT + 3 * self.NPHB + 4 * self.NATOM:
print '(error: File too short!)'
return -1
print 'Reading Excluded Atom List...'
self.NATEX = []
for i in range(self.NEXT):
self.NATEX.append(eval(Pop(Item_list,0)))
print 'Reading H-Bond A Coefficient, corresponding to r**12 term for all possible types...'
self.ASOL = []
for i in range(self.NPHB):
self.ASOL.append(eval(Pop(Item_list,0)))
print 'Reading H-Bond B Coefficient, corresponding to r**10 term for all possible types...'
self.BSOL = []
for i in range(self.NPHB):
self.BSOL.append(eval(Pop(Item_list,0)))
print 'Reading H-Bond Cut...' # I think it is not being used nowadays
self.HBCUT = []
for i in range(self.NPHB):
self.HBCUT.append(eval(Pop(Item_list,0)))
print 'Reading Amber Atom Types for each atom...'
self.ISYMBL = []
for i in range(self.NATOM):
self.ISYMBL.append(Pop(Item_list,0))
print 'Reading Tree Chain Classification...'
self.ITREE = []
for i in range(self.NATOM):
self.ITREE.append(Pop(Item_list,0))
print 'Reading Join Array: Tree joining information' # Currently unused in Sander, an AMBER module
self.JOIN = []
for i in range(self.NATOM):
self.JOIN.append(eval(Pop(Item_list,0)))
print 'Reading IRotate...' # Currently unused in Sander and Gibbs
self.IROTAT = []
for i in range(self.NATOM):
self.IROTAT.append(eval(Pop(Item_list,0)))
#....................................................
if self.IFBOX > 0:
if len(Item_list) < 3:
print '(error: File too short!)'
return -1
print 'Reading final residue which is part of solute...'
self.IPTRES = eval(Pop(Item_list,0))
print 'Reading total number of molecules...'
self.NSPM = eval(Pop(Item_list,0))
            print 'Reading first solvent molecule index...'
self.NSPSOL = eval(Pop(Item_list,0))
if len(Item_list) < self.NSPM + 4:
print '(error: File too short!)'
return -1
print 'Reading atom per molecule...'
self.NSP = []
for i in range(self.NSPM):
self.NSP.append(eval(Pop(Item_list,0)))
self.BETA = eval(Pop(Item_list,0))
print 'Reading Box Dimensions...'
if self.__dict__.has_key('BOX'):
BOX = []
for i in range(3):
BOX.append(eval(Pop(Item_list,0)))
for i in range(3):
if BOX[i] != self.BOX[i]:
print '(warning: BOX differs!)',
break
del BOX
else:
self.BOX = []
for i in range(3):
self.BOX.append(eval(Pop(Item_list,0)))
#....................................................
if self.IFCAP > 0:
if len(Item_list) < 5:
print '(error: File too short!)'
return -1
print 'Reading ICAP variables::: For details, refer to online AMBER format manual'
self.NATCAP = eval(Pop(Item_list,0))
self.CUTCAP = eval(Pop(Item_list,0))
self.XCAP = eval(Pop(Item_list,0))
self.YCAP = eval(Pop(Item_list,0))
self.ZCAP = eval(Pop(Item_list,0))
#....................................................
if self.IFPERT > 0:
if len(Item_list) < 4 * self.NBPER + 5 * self.NGPER + \
6 * self.NDPER + self.NRES + 6 * self.NATOM:
print '(error: File too short!)'
return -1
print 'Reading perturb variables, 1. Bond, 2. Angles, 3. Dihedrals, etc etc.::: For details, refer to online AMBER format manual'
self.IBPER = []
self.JBPER = []
for i in range(self.NBPER):
self.IBPER.append(eval(Pop(Item_list,0)))
self.JBPER.append(eval(Pop(Item_list,0)))
self.ICBPER = []
for i in range(2 * self.NBPER):
self.ICBPER.append(eval(Pop(Item_list,0)))
self.ITPER = []
self.JTPER = []
self.KTPER = []
for i in range(self.NGPER):
self.ITPER.append(eval(Pop(Item_list,0)))
self.JTPER.append(eval(Pop(Item_list,0)))
self.KTPER.append(eval(Pop(Item_list,0)))
self.ICTPER = []
for i in range(2 * self.NGPER):
self.ICTPER.append(eval(Pop(Item_list,0)))
self.IPPER = []
self.JPPER = []
self.KPPER = []
self.LPPER = []
for i in range(self.NDPER):
self.IPPER.append(eval(Pop(Item_list,0)))
self.JPPER.append(eval(Pop(Item_list,0)))
self.KPPER.append(eval(Pop(Item_list,0)))
self.LPPER.append(eval(Pop(Item_list,0)))
self.ICPPER = []
for i in range(2 * self.NDPER):
self.ICPPER.append(eval(Pop(Item_list,0)))
LABRES = []
for i in range(self.NRES):
LABRES.append(Pop(Item_list,0))
for i in range(self.NRES):
if LABRES[i] != self.LABRES[i]:
                    print '(warning: LABRES differs!)',
break
self.IGRPER = []
for i in range(self.NATOM):
self.IGRPER.append(eval(Pop(Item_list,0)))
self.ISMPER = []
for i in range(self.NATOM):
self.ISMPER.append(eval(Pop(Item_list,0)))
self.ALMPER = []
for i in range(self.NATOM):
self.ALMPER.append(eval(Pop(Item_list,0)))
self.IAPER = []
for i in range(self.NATOM):
self.IAPER.append(eval(Pop(Item_list,0)))
self.IACPER = []
for i in range(self.NATOM):
self.IACPER.append(eval(Pop(Item_list,0)))
self.CGPER = []
for i in range(self.NATOM):
self.CGPER.append(eval(Pop(Item_list,0)))
#....................................................
self.IPOL = 0
if self.IPOL == 1:
if len(Item_list) < self.NATOM:
print '(error: File too short!)'
return -1
print 'Reading Polarizability Data. For details, refer to online AMBER format manual'
self.ATPOL = []
for i in range(self.NATOM):
self.ATPOL.append(eval(Pop(Item_list,0)))
if self.IFPERT == 1:
if len(Item_list) < self.NATOM:
print '(error: File too short!)'
return -1
self.ATPOL1 = []
for i in range(self.NATOM):
self.ATPOL1.append(eval(Pop(Item_list,0)))
#....................................................
if len(Item_list):
print '(warning: File too large!)',
print 'done.'
self.TOP_is_read = 1
#============================================================
def Find_Amber_files():
'Look for sets of Amber files to process'
'''If not passed anything on the command line, look for pairs of
    Amber files (.crd and .top) in the current directory.  For each
    set, if there is no corresponding Lammps file (data.), or it is
older than any of the Amber files, add its basename to a list of
strings. This list is returned by the function'''
# Date and existence checks not yet implemented
import os, sys
Basename_list = []
# Extract basenames from command line
for Name in sys.argv[1:]:
if Name[-4:] == '.crd':
Basename_list.append(Name[:-4])
else:
if Name[-4:] == '.top':
Basename_list.append(Name[:-4])
else:
Basename_list.append(Name)
# Remove duplicate basenames
for Basename in Basename_list[:]:
while Basename_list.count(Basename) > 1:
Basename_list.remove(Basename)
if Basename_list == []:
print 'Looking for Amber files...',
Dir_list = os.listdir('.')
Dir_list.sort()
for File in Dir_list:
if File[-4:] == '.top':
Basename = File[:-4]
if (Basename + '.crd') in Dir_list:
Basename_list.append(Basename)
if Basename_list != []:
print 'found',
for i in range(len(Basename_list)-1):
print Basename_list[i] + ',',
print Basename_list[-1] + '\n'
if Basename_list == []:
print 'none.\n'
return Basename_list
#============================================================
def Convert_Amber_files():
'Handle the whole conversion process'
print
print 'Welcome to amber2lammps, a program to convert Amber files to Lammps format!'
print
Basename_list = Find_Amber_files()
for Basename in Basename_list:
a = Amber()
a.Read_CRD(Basename)
if a.CRD_is_read:
a.Read_TOP(Basename)
if a.TOP_is_read:
l = a.Coerce_to_Lammps()
l.Write_Lammps(Basename)
del l
del a
print
#============================================================
Convert_Amber_files()
| gpl-2.0 | -8,097,712,517,030,925,000 | 34.582341 | 197 | 0.481139 | false |
soldag/home-assistant | homeassistant/components/rfxtrx/binary_sensor.py | 11 | 7563 | """Support for RFXtrx binary sensors."""
import logging
import RFXtrx as rfxtrxmod
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DEVICE_CLASS_SMOKE,
BinarySensorEntity,
)
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_DEVICE_CLASS,
CONF_DEVICES,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.helpers import event as evt
from . import (
CONF_DATA_BITS,
CONF_OFF_DELAY,
RfxtrxEntity,
connect_auto_add,
find_possible_pt2262_device,
get_device_id,
get_pt2262_cmd,
get_rfx_object,
)
from .const import COMMAND_OFF_LIST, COMMAND_ON_LIST, DEVICE_PACKET_TYPE_LIGHTING4
_LOGGER = logging.getLogger(__name__)
SENSOR_STATUS_ON = [
"Panic",
"Motion",
"Motion Tamper",
"Light Detected",
"Alarm",
"Alarm Tamper",
]
SENSOR_STATUS_OFF = [
"End Panic",
"No Motion",
"No Motion Tamper",
"Dark Detected",
"Normal",
"Normal Tamper",
]
DEVICE_TYPE_DEVICE_CLASS = {
"X10 Security Motion Detector": DEVICE_CLASS_MOTION,
"KD101 Smoke Detector": DEVICE_CLASS_SMOKE,
"Visonic Powercode Motion Detector": DEVICE_CLASS_MOTION,
"Alecto SA30 Smoke Detector": DEVICE_CLASS_SMOKE,
"RM174RF Smoke Detector": DEVICE_CLASS_SMOKE,
}
def supported(event):
"""Return whether an event supports binary_sensor."""
if isinstance(event, rfxtrxmod.ControlEvent):
return True
if isinstance(event, rfxtrxmod.SensorEvent):
return event.values.get("Sensor Status") in [
*SENSOR_STATUS_ON,
*SENSOR_STATUS_OFF,
]
return False
async def async_setup_entry(
hass,
config_entry,
async_add_entities,
):
"""Set up platform."""
sensors = []
device_ids = set()
pt2262_devices = []
discovery_info = config_entry.data
for packet_id, entity_info in discovery_info[CONF_DEVICES].items():
event = get_rfx_object(packet_id)
if event is None:
_LOGGER.error("Invalid device: %s", packet_id)
continue
if not supported(event):
continue
device_id = get_device_id(
event.device, data_bits=entity_info.get(CONF_DATA_BITS)
)
if device_id in device_ids:
continue
device_ids.add(device_id)
if event.device.packettype == DEVICE_PACKET_TYPE_LIGHTING4:
find_possible_pt2262_device(pt2262_devices, event.device.id_string)
pt2262_devices.append(event.device.id_string)
device = RfxtrxBinarySensor(
event.device,
device_id,
entity_info.get(
CONF_DEVICE_CLASS,
DEVICE_TYPE_DEVICE_CLASS.get(event.device.type_string),
),
entity_info.get(CONF_OFF_DELAY),
entity_info.get(CONF_DATA_BITS),
entity_info.get(CONF_COMMAND_ON),
entity_info.get(CONF_COMMAND_OFF),
)
sensors.append(device)
async_add_entities(sensors)
@callback
def binary_sensor_update(event, device_id):
"""Call for control updates from the RFXtrx gateway."""
if not supported(event):
return
if device_id in device_ids:
return
device_ids.add(device_id)
_LOGGER.info(
"Added binary sensor (Device ID: %s Class: %s Sub: %s Event: %s)",
event.device.id_string.lower(),
event.device.__class__.__name__,
event.device.subtype,
"".join(f"{x:02x}" for x in event.data),
)
sensor = RfxtrxBinarySensor(
event.device,
device_id,
event=event,
device_class=DEVICE_TYPE_DEVICE_CLASS.get(event.device.type_string),
)
async_add_entities([sensor])
# Subscribe to main RFXtrx events
connect_auto_add(hass, discovery_info, binary_sensor_update)
class RfxtrxBinarySensor(RfxtrxEntity, BinarySensorEntity):
"""A representation of a RFXtrx binary sensor."""
def __init__(
self,
device,
device_id,
device_class=None,
off_delay=None,
data_bits=None,
cmd_on=None,
cmd_off=None,
event=None,
):
"""Initialize the RFXtrx sensor."""
super().__init__(device, device_id, event=event)
self._device_class = device_class
self._data_bits = data_bits
self._off_delay = off_delay
self._state = None
self._delay_listener = None
self._cmd_on = cmd_on
self._cmd_off = cmd_off
async def async_added_to_hass(self):
"""Restore device state."""
await super().async_added_to_hass()
if self._event is None:
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_ON
if self._state and self._off_delay is not None:
self._state = False
@property
def force_update(self) -> bool:
"""We should force updates. Repeated states have meaning."""
return True
@property
def device_class(self):
"""Return the sensor class."""
return self._device_class
@property
def is_on(self):
"""Return true if the sensor state is True."""
return self._state
def _apply_event_lighting4(self, event):
"""Apply event for a lighting 4 device."""
if self._data_bits is not None:
cmd = get_pt2262_cmd(event.device.id_string, self._data_bits)
cmd = int(cmd, 16)
if cmd == self._cmd_on:
self._state = True
elif cmd == self._cmd_off:
self._state = False
else:
self._state = True
def _apply_event_standard(self, event):
if event.values.get("Command") in COMMAND_ON_LIST:
self._state = True
elif event.values.get("Command") in COMMAND_OFF_LIST:
self._state = False
elif event.values.get("Sensor Status") in SENSOR_STATUS_ON:
self._state = True
elif event.values.get("Sensor Status") in SENSOR_STATUS_OFF:
self._state = False
def _apply_event(self, event):
"""Apply command from rfxtrx."""
super()._apply_event(event)
if event.device.packettype == DEVICE_PACKET_TYPE_LIGHTING4:
self._apply_event_lighting4(event)
else:
self._apply_event_standard(event)
@callback
def _handle_event(self, event, device_id):
"""Check if event applies to me and update."""
if device_id != self._device_id:
return
_LOGGER.debug(
"Binary sensor update (Device ID: %s Class: %s Sub: %s)",
event.device.id_string,
event.device.__class__.__name__,
event.device.subtype,
)
self._apply_event(event)
self.async_write_ha_state()
if self._delay_listener:
self._delay_listener()
self._delay_listener = None
if self.is_on and self._off_delay is not None:
@callback
def off_delay_listener(now):
"""Switch device off after a delay."""
self._delay_listener = None
self._state = False
self.async_write_ha_state()
self._delay_listener = evt.async_call_later(
self.hass, self._off_delay, off_delay_listener
)
| apache-2.0 | 3,038,740,157,943,357,000 | 27.539623 | 82 | 0.578871 | false |
SoteriousIdaofevil/xmlstar | src/libxml2-2.9.1.tar/libxml2-2.9.1/python/generator.py | 13 | 48766 | #!/usr/bin/python -u
#
# generate python wrappers from the XML API description
#
functions = {}
enums = {} # { enumType: { enumConstant: enumValue } }
import os
import sys
import string
if __name__ == "__main__":
# launched as a script
srcPref = os.path.dirname(sys.argv[0])
else:
# imported
srcPref = os.path.dirname(__file__)
#######################################################################
#
# That part is purely the API acquisition phase from the
# XML API description
#
#######################################################################
import os
import xml.sax
debug = 0
def getparser():
# Attach parser to an unmarshalling object. return both objects.
target = docParser()
parser = xml.sax.make_parser()
parser.setContentHandler(target)
return parser, target
class docParser(xml.sax.handler.ContentHandler):
def __init__(self):
self._methodname = None
self._data = []
self.in_function = 0
self.startElement = self.start
self.endElement = self.end
self.characters = self.data
def close(self):
if debug:
print("close")
def getmethodname(self):
return self._methodname
def data(self, text):
if debug:
print("data %s" % text)
self._data.append(text)
def start(self, tag, attrs):
if debug:
print("start %s, %s" % (tag, attrs))
if tag == 'function':
self._data = []
self.in_function = 1
self.function = None
self.function_cond = None
self.function_args = []
self.function_descr = None
self.function_return = None
self.function_file = None
if 'name' in attrs.keys():
self.function = attrs['name']
if 'file' in attrs.keys():
self.function_file = attrs['file']
elif tag == 'cond':
self._data = []
elif tag == 'info':
self._data = []
elif tag == 'arg':
if self.in_function == 1:
self.function_arg_name = None
self.function_arg_type = None
self.function_arg_info = None
if 'name' in attrs.keys():
self.function_arg_name = attrs['name']
if 'type' in attrs.keys():
self.function_arg_type = attrs['type']
if 'info' in attrs.keys():
self.function_arg_info = attrs['info']
elif tag == 'return':
if self.in_function == 1:
self.function_return_type = None
self.function_return_info = None
self.function_return_field = None
if 'type' in attrs.keys():
self.function_return_type = attrs['type']
if 'info' in attrs.keys():
self.function_return_info = attrs['info']
if 'field' in attrs.keys():
self.function_return_field = attrs['field']
elif tag == 'enum':
enum(attrs['type'],attrs['name'],attrs['value'])
def end(self, tag):
if debug:
print("end %s" % tag)
if tag == 'function':
if self.function != None:
function(self.function, self.function_descr,
self.function_return, self.function_args,
self.function_file, self.function_cond)
self.in_function = 0
elif tag == 'arg':
if self.in_function == 1:
self.function_args.append([self.function_arg_name,
self.function_arg_type,
self.function_arg_info])
elif tag == 'return':
if self.in_function == 1:
self.function_return = [self.function_return_type,
self.function_return_info,
self.function_return_field]
elif tag == 'info':
str = ''
for c in self._data:
str = str + c
if self.in_function == 1:
self.function_descr = str
elif tag == 'cond':
str = ''
for c in self._data:
str = str + c
if self.in_function == 1:
self.function_cond = str
def function(name, desc, ret, args, file, cond):
functions[name] = (desc, ret, args, file, cond)
def enum(type, name, value):
if type not in enums:
enums[type] = {}
enums[type][name] = value
#######################################################################
#
# Some filtering rules to drop functions/types which should not
# be exposed as-is on the Python interface
#
#######################################################################
skipped_modules = {
'xmlmemory': None,
'DOCBparser': None,
'SAX': None,
'hash': None,
'list': None,
'threads': None,
# 'xpointer': None,
}
skipped_types = {
'int *': "usually a return type",
'xmlSAXHandlerPtr': "not the proper interface for SAX",
'htmlSAXHandlerPtr': "not the proper interface for SAX",
'xmlRMutexPtr': "thread specific, skipped",
'xmlMutexPtr': "thread specific, skipped",
'xmlGlobalStatePtr': "thread specific, skipped",
'xmlListPtr': "internal representation not suitable for python",
'xmlBufferPtr': "internal representation not suitable for python",
'FILE *': None,
}
#######################################################################
#
# Table of remapping to/from the python type or class to the C
# counterpart.
#
#######################################################################
py_types = {
'void': (None, None, None, None),
'int': ('i', None, "int", "int"),
'long': ('l', None, "long", "long"),
'double': ('d', None, "double", "double"),
'unsigned int': ('i', None, "int", "int"),
'xmlChar': ('c', None, "int", "int"),
'unsigned char *': ('z', None, "charPtr", "char *"),
'char *': ('z', None, "charPtr", "char *"),
'const char *': ('z', None, "charPtrConst", "const char *"),
'xmlChar *': ('z', None, "xmlCharPtr", "xmlChar *"),
'const xmlChar *': ('z', None, "xmlCharPtrConst", "const xmlChar *"),
'xmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlDtdPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlDtdPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlDtd *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlDtd *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlAttrPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlAttrPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlAttr *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlAttr *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlEntityPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlEntityPtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlEntity *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const xmlEntity *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlElementPtr': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'const xmlElementPtr': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'xmlElement *': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'const xmlElement *': ('O', "xmlElement", "xmlElementPtr", "xmlElementPtr"),
'xmlAttributePtr': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'const xmlAttributePtr': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'xmlAttribute *': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'const xmlAttribute *': ('O', "xmlAttribute", "xmlAttributePtr", "xmlAttributePtr"),
'xmlNsPtr': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'const xmlNsPtr': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'xmlNs *': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'const xmlNs *': ('O', "xmlNode", "xmlNsPtr", "xmlNsPtr"),
'xmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const xmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'xmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const xmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const htmlDocPtr': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'const htmlDoc *': ('O', "xmlNode", "xmlDocPtr", "xmlDocPtr"),
'htmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const htmlNodePtr': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'htmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'const htmlNode *': ('O', "xmlNode", "xmlNodePtr", "xmlNodePtr"),
'xmlXPathContextPtr': ('O', "xmlXPathContext", "xmlXPathContextPtr", "xmlXPathContextPtr"),
'xmlXPathContext *': ('O', "xpathContext", "xmlXPathContextPtr", "xmlXPathContextPtr"),
'xmlXPathParserContextPtr': ('O', "xmlXPathParserContext", "xmlXPathParserContextPtr", "xmlXPathParserContextPtr"),
'xmlParserCtxtPtr': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'xmlParserCtxt *': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'htmlParserCtxtPtr': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'htmlParserCtxt *': ('O', "parserCtxt", "xmlParserCtxtPtr", "xmlParserCtxtPtr"),
'xmlValidCtxtPtr': ('O', "ValidCtxt", "xmlValidCtxtPtr", "xmlValidCtxtPtr"),
'xmlCatalogPtr': ('O', "catalog", "xmlCatalogPtr", "xmlCatalogPtr"),
'FILE *': ('O', "File", "FILEPtr", "FILE *"),
'xmlURIPtr': ('O', "URI", "xmlURIPtr", "xmlURIPtr"),
'xmlErrorPtr': ('O', "Error", "xmlErrorPtr", "xmlErrorPtr"),
'xmlOutputBufferPtr': ('O', "outputBuffer", "xmlOutputBufferPtr", "xmlOutputBufferPtr"),
'xmlParserInputBufferPtr': ('O', "inputBuffer", "xmlParserInputBufferPtr", "xmlParserInputBufferPtr"),
'xmlRegexpPtr': ('O', "xmlReg", "xmlRegexpPtr", "xmlRegexpPtr"),
'xmlTextReaderLocatorPtr': ('O', "xmlTextReaderLocator", "xmlTextReaderLocatorPtr", "xmlTextReaderLocatorPtr"),
'xmlTextReaderPtr': ('O', "xmlTextReader", "xmlTextReaderPtr", "xmlTextReaderPtr"),
'xmlRelaxNGPtr': ('O', "relaxNgSchema", "xmlRelaxNGPtr", "xmlRelaxNGPtr"),
'xmlRelaxNGParserCtxtPtr': ('O', "relaxNgParserCtxt", "xmlRelaxNGParserCtxtPtr", "xmlRelaxNGParserCtxtPtr"),
'xmlRelaxNGValidCtxtPtr': ('O', "relaxNgValidCtxt", "xmlRelaxNGValidCtxtPtr", "xmlRelaxNGValidCtxtPtr"),
'xmlSchemaPtr': ('O', "Schema", "xmlSchemaPtr", "xmlSchemaPtr"),
'xmlSchemaParserCtxtPtr': ('O', "SchemaParserCtxt", "xmlSchemaParserCtxtPtr", "xmlSchemaParserCtxtPtr"),
'xmlSchemaValidCtxtPtr': ('O', "SchemaValidCtxt", "xmlSchemaValidCtxtPtr", "xmlSchemaValidCtxtPtr"),
}
py_return_types = {
'xmlXPathObjectPtr': ('O', "foo", "xmlXPathObjectPtr", "xmlXPathObjectPtr"),
}
unknown_types = {}
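# Functions whose first string argument is a raw memory buffer; it is parsed
# with "s#" (pointer + length) instead of a plain "z" NUL-terminated string.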
foreign_encoding_args = (
'htmlCreateMemoryParserCtxt',
'htmlCtxtReadMemory',
'htmlParseChunk',
'htmlReadMemory',
'xmlCreateMemoryParserCtxt',
'xmlCtxtReadMemory',
'xmlCtxtResetPush',
'xmlParseChunk',
'xmlParseMemory',
'xmlReadMemory',
'xmlRecoverMemory',
)
#######################################################################
#
# This part writes the C <-> Python stubs libxml2-py.[ch] and
# the table libxml2-export.c to add when registering the Python module
#
#######################################################################
# Class methods which are written by hand in libxml.c but the Python-level
# code is still automatically generated (so they are not in skip_function()).
skip_impl = (
'xmlSaveFileTo',
'xmlSaveFormatFileTo',
)
def skip_function(name):
if name[0:12] == "xmlXPathWrap":
return 1
if name == "xmlFreeParserCtxt":
return 1
if name == "xmlCleanupParser":
return 1
if name == "xmlFreeTextReader":
return 1
# if name[0:11] == "xmlXPathNew":
# return 1
# the next function is defined in libxml.c
if name == "xmlRelaxNGFreeValidCtxt":
return 1
if name == "xmlFreeValidCtxt":
return 1
if name == "xmlSchemaFreeValidCtxt":
return 1
#
    # Those are skipped because the Const version is used for the bindings
# instead.
#
if name == "xmlTextReaderBaseUri":
return 1
if name == "xmlTextReaderLocalName":
return 1
if name == "xmlTextReaderName":
return 1
if name == "xmlTextReaderNamespaceUri":
return 1
if name == "xmlTextReaderPrefix":
return 1
if name == "xmlTextReaderXmlLang":
return 1
if name == "xmlTextReaderValue":
return 1
if name == "xmlOutputBufferClose": # handled by by the superclass
return 1
if name == "xmlOutputBufferFlush": # handled by by the superclass
return 1
if name == "xmlErrMemory":
return 1
if name == "xmlValidBuildContentModel":
return 1
if name == "xmlValidateElementDecl":
return 1
if name == "xmlValidateAttributeDecl":
return 1
if name == "xmlPopInputCallbacks":
return 1
return 0
def print_function_wrapper(name, output, export, include):
global py_types
global unknown_types
global functions
global skipped_modules
try:
(desc, ret, args, file, cond) = functions[name]
except:
print("failed to get function %s infos")
return
if file in skipped_modules:
return 0
if skip_function(name) == 1:
return 0
if name in skip_impl:
# Don't delete the function entry in the caller.
return 1
c_call = ""
format=""
format_args=""
c_args=""
c_return=""
c_convert=""
c_release=""
num_bufs=0
for arg in args:
# This should be correct
if arg[1][0:6] == "const ":
arg[1] = arg[1][6:]
c_args = c_args + " %s %s;\n" % (arg[1], arg[0])
if arg[1] in py_types:
(f, t, n, c) = py_types[arg[1]]
if (f == 'z') and (name in foreign_encoding_args) and (num_bufs == 0):
f = 's#'
if f != None:
format = format + f
if t != None:
format_args = format_args + ", &pyobj_%s" % (arg[0])
c_args = c_args + " PyObject *pyobj_%s;\n" % (arg[0])
c_convert = c_convert + \
" %s = (%s) Py%s_Get(pyobj_%s);\n" % (arg[0],
arg[1], t, arg[0])
else:
format_args = format_args + ", &%s" % (arg[0])
if f == 's#':
format_args = format_args + ", &py_buffsize%d" % num_bufs
c_args = c_args + " int py_buffsize%d;\n" % num_bufs
num_bufs = num_bufs + 1
if c_call != "":
c_call = c_call + ", "
c_call = c_call + "%s" % (arg[0])
if t == "File":
c_release = c_release + \
" PyFile_Release(%s);\n" % (arg[0])
else:
if arg[1] in skipped_types:
return 0
if arg[1] in unknown_types:
lst = unknown_types[arg[1]]
lst.append(name)
else:
unknown_types[arg[1]] = [name]
return -1
if format != "":
format = format + ":%s" % (name)
if ret[0] == 'void':
if file == "python_accessor":
if args[1][1] == "char *" or args[1][1] == "xmlChar *":
c_call = "\n if (%s->%s != NULL) xmlFree(%s->%s);\n" % (
args[0][0], args[1][0], args[0][0], args[1][0])
c_call = c_call + " %s->%s = (%s)xmlStrdup((const xmlChar *)%s);\n" % (args[0][0],
args[1][0], args[1][1], args[1][0])
else:
c_call = "\n %s->%s = %s;\n" % (args[0][0], args[1][0],
args[1][0])
else:
c_call = "\n %s(%s);\n" % (name, c_call)
ret_convert = " Py_INCREF(Py_None);\n return(Py_None);\n"
elif ret[0] in py_types:
(f, t, n, c) = py_types[ret[0]]
c_return = c_return + " %s c_retval;\n" % (ret[0])
if file == "python_accessor" and ret[2] != None:
c_call = "\n c_retval = %s->%s;\n" % (args[0][0], ret[2])
else:
c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
ret_convert = " py_retval = libxml_%sWrap((%s) c_retval);\n" % (n,c)
ret_convert = ret_convert + " return(py_retval);\n"
elif ret[0] in py_return_types:
(f, t, n, c) = py_return_types[ret[0]]
c_return = c_return + " %s c_retval;\n" % (ret[0])
c_call = "\n c_retval = %s(%s);\n" % (name, c_call)
ret_convert = " py_retval = libxml_%sWrap((%s) c_retval);\n" % (n,c)
ret_convert = ret_convert + " return(py_retval);\n"
else:
if ret[0] in skipped_types:
return 0
if ret[0] in unknown_types:
lst = unknown_types[ret[0]]
lst.append(name)
else:
unknown_types[ret[0]] = [name]
return -1
if cond != None and cond != "":
include.write("#if %s\n" % cond)
export.write("#if %s\n" % cond)
output.write("#if %s\n" % cond)
include.write("PyObject * ")
include.write("libxml_%s(PyObject *self, PyObject *args);\n" % (name))
export.write(" { (char *)\"%s\", libxml_%s, METH_VARARGS, NULL },\n" %
(name, name))
if file == "python":
# Those have been manually generated
if cond != None and cond != "":
include.write("#endif\n")
export.write("#endif\n")
output.write("#endif\n")
return 1
if file == "python_accessor" and ret[0] != "void" and ret[2] is None:
# Those have been manually generated
if cond != None and cond != "":
include.write("#endif\n")
export.write("#endif\n")
output.write("#endif\n")
return 1
output.write("PyObject *\n")
output.write("libxml_%s(PyObject *self ATTRIBUTE_UNUSED," % (name))
output.write(" PyObject *args")
if format == "":
output.write(" ATTRIBUTE_UNUSED")
output.write(") {\n")
if ret[0] != 'void':
output.write(" PyObject *py_retval;\n")
if c_return != "":
output.write(c_return)
if c_args != "":
output.write(c_args)
if format != "":
output.write("\n if (!PyArg_ParseTuple(args, (char *)\"%s\"%s))\n" %
(format, format_args))
output.write(" return(NULL);\n")
if c_convert != "":
output.write(c_convert)
output.write(c_call)
if c_release != "":
output.write(c_release)
output.write(ret_convert)
output.write("}\n\n")
if cond != None and cond != "":
include.write("#endif /* %s */\n" % cond)
export.write("#endif /* %s */\n" % cond)
output.write("#endif /* %s */\n" % cond)
return 1
def buildStubs():
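    # Parse libxml2-api.xml and libxml2-python-api.xml, then emit the C wrapper
    # stubs (libxml2-py.[ch]) and the export table (libxml2-export.c).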
global py_types
global py_return_types
global unknown_types
try:
f = open(os.path.join(srcPref,"libxml2-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError as msg:
try:
f = open(os.path.join(srcPref,"..","doc","libxml2-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError as msg:
print(file, ":", msg)
sys.exit(1)
n = len(list(functions.keys()))
print("Found %d functions in libxml2-api.xml" % (n))
py_types['pythonObject'] = ('O', "pythonObject", "pythonObject", "pythonObject")
try:
f = open(os.path.join(srcPref,"libxml2-python-api.xml"))
data = f.read()
(parser, target) = getparser()
parser.feed(data)
parser.close()
except IOError as msg:
print(file, ":", msg)
print("Found %d functions in libxml2-python-api.xml" % (
len(list(functions.keys())) - n))
nb_wrap = 0
failed = 0
skipped = 0
include = open("libxml2-py.h", "w")
include.write("/* Generated */\n\n")
export = open("libxml2-export.c", "w")
export.write("/* Generated */\n\n")
wrapper = open("libxml2-py.c", "w")
wrapper.write("/* Generated */\n\n")
wrapper.write("#include <Python.h>\n")
wrapper.write("#include <libxml/xmlversion.h>\n")
wrapper.write("#include <libxml/tree.h>\n")
wrapper.write("#include <libxml/xmlschemastypes.h>\n")
wrapper.write("#include \"libxml_wrap.h\"\n")
wrapper.write("#include \"libxml2-py.h\"\n\n")
for function in sorted(functions.keys()):
ret = print_function_wrapper(function, wrapper, export, include)
if ret < 0:
failed = failed + 1
del functions[function]
if ret == 0:
skipped = skipped + 1
del functions[function]
if ret == 1:
nb_wrap = nb_wrap + 1
include.close()
export.close()
wrapper.close()
print("Generated %d wrapper functions, %d failed, %d skipped\n" % (nb_wrap,
failed, skipped))
print("Missing type converters: ")
for type in list(unknown_types.keys()):
print("%s:%d " % (type, len(unknown_types[type])))
print()
#######################################################################
#
# This part writes part of the Python front-end classes based on
# mapping rules between types and classes and also based on function
# renaming to get consistent function names at the Python level
#
#######################################################################
#
# The types that are automatically remapped to generated classes
#
classes_type = {
"xmlNodePtr": ("._o", "xmlNode(_obj=%s)", "xmlNode"),
"xmlNode *": ("._o", "xmlNode(_obj=%s)", "xmlNode"),
"xmlDocPtr": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"xmlDocPtr *": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"htmlDocPtr": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"htmlxmlDocPtr *": ("._o", "xmlDoc(_obj=%s)", "xmlDoc"),
"xmlAttrPtr": ("._o", "xmlAttr(_obj=%s)", "xmlAttr"),
"xmlAttr *": ("._o", "xmlAttr(_obj=%s)", "xmlAttr"),
"xmlNsPtr": ("._o", "xmlNs(_obj=%s)", "xmlNs"),
"xmlNs *": ("._o", "xmlNs(_obj=%s)", "xmlNs"),
"xmlDtdPtr": ("._o", "xmlDtd(_obj=%s)", "xmlDtd"),
"xmlDtd *": ("._o", "xmlDtd(_obj=%s)", "xmlDtd"),
"xmlEntityPtr": ("._o", "xmlEntity(_obj=%s)", "xmlEntity"),
"xmlEntity *": ("._o", "xmlEntity(_obj=%s)", "xmlEntity"),
"xmlElementPtr": ("._o", "xmlElement(_obj=%s)", "xmlElement"),
"xmlElement *": ("._o", "xmlElement(_obj=%s)", "xmlElement"),
"xmlAttributePtr": ("._o", "xmlAttribute(_obj=%s)", "xmlAttribute"),
"xmlAttribute *": ("._o", "xmlAttribute(_obj=%s)", "xmlAttribute"),
"xmlXPathContextPtr": ("._o", "xpathContext(_obj=%s)", "xpathContext"),
"xmlXPathContext *": ("._o", "xpathContext(_obj=%s)", "xpathContext"),
"xmlXPathParserContext *": ("._o", "xpathParserContext(_obj=%s)", "xpathParserContext"),
"xmlXPathParserContextPtr": ("._o", "xpathParserContext(_obj=%s)", "xpathParserContext"),
"xmlParserCtxtPtr": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"xmlParserCtxt *": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"htmlParserCtxtPtr": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"htmlParserCtxt *": ("._o", "parserCtxt(_obj=%s)", "parserCtxt"),
"xmlValidCtxtPtr": ("._o", "ValidCtxt(_obj=%s)", "ValidCtxt"),
"xmlCatalogPtr": ("._o", "catalog(_obj=%s)", "catalog"),
"xmlURIPtr": ("._o", "URI(_obj=%s)", "URI"),
"xmlErrorPtr": ("._o", "Error(_obj=%s)", "Error"),
"xmlOutputBufferPtr": ("._o", "outputBuffer(_obj=%s)", "outputBuffer"),
"xmlParserInputBufferPtr": ("._o", "inputBuffer(_obj=%s)", "inputBuffer"),
"xmlRegexpPtr": ("._o", "xmlReg(_obj=%s)", "xmlReg"),
"xmlTextReaderLocatorPtr": ("._o", "xmlTextReaderLocator(_obj=%s)", "xmlTextReaderLocator"),
"xmlTextReaderPtr": ("._o", "xmlTextReader(_obj=%s)", "xmlTextReader"),
'xmlRelaxNGPtr': ('._o', "relaxNgSchema(_obj=%s)", "relaxNgSchema"),
'xmlRelaxNGParserCtxtPtr': ('._o', "relaxNgParserCtxt(_obj=%s)", "relaxNgParserCtxt"),
'xmlRelaxNGValidCtxtPtr': ('._o', "relaxNgValidCtxt(_obj=%s)", "relaxNgValidCtxt"),
'xmlSchemaPtr': ("._o", "Schema(_obj=%s)", "Schema"),
'xmlSchemaParserCtxtPtr': ("._o", "SchemaParserCtxt(_obj=%s)", "SchemaParserCtxt"),
'xmlSchemaValidCtxtPtr': ("._o", "SchemaValidCtxt(_obj=%s)", "SchemaValidCtxt"),
}
converter_type = {
"xmlXPathObjectPtr": "xpathObjectRet(%s)",
}
primary_classes = ["xmlNode", "xmlDoc"]
classes_ancestor = {
"xmlNode" : "xmlCore",
"xmlDtd" : "xmlNode",
"xmlDoc" : "xmlNode",
"xmlAttr" : "xmlNode",
"xmlNs" : "xmlNode",
"xmlEntity" : "xmlNode",
"xmlElement" : "xmlNode",
"xmlAttribute" : "xmlNode",
"outputBuffer": "ioWriteWrapper",
"inputBuffer": "ioReadWrapper",
"parserCtxt": "parserCtxtCore",
"xmlTextReader": "xmlTextReaderCore",
"ValidCtxt": "ValidCtxtCore",
"SchemaValidCtxt": "SchemaValidCtxtCore",
"relaxNgValidCtxt": "relaxNgValidCtxtCore",
}
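# C destructor invoked from the generated __del__ of each wrapped class.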
classes_destructors = {
"parserCtxt": "xmlFreeParserCtxt",
"catalog": "xmlFreeCatalog",
"URI": "xmlFreeURI",
# "outputBuffer": "xmlOutputBufferClose",
"inputBuffer": "xmlFreeParserInputBuffer",
"xmlReg": "xmlRegFreeRegexp",
"xmlTextReader": "xmlFreeTextReader",
"relaxNgSchema": "xmlRelaxNGFree",
"relaxNgParserCtxt": "xmlRelaxNGFreeParserCtxt",
"relaxNgValidCtxt": "xmlRelaxNGFreeValidCtxt",
"Schema": "xmlSchemaFree",
"SchemaParserCtxt": "xmlSchemaFreeParserCtxt",
"SchemaValidCtxt": "xmlSchemaFreeValidCtxt",
"ValidCtxt": "xmlFreeValidCtxt",
}
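# Functions that may legitimately return NULL; their wrappers return None
# instead of raising an exception.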
functions_noexcept = {
"xmlHasProp": 1,
"xmlHasNsProp": 1,
"xmlDocSetRootElement": 1,
"xmlNodeGetNs": 1,
"xmlNodeGetNsDefs": 1,
"xmlNextElementSibling": 1,
"xmlPreviousElementSibling": 1,
"xmlFirstElementChild": 1,
"xmlLastElementChild": 1,
}
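# Classes whose instances keep a Python reference (under the given attribute
# name) to the object they were created from; see the reference-keeping logic
# in buildWrappers().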
reference_keepers = {
"xmlTextReader": [('inputBuffer', 'input')],
"relaxNgValidCtxt": [('relaxNgSchema', 'schema')],
"SchemaValidCtxt": [('Schema', 'schema')],
}
function_classes = {}
function_classes["None"] = []
def nameFixup(name, classe, type, file):
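    # Derive the Python-level method/function name from the C name by stripping
    # the class or module prefix and (usually) lower-casing the first remaining letter.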
listname = classe + "List"
ll = len(listname)
l = len(classe)
if name[0:l] == listname:
func = name[l:]
func = func[0:1].lower() + func[1:]
elif name[0:12] == "xmlParserGet" and file == "python_accessor":
func = name[12:]
func = func[0:1].lower() + func[1:]
elif name[0:12] == "xmlParserSet" and file == "python_accessor":
func = name[12:]
func = func[0:1].lower() + func[1:]
elif name[0:10] == "xmlNodeGet" and file == "python_accessor":
func = name[10:]
func = func[0:1].lower() + func[1:]
elif name[0:9] == "xmlURIGet" and file == "python_accessor":
func = name[9:]
func = func[0:1].lower() + func[1:]
elif name[0:9] == "xmlURISet" and file == "python_accessor":
func = name[6:]
func = func[0:1].lower() + func[1:]
elif name[0:11] == "xmlErrorGet" and file == "python_accessor":
func = name[11:]
func = func[0:1].lower() + func[1:]
elif name[0:17] == "xmlXPathParserGet" and file == "python_accessor":
func = name[17:]
func = func[0:1].lower() + func[1:]
elif name[0:11] == "xmlXPathGet" and file == "python_accessor":
func = name[11:]
func = func[0:1].lower() + func[1:]
elif name[0:11] == "xmlXPathSet" and file == "python_accessor":
func = name[8:]
func = func[0:1].lower() + func[1:]
elif name[0:15] == "xmlOutputBuffer" and file != "python":
func = name[15:]
func = func[0:1].lower() + func[1:]
elif name[0:20] == "xmlParserInputBuffer" and file != "python":
func = name[20:]
func = func[0:1].lower() + func[1:]
elif name[0:9] == "xmlRegexp" and file == "xmlregexp":
func = "regexp" + name[9:]
elif name[0:6] == "xmlReg" and file == "xmlregexp":
func = "regexp" + name[6:]
elif name[0:20] == "xmlTextReaderLocator" and file == "xmlreader":
func = name[20:]
elif name[0:18] == "xmlTextReaderConst" and file == "xmlreader":
func = name[18:]
elif name[0:13] == "xmlTextReader" and file == "xmlreader":
func = name[13:]
elif name[0:12] == "xmlReaderNew" and file == "xmlreader":
func = name[9:]
elif name[0:11] == "xmlACatalog":
func = name[11:]
func = func[0:1].lower() + func[1:]
elif name[0:l] == classe:
func = name[l:]
func = func[0:1].lower() + func[1:]
elif name[0:7] == "libxml_":
func = name[7:]
func = func[0:1].lower() + func[1:]
elif name[0:6] == "xmlGet":
func = name[6:]
func = func[0:1].lower() + func[1:]
elif name[0:3] == "xml":
func = name[3:]
func = func[0:1].lower() + func[1:]
else:
func = name
if func[0:5] == "xPath":
func = "xpath" + func[5:]
elif func[0:4] == "xPtr":
func = "xpointer" + func[4:]
elif func[0:8] == "xInclude":
func = "xinclude" + func[8:]
elif func[0:2] == "iD":
func = "ID" + func[2:]
elif func[0:3] == "uRI":
func = "URI" + func[3:]
elif func[0:4] == "uTF8":
func = "UTF8" + func[4:]
elif func[0:3] == 'sAX':
func = "SAX" + func[3:]
return func
def functionCompare(info1, info2):
(index1, func1, name1, ret1, args1, file1) = info1
(index2, func2, name2, ret2, args2, file2) = info2
if file1 == file2:
if func1 < func2:
return -1
if func1 > func2:
return 1
if file1 == "python_accessor":
return -1
if file2 == "python_accessor":
return 1
if file1 < file2:
return -1
if file1 > file2:
return 1
return 0
def cmp_to_key(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
def writeDoc(name, args, indent, output):
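    # Emit the function description from the API file as an indented docstring,
    # wrapping long lines at roughly 60 characters.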
if functions[name][0] is None or functions[name][0] == "":
return
val = functions[name][0]
val = val.replace("NULL", "None")
output.write(indent)
output.write('"""')
while len(val) > 60:
if val[0] == " ":
val = val[1:]
continue
str = val[0:60]
i = str.rfind(" ")
if i < 0:
i = 60
str = val[0:i]
val = val[i:]
output.write(str)
output.write('\n ')
output.write(indent)
output.write(val)
output.write(' """\n')
def buildWrappers():
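    # Generate libxml2class.py (the Python class layer) and libxml2class.txt
    # (a readable summary); functions whose first (or second) argument maps to
    # a known class become methods of that class, the rest stay module-level.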
global ctypes
global py_types
global py_return_types
global unknown_types
global functions
global function_classes
global classes_type
global classes_list
global converter_type
global primary_classes
global converter_type
global classes_ancestor
global converter_type
global primary_classes
global classes_ancestor
global classes_destructors
global functions_noexcept
for type in classes_type.keys():
function_classes[classes_type[type][2]] = []
#
# Build the list of C types to look for ordered to start
# with primary classes
#
ctypes = []
classes_list = []
ctypes_processed = {}
classes_processed = {}
for classe in primary_classes:
classes_list.append(classe)
classes_processed[classe] = ()
for type in classes_type.keys():
tinfo = classes_type[type]
if tinfo[2] == classe:
ctypes.append(type)
ctypes_processed[type] = ()
for type in sorted(classes_type.keys()):
if type in ctypes_processed:
continue
tinfo = classes_type[type]
if tinfo[2] not in classes_processed:
classes_list.append(tinfo[2])
classes_processed[tinfo[2]] = ()
ctypes.append(type)
ctypes_processed[type] = ()
for name in functions.keys():
found = 0
(desc, ret, args, file, cond) = functions[name]
for type in ctypes:
classe = classes_type[type][2]
if name[0:3] == "xml" and len(args) >= 1 and args[0][1] == type:
found = 1
func = nameFixup(name, classe, type, file)
info = (0, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:3] == "xml" and len(args) >= 2 and args[1][1] == type \
and file != "python_accessor":
found = 1
func = nameFixup(name, classe, type, file)
info = (1, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:4] == "html" and len(args) >= 1 and args[0][1] == type:
found = 1
func = nameFixup(name, classe, type, file)
info = (0, func, name, ret, args, file)
function_classes[classe].append(info)
elif name[0:4] == "html" and len(args) >= 2 and args[1][1] == type \
and file != "python_accessor":
found = 1
func = nameFixup(name, classe, type, file)
info = (1, func, name, ret, args, file)
function_classes[classe].append(info)
if found == 1:
continue
if name[0:8] == "xmlXPath":
continue
if name[0:6] == "xmlStr":
continue
if name[0:10] == "xmlCharStr":
continue
func = nameFixup(name, "None", file, file)
info = (0, func, name, ret, args, file)
function_classes['None'].append(info)
classes = open("libxml2class.py", "w")
txt = open("libxml2class.txt", "w")
txt.write(" Generated Classes for libxml2-python\n\n")
txt.write("#\n# Global functions of the module\n#\n\n")
if "None" in function_classes:
flist = function_classes["None"]
flist = sorted(flist, key=cmp_to_key(functionCompare))
oldfile = ""
for info in flist:
(index, func, name, ret, args, file) = info
if file != oldfile:
classes.write("#\n# Functions from module %s\n#\n\n" % file)
txt.write("\n# functions from module %s\n" % file)
oldfile = file
classes.write("def %s(" % func)
txt.write("%s()\n" % func)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
classes.write("%s" % arg[0])
n = n + 1
classes.write("):\n")
writeDoc(name, args, ' ', classes)
for arg in args:
if arg[1] in classes_type:
classes.write(" if %s is None: %s__o = None\n" %
(arg[0], arg[0]))
classes.write(" else: %s__o = %s%s\n" %
(arg[0], arg[0], classes_type[arg[1]][0]))
if arg[1] in py_types:
(f, t, n, c) = py_types[arg[1]]
if t == "File":
classes.write(" if %s is not None: %s.flush()\n" % (
arg[0], arg[0]))
if ret[0] != "void":
classes.write(" ret = ")
else:
classes.write(" ")
classes.write("libxml2mod.%s(" % name)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
classes.write("%s" % arg[0])
if arg[1] in classes_type:
classes.write("__o")
n = n + 1
classes.write(")\n")
# This may be needed to reposition the I/O, but likely to cause more harm
# than good. Those changes in Python3 really break the model.
# for arg in args:
# if arg[1] in py_types:
# (f, t, n, c) = py_types[arg[1]]
# if t == "File":
# classes.write(" if %s is not None: %s.seek(0,0)\n"%(
# arg[0], arg[0]))
if ret[0] != "void":
if ret[0] in classes_type:
#
# Raise an exception
#
if name in functions_noexcept:
classes.write(" if ret is None:return None\n")
elif name.find("URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif name.find("XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif name.find("Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
classes.write(" return ")
classes.write(classes_type[ret[0]][1] % ("ret"))
classes.write("\n")
else:
classes.write(" return ret\n")
classes.write("\n")
txt.write("\n\n#\n# Set of classes of the module\n#\n\n")
for classname in classes_list:
if classname == "None":
pass
else:
if classname in classes_ancestor:
txt.write("\n\nClass %s(%s)\n" % (classname,
classes_ancestor[classname]))
classes.write("class %s(%s):\n" % (classname,
classes_ancestor[classname]))
classes.write(" def __init__(self, _obj=None):\n")
if classes_ancestor[classname] == "xmlCore" or \
classes_ancestor[classname] == "xmlNode":
classes.write(" if checkWrapper(_obj) != 0:")
classes.write(" raise TypeError")
classes.write("('%s got a wrong wrapper object type')\n" % \
classname)
if classname in reference_keepers:
rlist = reference_keepers[classname]
for ref in rlist:
classes.write(" self.%s = None\n" % ref[1])
classes.write(" self._o = _obj\n")
classes.write(" %s.__init__(self, _obj=_obj)\n\n" % (
classes_ancestor[classname]))
if classes_ancestor[classname] == "xmlCore" or \
classes_ancestor[classname] == "xmlNode":
classes.write(" def __repr__(self):\n")
format = "<%s (%%s) object at 0x%%x>" % (classname)
classes.write(" return \"%s\" %% (self.name, int(pos_id (self)))\n\n" % (
format))
else:
txt.write("Class %s()\n" % (classname))
classes.write("class %s:\n" % (classname))
classes.write(" def __init__(self, _obj=None):\n")
if classname in reference_keepers:
list = reference_keepers[classname]
for ref in list:
classes.write(" self.%s = None\n" % ref[1])
classes.write(" if _obj != None:self._o = _obj;return\n")
classes.write(" self._o = None\n\n")
destruct=None
if classname in classes_destructors:
classes.write(" def __del__(self):\n")
classes.write(" if self._o != None:\n")
classes.write(" libxml2mod.%s(self._o)\n" %
classes_destructors[classname])
classes.write(" self._o = None\n\n")
destruct=classes_destructors[classname]
flist = function_classes[classname]
flist = sorted(flist, key=cmp_to_key(functionCompare))
oldfile = ""
for info in flist:
(index, func, name, ret, args, file) = info
#
# Do not provide as method the destructors for the class
# to avoid double free
#
if name == destruct:
continue
if file != oldfile:
if file == "python_accessor":
classes.write(" # accessors for %s\n" % (classname))
txt.write(" # accessors\n")
else:
classes.write(" #\n")
classes.write(" # %s functions from module %s\n" % (
classname, file))
txt.write("\n # functions from module %s\n" % file)
classes.write(" #\n\n")
oldfile = file
classes.write(" def %s(self" % func)
txt.write(" %s()\n" % func)
n = 0
for arg in args:
if n != index:
classes.write(", %s" % arg[0])
n = n + 1
classes.write("):\n")
writeDoc(name, args, ' ', classes)
n = 0
for arg in args:
if arg[1] in classes_type:
if n != index:
classes.write(" if %s is None: %s__o = None\n" %
(arg[0], arg[0]))
classes.write(" else: %s__o = %s%s\n" %
(arg[0], arg[0], classes_type[arg[1]][0]))
n = n + 1
if ret[0] != "void":
classes.write(" ret = ")
else:
classes.write(" ")
classes.write("libxml2mod.%s(" % name)
n = 0
for arg in args:
if n != 0:
classes.write(", ")
if n != index:
classes.write("%s" % arg[0])
if arg[1] in classes_type:
classes.write("__o")
else:
classes.write("self")
if arg[1] in classes_type:
classes.write(classes_type[arg[1]][0])
n = n + 1
classes.write(")\n")
if ret[0] != "void":
if ret[0] in classes_type:
#
# Raise an exception
#
if name in functions_noexcept:
classes.write(
" if ret is None:return None\n")
elif name.find("URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif name.find("XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif name.find("Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
#
# generate the returned class wrapper for the object
#
classes.write(" __tmp = ")
classes.write(classes_type[ret[0]][1] % ("ret"))
classes.write("\n")
#
                    # Sometimes one needs to keep references to the source
                    # class in the returned class object.
# See reference_keepers for the list
#
tclass = classes_type[ret[0]][2]
if tclass in reference_keepers:
list = reference_keepers[tclass]
for pref in list:
if pref[0] == classname:
classes.write(" __tmp.%s = self\n" %
pref[1])
#
# return the class
#
classes.write(" return __tmp\n")
elif ret[0] in converter_type:
#
# Raise an exception
#
if name in functions_noexcept:
classes.write(
" if ret is None:return None")
elif name.find("URI") >= 0:
classes.write(
" if ret is None:raise uriError('%s() failed')\n"
% (name))
elif name.find("XPath") >= 0:
classes.write(
" if ret is None:raise xpathError('%s() failed')\n"
% (name))
elif name.find("Parse") >= 0:
classes.write(
" if ret is None:raise parserError('%s() failed')\n"
% (name))
else:
classes.write(
" if ret is None:raise treeError('%s() failed')\n"
% (name))
classes.write(" return ")
classes.write(converter_type[ret[0]] % ("ret"))
classes.write("\n")
else:
classes.write(" return ret\n")
classes.write("\n")
#
# Generate enum constants
#
for type,enum in enums.items():
classes.write("# %s\n" % type)
items = enum.items()
items = sorted(items, key=(lambda i: int(i[1])))
for name,value in items:
classes.write("%s = %s\n" % (name,value))
classes.write("\n")
txt.close()
classes.close()
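# For reference: the loop above emits Python wrapper methods of roughly the
# following shape (an illustrative sketch, not verbatim output; the exact code
# depends on the API description files). libxml2mod.xmlAddChild is a real
# C-level entry point, here wrapped as a method of the generated xmlNode class:
#
#     def addChild(self, cur):
#         if cur is None: cur__o = None
#         else: cur__o = cur._o
#         ret = libxml2mod.xmlAddChild(self._o, cur__o)
#         if ret is None:raise treeError('xmlAddChild() failed')
#         __tmp = xmlNode(_obj=ret)
#         return __tmp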
buildStubs()
buildWrappers()
| mit | 1,028,407,188,964,338,600 | 38.711726 | 120 | 0.487635 | false |
metacloud/python-cinderclient | cinderclient/auth_plugin.py | 8 | 4568 | # Copyright 2013 OpenStack Foundation
# Copyright 2013 Spanish National Research Council.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import pkg_resources
import six
from cinderclient import exceptions
from cinderclient import utils
logger = logging.getLogger(__name__)
_discovered_plugins = {}
def discover_auth_systems():
"""Discover the available auth-systems.
This won't take into account the old style auth-systems.
"""
ep_name = 'openstack.client.auth_plugin'
for ep in pkg_resources.iter_entry_points(ep_name):
try:
auth_plugin = ep.load()
except (ImportError, pkg_resources.UnknownExtra, AttributeError) as e:
logger.debug("ERROR: Cannot load auth plugin %s" % ep.name)
logger.debug(e, exc_info=1)
else:
_discovered_plugins[ep.name] = auth_plugin
def load_auth_system_opts(parser):
"""Load options needed by the available auth-systems into a parser.
This function will try to populate the parser with options from the
available plugins.
"""
for name, auth_plugin in six.iteritems(_discovered_plugins):
add_opts_fn = getattr(auth_plugin, "add_opts", None)
if add_opts_fn:
group = parser.add_argument_group("Auth-system '%s' options" %
name)
add_opts_fn(group)
def load_plugin(auth_system):
if auth_system in _discovered_plugins:
return _discovered_plugins[auth_system]()
# NOTE(aloga): If we arrive here, the plugin will be an old-style one,
# so we have to create a fake AuthPlugin for it.
return DeprecatedAuthPlugin(auth_system)
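# Illustrative usage sketch (the "fakeauth" name, parsed_args and the
# http_client object are hypothetical, not shipped with this module): a
# third-party package registers its plugin under the
# "openstack.client.auth_plugin" entry point, after which a caller can do
# roughly:
#
#     discover_auth_systems()
#     plugin = load_plugin("fakeauth")
#     plugin.parse_opts(parsed_args)
#     plugin.authenticate(http_client, plugin.get_auth_url())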
class BaseAuthPlugin(object):
"""Base class for authentication plugins.
An authentication plugin needs to override at least the authenticate
method to be a valid plugin.
"""
def __init__(self):
self.opts = {}
def get_auth_url(self):
"""Return the auth url for the plugin (if any)."""
return None
@staticmethod
def add_opts(parser):
"""Populate and return the parser with the options for this plugin.
If the plugin does not need any options, it should return the same
parser untouched.
"""
return parser
def parse_opts(self, args):
"""Parse the actual auth-system options if any.
This method is expected to populate the attribute self.opts with a
dict containing the options and values needed to make authentication.
If the dict is empty, the client should assume that it needs the same
options as the 'keystone' auth system (i.e. os_username and
os_password).
Returns the self.opts dict.
"""
return self.opts
def authenticate(self, cls, auth_url):
"""Authenticate using plugin defined method."""
raise exceptions.AuthSystemNotFound(self.auth_system)
class DeprecatedAuthPlugin(object):
"""Class to mimic the AuthPlugin class for deprecated auth systems.
Old auth systems only define two entry points: openstack.client.auth_url
and openstack.client.authenticate. This class will load those entry points
into a class similar to a valid AuthPlugin.
"""
def __init__(self, auth_system):
self.auth_system = auth_system
def authenticate(cls, auth_url):
raise exceptions.AuthSystemNotFound(self.auth_system)
self.opts = {}
self.get_auth_url = lambda: None
self.authenticate = authenticate
self._load_endpoints()
def _load_endpoints(self):
ep_name = 'openstack.client.auth_url'
fn = utils._load_entry_point(ep_name, name=self.auth_system)
if fn:
self.get_auth_url = fn
ep_name = 'openstack.client.authenticate'
fn = utils._load_entry_point(ep_name, name=self.auth_system)
if fn:
self.authenticate = fn
def parse_opts(self, args):
return self.opts
| apache-2.0 | 4,118,304,488,959,902,700 | 30.944056 | 78 | 0.658494 | false |
sbstp/streamlink | src/streamlink_cli/utils/player.py | 23 | 1244 | import os
import sys
from ..compat import shlex_quote
def check_paths(exes, paths):
for path in paths:
for exe in exes:
path = os.path.expanduser(os.path.join(path, exe))
if os.path.isfile(path):
return path
def find_default_player():
if "darwin" in sys.platform:
paths = os.environ.get("PATH", "").split(":")
paths += ["/Applications/VLC.app/Contents/MacOS/"]
paths += ["~/Applications/VLC.app/Contents/MacOS/"]
path = check_paths(("VLC", "vlc"), paths)
elif "win32" in sys.platform:
exename = "vlc.exe"
paths = os.environ.get("PATH", "").split(";")
path = check_paths((exename,), paths)
if not path:
subpath = "VideoLAN\\VLC\\"
envvars = ("PROGRAMFILES", "PROGRAMFILES(X86)", "PROGRAMW6432")
paths = filter(None, (os.environ.get(var) for var in envvars))
paths = (os.path.join(p, subpath) for p in paths)
path = check_paths((exename,), paths)
else:
paths = os.environ.get("PATH", "").split(":")
path = check_paths(("vlc",), paths)
if path:
# Quote command because it can contain space
return shlex_quote(path)
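# Illustrative example (paths depend on the host system): on a Linux box with
# VLC installed, find_default_player() typically returns something like
# "/usr/bin/vlc", shell-quoted by shlex_quote in case the path contains spaces.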
| bsd-2-clause | -293,428,143,168,114,900 | 30.897436 | 75 | 0.561897 | false |
akhmadMizkat/odoo | addons/hr_holidays/hr_holidays.py | 1 | 39047 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com)
import calendar
import datetime
from datetime import date
import logging
import math
import time
from operator import attrgetter
from werkzeug import url_encode
from dateutil.relativedelta import relativedelta
from openerp.exceptions import UserError, AccessError
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class hr_holidays_status(osv.osv):
_name = "hr.holidays.status"
_description = "Leave Type"
def get_days(self, cr, uid, ids, employee_id, context=None):
result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0,
virtual_remaining_leaves=0)) for id in ids)
holiday_ids = self.pool['hr.holidays'].search(cr, uid, [('employee_id', '=', employee_id),
('state', 'in', ['confirm', 'validate1', 'validate']),
('holiday_status_id', 'in', ids)
], context=context)
for holiday in self.pool['hr.holidays'].browse(cr, uid, holiday_ids, context=context):
status_dict = result[holiday.holiday_status_id.id]
if holiday.type == 'add':
if holiday.state == 'validate':
                    # note: add only validated allocations, even for the virtual
                    # count; otherwise an allocation that is pending and then
                    # refused would let the employee create more leaves than possible
status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp
status_dict['max_leaves'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] += holiday.number_of_days_temp
elif holiday.type == 'remove': # number of days is negative
status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['leaves_taken'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] -= holiday.number_of_days_temp
return result
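    # Illustrative shape of the dict returned above (numbers are made up):
    #     {status_id: {'max_leaves': 20.0, 'leaves_taken': 5.0,
    #                  'remaining_leaves': 15.0,
    #                  'virtual_remaining_leaves': 12.0}}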
def _user_left_days(self, cr, uid, ids, name, args, context=None):
employee_id = False
if context and 'employee_id' in context:
employee_id = context['employee_id']
else:
employee_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if employee_ids:
employee_id = employee_ids[0]
if employee_id:
res = self.get_days(cr, uid, ids, employee_id, context=context)
else:
res = dict((res_id, {'leaves_taken': 0, 'remaining_leaves': 0, 'max_leaves': 0}) for res_id in ids)
return res
_columns = {
'name': fields.char('Leave Type', size=64, required=True, translate=True),
'categ_id': fields.many2one('calendar.event.type', 'Meeting Type',
help='Once a leave is validated, Odoo will create a corresponding meeting of this type in the calendar.'),
'color_name': fields.selection([('red', 'Red'),('blue','Blue'), ('lightgreen', 'Light Green'), ('lightblue','Light Blue'), ('lightyellow', 'Light Yellow'), ('magenta', 'Magenta'),('lightcyan', 'Light Cyan'),('black', 'Black'),('lightpink', 'Light Pink'),('brown', 'Brown'),('violet', 'Violet'),('lightcoral', 'Light Coral'),('lightsalmon', 'Light Salmon'),('lavender', 'Lavender'),('wheat', 'Wheat'),('ivory', 'Ivory')],'Color in Report', required=True, help='This color will be used in the leaves summary located in Reporting\Leaves by Department.'),
'limit': fields.boolean('Allow to Override Limit', help='If you select this check box, the system allows the employees to take more leaves than the available ones for this type and will not take them into account for the "Remaining Legal Leaves" defined on the employee form.'),
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the leave type without removing it."),
'max_leaves': fields.function(_user_left_days, string='Maximum Allowed', help='This value is given by the sum of all holidays requests with a positive value.', multi='user_left_days'),
'leaves_taken': fields.function(_user_left_days, string='Leaves Already Taken', help='This value is given by the sum of all holidays requests with a negative value.', multi='user_left_days'),
'remaining_leaves': fields.function(_user_left_days, string='Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken', multi='user_left_days'),
'virtual_remaining_leaves': fields.function(_user_left_days, string='Virtual Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken - Leaves Waiting Approval', multi='user_left_days'),
'double_validation': fields.boolean('Apply Double Validation', help="When selected, the Allocation/Leave Requests for this type require a second validation to be approved."),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'color_name': 'red',
'active': True,
}
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if not context.get('employee_id'):
            # leave counts are based on employee_id; the result would be inaccurate without the correct employee
return super(hr_holidays_status, self).name_get(cr, uid, ids, context=context)
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if not record.limit:
name = name + (' (%g/%g)' % (record.virtual_remaining_leaves or 0.0, record.max_leaves or 0.0))
res.append((record.id, name))
return res
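    # e.g. with an employee_id in the context, a limited leave type such as
    # "Legal Leaves" renders as "Legal Leaves (7.5/20)" (virtual remaining /
    # maximum allowed); the numbers here are purely illustrative.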
def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
""" Override _search to order the results, according to some employee.
The order is the following
- limit (limited leaves first, such as Legal Leaves)
- virtual remaining leaves (higher the better, so using reverse on sorted)
        This override is necessary because those fields are not stored and depend
        on an employee_id given in context. This sort will be done when there
        is an employee_id in context and no other order has been given
        to the method. """
if context is None:
context = {}
ids = super(hr_holidays_status, self)._search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count, access_rights_uid=access_rights_uid)
if not count and not order and context.get('employee_id'):
leaves = self.browse(cr, uid, ids, context=context)
sort_key = lambda l: (not l.limit, l.virtual_remaining_leaves)
return map(int, leaves.sorted(key=sort_key, reverse=True))
return ids
class hr_holidays(osv.osv):
_name = "hr.holidays"
_description = "Leave"
_order = "type desc, date_from desc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
def _employee_get(self, cr, uid, context=None):
emp_id = context.get('default_employee_id', False)
if emp_id:
return emp_id
ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
def _compute_number_of_days(self, cr, uid, ids, name, args, context=None):
result = {}
for hol in self.browse(cr, uid, ids, context=context):
if hol.type=='remove':
result[hol.id] = -hol.number_of_days_temp
else:
result[hol.id] = hol.number_of_days_temp
return result
def _get_can_reset(self, cr, uid, ids, name, arg, context=None):
"""User can reset a leave request if it is its own leave request or if
he is an Hr Manager. """
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
group_hr_manager_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')[1]
if group_hr_manager_id in [g.id for g in user.groups_id]:
return dict.fromkeys(ids, True)
result = dict.fromkeys(ids, False)
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.employee_id and holiday.employee_id.user_id and holiday.employee_id.user_id.id == uid:
result[holiday.id] = True
return result
def _check_date(self, cr, uid, ids, context=None):
for holiday in self.browse(cr, uid, ids, context=context):
domain = [
('date_from', '<=', holiday.date_to),
('date_to', '>=', holiday.date_from),
('employee_id', '=', holiday.employee_id.id),
('id', '!=', holiday.id),
('state', 'not in', ['cancel', 'refuse']),
]
nholidays = self.search_count(cr, uid, domain, context=context)
if nholidays:
return False
return True
_check_holidays = lambda self, cr, uid, ids, context=None: self.check_holidays(cr, uid, ids, context=context)
_columns = {
'name': fields.char('Description', size=64),
'state': fields.selection([('draft', 'To Submit'), ('cancel', 'Cancelled'),('confirm', 'To Approve'), ('refuse', 'Refused'), ('validate1', 'Second Approval'), ('validate', 'Approved')],
'Status', readonly=True, track_visibility='onchange', copy=False,
help='The status is set to \'To Submit\', when a holiday request is created.\
\nThe status is \'To Approve\', when holiday request is confirmed by user.\
\nThe status is \'Refused\', when holiday request is refused by manager.\
\nThe status is \'Approved\', when holiday request is approved by manager.'),
'payslip_status': fields.boolean(string='Reported in last payslips',
help='Green this button when the leave has been taken into account in the payslip.'),
'report_note': fields.text('HR Comments'),
'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True),
'date_from': fields.datetime('Start Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, select=True, copy=False),
'date_to': fields.datetime('End Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'holiday_status_id': fields.many2one("hr.holidays.status", "Leave Type", required=True,readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'employee_id': fields.many2one('hr.employee', "Employee", select=True, invisible=False, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'manager_id': fields.many2one('hr.employee', 'First Approval', invisible=False, readonly=True, copy=False,
            help='This area is automatically filled by the user who validates the leave'),
'notes': fields.text('Reasons',readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'number_of_days_temp': fields.float('Allocation', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'number_of_days': fields.function(_compute_number_of_days, string='Number of Days', store=True),
'meeting_id': fields.many2one('calendar.event', 'Meeting'),
'type': fields.selection([('remove','Leave Request'),('add','Allocation Request')], 'Request Type', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help="Choose 'Leave Request' if someone wants to take an off-day. \nChoose 'Allocation Request' if you want to increase the number of leaves available for someone", select=True),
'parent_id': fields.many2one('hr.holidays', 'Parent'),
'linked_request_ids': fields.one2many('hr.holidays', 'parent_id', 'Linked Requests',),
'department_id':fields.related('employee_id', 'department_id', string='Department', type='many2one', relation='hr.department', readonly=True, store=True),
'category_id': fields.many2one('hr.employee.category', "Employee Tag", help='Category of Employee', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'holiday_type': fields.selection([('employee','By Employee'),('category','By Employee Tag')], 'Allocation Mode', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help='By Employee: Allocation/Request for individual Employee, By Employee Tag: Allocation/Request for group of employees in category', required=True),
'manager_id2': fields.many2one('hr.employee', 'Second Approval', readonly=True, copy=False,
            help='This area is automatically filled by the user who validates the leave with second level (if the leave type needs a second validation)'),
'double_validation': fields.related('holiday_status_id', 'double_validation', type='boolean', relation='hr.holidays.status', string='Apply Double Validation'),
'can_reset': fields.function(
_get_can_reset, string="Can reset",
type='boolean'),
}
_defaults = {
'employee_id': _employee_get,
'state': 'confirm',
'type': 'remove',
'user_id': lambda obj, cr, uid, context: uid,
'holiday_type': 'employee',
'payslip_status': False,
}
_constraints = [
(_check_date, 'You can not have 2 leaves that overlaps on same day!', ['date_from', 'date_to']),
(_check_holidays, 'The number of remaining leaves is not sufficient for this leave type.\n'
'Please verify also the leaves waiting for validation.', ['state', 'number_of_days_temp'])
]
_sql_constraints = [
('type_value', "CHECK( (holiday_type='employee' AND employee_id IS NOT NULL) or (holiday_type='category' AND category_id IS NOT NULL))",
"The employee or employee category of this request is missing. Please make sure that your user login is linked to an employee."),
('date_check2', "CHECK ( (type='add') OR (date_from <= date_to))", "The start date must be anterior to the end date."),
('date_check', "CHECK ( number_of_days_temp >= 0 )", "The number of days must be greater than 0."),
]
def name_get(self, cr, uid, ids, context=None):
res = []
for leave in self.browse(cr, uid, ids, context=context):
res.append((leave.id, leave.name or _("%s on %s") % (leave.employee_id.name, leave.holiday_status_id.name)))
return res
def _create_resource_leave(self, cr, uid, leaves, context=None):
        '''This method creates resource.calendar.leaves entries when the leaves are validated.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
for leave in leaves:
vals = {
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_id.calendar_id.id
}
obj_res_leave.create(cr, uid, vals, context=context)
return True
def _remove_resource_leave(self, cr, uid, ids, context=None):
        '''This method removes the resource.calendar.leaves entries linked to the given leaves when they are cancelled/removed.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
leave_ids = obj_res_leave.search(cr, uid, [('holiday_id', 'in', ids)], context=context)
return obj_res_leave.unlink(cr, uid, leave_ids, context=context)
def onchange_type(self, cr, uid, ids, holiday_type, employee_id=False, context=None):
result = {}
if holiday_type == 'employee' and not employee_id:
ids_employee = self.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)])
if ids_employee:
result['value'] = {
'employee_id': ids_employee[0]
}
elif holiday_type != 'employee':
result['value'] = {
'employee_id': False
}
return result
def onchange_employee(self, cr, uid, ids, employee_id):
result = {'value': {'department_id': False}}
if employee_id:
employee = self.pool.get('hr.employee').browse(cr, uid, employee_id)
result['value'] = {'department_id': employee.department_id.id}
return result
# TODO: can be improved using resource calendar method
def _get_number_of_days(self, date_from, date_to):
"""Returns a float equals to the timedelta between two dates given as string."""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
from_dt = datetime.datetime.strptime(date_from, DATETIME_FORMAT)
to_dt = datetime.datetime.strptime(date_to, DATETIME_FORMAT)
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
return diff_day
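    # Illustrative example (dates made up): from '2016-03-01 08:00:00' to
    # '2016-03-03 08:00:00' this returns 2.0; the onchange handlers below then
    # report round(floor(2.0)) + 1 == 3 days for such a request.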
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel', 'confirm']:
raise UserError(_('You cannot delete a leave which is in %s state.') % (rec.state,))
return super(hr_holidays, self).unlink(cr, uid, ids, context)
def onchange_date_from(self, cr, uid, ids, date_to, date_from):
"""
If there are no date set for date_to, automatically set one 8 hours later than
the date_from.
Also update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise UserError(_('The start date must be anterior to the end date.'))
result = {'value': {}}
# No date_to set so far: automatically compute one 8 hours later
if date_from and not date_to:
date_to_with_delta = datetime.datetime.strptime(date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8)
result['value']['date_to'] = str(date_to_with_delta)
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def onchange_date_to(self, cr, uid, ids, date_to, date_from):
"""
Update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise UserError(_('The start date must be anterior to the end date.'))
result = {'value': {}}
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def _check_state_access_right(self, cr, uid, vals, context=None):
if vals.get('state') and vals['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
return False
return True
def add_follower(self, cr, uid, ids, employee_id, context=None):
employee = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
if employee and employee.user_id:
self.message_subscribe_users(cr, uid, ids, user_ids=[employee.user_id.id], context=context)
def create(self, cr, uid, values, context=None):
""" Override to avoid automatic logging of creation """
if context is None:
context = {}
employee_id = values.get('employee_id', False)
context = dict(context, mail_create_nolog=True, mail_create_nosubscribe=True)
if not self._check_state_access_right(cr, uid, values, context):
raise AccessError(_('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
if not values.get('name'):
employee_name = self.pool['hr.employee'].browse(cr, uid, employee_id, context=context).name
holiday_type = self.pool['hr.holidays.status'].browse(cr, uid, values.get('holiday_status_id'), context=context).name
values['name'] = _("%s on %s") % (employee_name, holiday_type)
hr_holiday_id = super(hr_holidays, self).create(cr, uid, values, context=context)
self.add_follower(cr, uid, [hr_holiday_id], employee_id, context=context)
return hr_holiday_id
def write(self, cr, uid, ids, vals, context=None):
employee_id = vals.get('employee_id', False)
if not self._check_state_access_right(cr, uid, vals, context):
raise AccessError(_('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % vals.get('state'))
hr_holiday_id = super(hr_holidays, self).write(cr, uid, ids, vals, context=context)
self.add_follower(cr, uid, ids, employee_id, context=context)
return hr_holiday_id
def holidays_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'draft',
'manager_id': False,
'manager_id2': False,
})
to_unlink = []
for record in self.browse(cr, uid, ids, context=context):
for record2 in record.linked_request_ids:
self.holidays_reset(cr, uid, [record2.id], context=context)
to_unlink.append(record2.id)
if to_unlink:
self.unlink(cr, uid, to_unlink, context=context)
return True
def holidays_first_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
return self.write(cr, uid, ids, {'state': 'validate1', 'manager_id': manager}, context=context)
def holidays_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.write(cr, uid, ids, {'state': 'validate'}, context=context)
data_holiday = self.browse(cr, uid, ids)
for record in data_holiday:
if record.double_validation:
self.write(cr, uid, [record.id], {'manager_id2': manager})
else:
self.write(cr, uid, [record.id], {'manager_id': manager})
if record.holiday_type == 'employee' and record.type == 'remove':
meeting_obj = self.pool.get('calendar.event')
meeting_vals = {
'name': record.display_name,
'categ_ids': record.holiday_status_id.categ_id and [(6,0,[record.holiday_status_id.categ_id.id])] or [],
'duration': record.number_of_days_temp * 8,
'description': record.notes,
'user_id': record.user_id.id,
'start': record.date_from,
'stop': record.date_to,
'allday': False,
'state': 'open', # to block that meeting date in the calendar
'class': 'confidential'
}
#Add the partner_id (if exist) as an attendee
if record.user_id and record.user_id.partner_id:
meeting_vals['partner_ids'] = [(4,record.user_id.partner_id.id)]
ctx_no_email = dict(context or {}, no_email=True)
meeting_id = meeting_obj.create(cr, uid, meeting_vals, context=ctx_no_email)
self._create_resource_leave(cr, uid, [record], context=context)
self.write(cr, uid, ids, {'meeting_id': meeting_id})
elif record.holiday_type == 'category':
emp_ids = record.category_id.employee_ids.ids
leave_ids = []
batch_context = dict(context, mail_notify_force_send=False)
for emp in obj_emp.browse(cr, uid, emp_ids, context=context):
vals = {
'name': record.name,
'type': record.type,
'holiday_type': 'employee',
'holiday_status_id': record.holiday_status_id.id,
'date_from': record.date_from,
'date_to': record.date_to,
'notes': record.notes,
'number_of_days_temp': record.number_of_days_temp,
'parent_id': record.id,
'employee_id': emp.id
}
leave_ids.append(self.create(cr, uid, vals, context=batch_context))
for leave_id in leave_ids:
# TODO is it necessary to interleave the calls?
for sig in ('confirm', 'validate', 'second_validate'):
self.signal_workflow(cr, uid, [leave_id], sig)
return True
def holidays_confirm(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'confirm'})
def holidays_refuse(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.state == 'validate1':
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id': manager})
else:
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id2': manager})
self.holidays_cancel(cr, uid, ids, context=context)
return True
def holidays_cancel(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids):
# Delete the meeting
if record.meeting_id:
record.meeting_id.unlink()
# If a category that created several holidays, cancel all related
self.signal_workflow(cr, uid, map(attrgetter('id'), record.linked_request_ids or []), 'refuse')
self._remove_resource_leave(cr, uid, ids, context=context)
return True
def check_holidays(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.holiday_type != 'employee' or record.type != 'remove' or not record.employee_id or record.holiday_status_id.limit:
continue
leave_days = self.pool.get('hr.holidays.status').get_days(cr, uid, [record.holiday_status_id.id], record.employee_id.id, context=context)[record.holiday_status_id.id]
if leave_days['remaining_leaves'] < 0 or leave_days['virtual_remaining_leaves'] < 0:
return False
return True
def toggle_payslip_status(self, cr, uid, ids, context=None):
ids_to_set_true = self.search(cr, uid, [('id', 'in', ids), ('payslip_status', '=', False)], context=context)
ids_to_set_false = list(set(ids) - set(ids_to_set_true))
return self.write(cr, uid, ids_to_set_true, {'payslip_status': True}, context=context) and self.write(cr, uid, ids_to_set_false, {'payslip_status': False}, context=context)
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'state' in init_values and record.state == 'validate':
return 'hr_holidays.mt_holidays_approved'
elif 'state' in init_values and record.state == 'validate1':
return 'hr_holidays.mt_holidays_first_validated'
elif 'state' in init_values and record.state == 'confirm':
return 'hr_holidays.mt_holidays_confirmed'
elif 'state' in init_values and record.state == 'refuse':
return 'hr_holidays.mt_holidays_refused'
return super(hr_holidays, self)._track_subtype(cr, uid, ids, init_values, context=context)
def _notification_group_recipients(self, cr, uid, ids, message, recipients, done_ids, group_data, context=None):
""" Override the mail.thread method to handle HR users and officers
recipients. Indeed those will have specific action in their notification
emails. """
group_hr_user = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'base.group_hr_user')
for recipient in recipients:
if recipient.id in done_ids:
continue
if recipient.user_ids and group_hr_user in recipient.user_ids[0].groups_id.ids:
group_data['group_hr_user'] |= recipient
done_ids.add(recipient.id)
return super(hr_holidays, self)._notification_group_recipients(cr, uid, ids, message, recipients, done_ids, group_data, context=context)
def _notification_get_recipient_groups(self, cr, uid, ids, message, recipients, context=None):
res = super(hr_holidays, self)._notification_get_recipient_groups(cr, uid, ids, message, recipients, context=context)
app_action = '/mail/workflow?%s' % url_encode({'model': self._name, 'res_id': ids[0], 'signal': 'validate'})
ref_action = '/mail/workflow?%s' % url_encode({'model': self._name, 'res_id': ids[0], 'signal': 'refuse'})
holiday = self.browse(cr, uid, ids[0], context=context)
actions = []
if holiday.state == 'confirm':
actions.append({'url': app_action, 'title': 'Approve'})
if holiday.state in ['confirm', 'validate', 'validate1']:
actions.append({'url': ref_action, 'title': 'Refuse'})
res['group_hr_user'] = {
'actions': actions
}
return res
class resource_calendar_leaves(osv.osv):
_inherit = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'holiday_id': fields.many2one("hr.holidays", "Leave Request"),
}
class hr_employee(osv.Model):
_inherit = "hr.employee"
def _set_remaining_days(self, cr, uid, empl_id, name, value, arg, context=None):
if value:
employee = self.browse(cr, uid, empl_id, context=context)
diff = value - employee.remaining_leaves
type_obj = self.pool.get('hr.holidays.status')
holiday_obj = self.pool.get('hr.holidays')
# Find for holidays status
status_ids = type_obj.search(cr, uid, [('limit', '=', False)], context=context)
if len(status_ids) != 1 :
raise osv.except_osv(_('Warning!'),_("The feature behind the field 'Remaining Legal Leaves' can only be used when there is only one leave type with the option 'Allow to Override Limit' unchecked. (%s Found). Otherwise, the update is ambiguous as we cannot decide on which leave type the update has to be done. \nYou may prefer to use the classic menus 'Leave Requests' and 'Allocation Requests' located in 'Human Resources \ Leaves' to manage the leave days of the employees if the configuration does not allow to use this field.") % (len(status_ids)))
status_id = status_ids and status_ids[0] or False
if not status_id:
return False
if diff > 0:
leave_id = holiday_obj.create(cr, uid, {'name': _('Allocation for %s') % employee.name, 'employee_id': employee.id, 'holiday_status_id': status_id, 'type': 'add', 'holiday_type': 'employee', 'number_of_days_temp': diff}, context=context)
elif diff < 0:
raise osv.except_osv(_('Warning!'), _('You cannot reduce validated allocation requests'))
else:
return False
for sig in ('confirm', 'validate', 'second_validate'):
holiday_obj.signal_workflow(cr, uid, [leave_id], sig)
return True
return False
def _get_remaining_days(self, cr, uid, ids, name, args, context=None):
cr.execute("""SELECT
sum(h.number_of_days) as days,
h.employee_id
from
hr_holidays h
join hr_holidays_status s on (s.id=h.holiday_status_id)
where
h.state='validate' and
s.limit=False and
h.employee_id in %s
group by h.employee_id""", (tuple(ids),))
res = cr.dictfetchall()
remaining = {}
for r in res:
remaining[r['employee_id']] = r['days']
for employee_id in ids:
if not remaining.get(employee_id):
remaining[employee_id] = 0.0
return remaining
def _get_leave_status(self, cr, uid, ids, name, args, context=None):
holidays_obj = self.pool.get('hr.holidays')
#Used SUPERUSER_ID to forcefully get status of other user's leave, to bypass record rule
holidays_id = holidays_obj.search(cr, SUPERUSER_ID,
[('employee_id', 'in', ids), ('date_from','<=',time.strftime('%Y-%m-%d %H:%M:%S')),
('date_to','>=',time.strftime('%Y-%m-%d %H:%M:%S')),('type','=','remove'),('state','not in',('cancel','refuse'))],
context=context)
result = {}
for id in ids:
result[id] = {
'current_leave_state': False,
'current_leave_id': False,
'leave_date_from':False,
'leave_date_to':False,
}
for holiday in self.pool.get('hr.holidays').browse(cr, SUPERUSER_ID, holidays_id, context=context):
result[holiday.employee_id.id]['leave_date_from'] = holiday.date_from
result[holiday.employee_id.id]['leave_date_to'] = holiday.date_to
result[holiday.employee_id.id]['current_leave_state'] = holiday.state
result[holiday.employee_id.id]['current_leave_id'] = holiday.holiday_status_id.id
return result
def _leaves_count(self, cr, uid, ids, field_name, arg, context=None):
res = {}
leaves = self.pool['hr.holidays'].read_group(cr, uid, [
('employee_id', 'in', ids),
('holiday_status_id.limit', '=', False), ('state', '=', 'validate')], fields=['number_of_days', 'employee_id'], groupby=['employee_id'])
res.update(dict([(leave['employee_id'][0], leave['number_of_days']) for leave in leaves ]))
return res
def _show_approved_remaining_leave(self, cr, uid, ids, name, args, context=None):
if self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
return dict([(employee_id, True) for employee_id in ids])
return dict([(employee.id, True) for employee in self.browse(cr, uid, ids, context=context) if employee.user_id.id == uid])
def _absent_employee(self, cr, uid, ids, field_name, arg, context=None):
today_date = datetime.datetime.utcnow().date()
today_start = today_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) # get the midnight of the current utc day
today_end = (today_date + relativedelta(hours=23, minutes=59, seconds=59)).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
data = self.pool['hr.holidays'].read_group(cr, uid,
[('employee_id', 'in', ids), ('state', 'not in', ['cancel', 'refuse']),
('date_from', '<=', today_end), ('date_to', '>=', today_start), ('type', '=', 'remove')],
['employee_id'], ['employee_id'], context=context)
result = dict.fromkeys(ids, False)
for d in data:
if d['employee_id_count'] >= 1:
result[d['employee_id'][0]] = True
return result
def _search_absent_employee(self, cr, uid, obj, name, args, context=None):
today_date = datetime.datetime.utcnow().date()
today_start = today_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT) # get the midnight of the current utc day
today_end = (today_date + relativedelta(hours=23, minutes=59, seconds=59)).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
holiday_ids = self.pool['hr.holidays'].search_read(cr, uid, [
('state', 'not in', ['cancel', 'refuse']),
('date_from', '<=', today_end),
('date_to', '>=', today_start),
('type', '=', 'remove')], ['employee_id'], context=context)
absent_employee_ids = [holiday['employee_id'][0] for holiday in holiday_ids if holiday['employee_id']]
return [('id', 'in', absent_employee_ids)]
_columns = {
'remaining_leaves': fields.function(_get_remaining_days, string='Remaining Legal Leaves', fnct_inv=_set_remaining_days, type="float", help='Total number of legal leaves allocated to this employee, change this value to create allocation/leave request. Total based on all the leave types without overriding limit.'),
'current_leave_state': fields.function(
_get_leave_status, multi="leave_status", string="Current Leave Status", type="selection",
selection=[('draft', 'New'), ('confirm', 'Waiting Approval'), ('refuse', 'Refused'),
('validate1', 'Waiting Second Approval'), ('validate', 'Approved'), ('cancel', 'Cancelled')]),
'current_leave_id': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Type", type='many2one', relation='hr.holidays.status'),
'leave_date_from': fields.function(_get_leave_status, multi='leave_status', type='date', string='From Date'),
'leave_date_to': fields.function(_get_leave_status, multi='leave_status', type='date', string='To Date'),
'leaves_count': fields.function(_leaves_count, type='integer', string='Number of Leaves'),
'show_leaves': fields.function(_show_approved_remaining_leave, type='boolean', string="Able to see Remaining Leaves"),
'is_absent_totay': fields.function(_absent_employee, fnct_search=_search_absent_employee, type="boolean", string="Absent Today", default=False)
}
| gpl-3.0 | -563,360,036,962,107,300 | 57.629129 | 568 | 0.602249 | false |
tbeadle/selenium | py/test/selenium/webdriver/common/utils.py | 41 | 1041 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def convert_cookie_to_json(cookie):
cookie_dict = {}
for key, value in cookie.items():
if key == "expires":
cookie_dict["expiry"] = int(value) * 1000
else:
cookie_dict[key] = value
return cookie_dict
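# Illustrative example (values made up):
#     convert_cookie_to_json({'name': 'session', 'value': 'abc', 'expires': 1})
#     == {'name': 'session', 'value': 'abc', 'expiry': 1000}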
| apache-2.0 | -6,418,838,208,798,828,000 | 39.038462 | 62 | 0.7195 | false |
raccoongang/edx-platform | lms/djangoapps/courseware/access_response.py | 2 | 4961 | """
This file contains all the classes used by has_access for error handling
"""
from django.utils.translation import ugettext as _
from xmodule.course_metadata_utils import DEFAULT_START_DATE
class AccessResponse(object):
"""Class that represents a response from a has_access permission check."""
def __init__(self, has_access, error_code=None, developer_message=None, user_message=None):
"""
Creates an AccessResponse object.
Arguments:
has_access (bool): if the user is granted access or not
error_code (String): optional - default is None. Unique identifier
for the specific type of error
developer_message (String): optional - default is None. Message
to show the developer
user_message (String): optional - default is None. Message to
show the user
"""
self.has_access = has_access
self.error_code = error_code
self.developer_message = developer_message
self.user_message = user_message
if has_access:
assert error_code is None
def __nonzero__(self):
"""
Overrides bool().
Allows for truth value testing of AccessResponse objects, so callers
who do not need the specific error information can check if access
is granted.
Returns:
bool: whether or not access is granted
"""
return self.has_access
def to_json(self):
"""
Creates a serializable JSON representation of an AccessResponse object.
Returns:
dict: JSON representation
"""
return {
"has_access": self.has_access,
"error_code": self.error_code,
"developer_message": self.developer_message,
"user_message": self.user_message
}
def __repr__(self):
return "AccessResponse({!r}, {!r}, {!r}, {!r})".format(
self.has_access,
self.error_code,
self.developer_message,
self.user_message
)
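# Illustrative sketch (not part of the module): a denial serializes as
#
#     AccessResponse(False, "course_not_started",
#                    "Course has not started",
#                    "Course has not started").to_json()
#     == {"has_access": False, "error_code": "course_not_started",
#         "developer_message": "Course has not started",
#         "user_message": "Course has not started"}
#
# while bool(response) still works for plain allow/deny checks.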
class AccessError(AccessResponse):
"""
Class that holds information about the error in the case of an access
denial in has_access. Contains the error code, user and developer
messages. Subclasses represent specific errors.
"""
def __init__(self, error_code, developer_message, user_message):
"""
Creates an AccessError object.
An AccessError object represents an AccessResponse where access is
denied (has_access is False).
Arguments:
error_code (String): unique identifier for the specific type of
                error
            developer_message (String): message to show the developer
user_message (String): message to show the user
"""
super(AccessError, self).__init__(False, error_code, developer_message, user_message)
class StartDateError(AccessError):
"""
Access denied because the course has not started yet and the user
is not staff
"""
def __init__(self, start_date):
error_code = "course_not_started"
if start_date == DEFAULT_START_DATE:
developer_message = "Course has not started"
user_message = _("Course has not started")
else:
developer_message = "Course does not start until {}".format(start_date)
user_message = _("Course does not start until {}" # pylint: disable=translation-of-non-string
.format("{:%B %d, %Y}".format(start_date)))
super(StartDateError, self).__init__(error_code, developer_message, user_message)
class MilestoneError(AccessError):
"""
Access denied because the user has unfulfilled milestones
"""
def __init__(self):
error_code = "unfulfilled_milestones"
developer_message = "User has unfulfilled milestones"
user_message = _("You have unfulfilled milestones")
super(MilestoneError, self).__init__(error_code, developer_message, user_message)
class VisibilityError(AccessError):
"""
    Access denied because the user does not have the correct role to view this
    course.
"""
def __init__(self):
error_code = "not_visible_to_user"
developer_message = "Course is not visible to this user"
user_message = _("You do not have access to this course")
super(VisibilityError, self).__init__(error_code, developer_message, user_message)
class MobileAvailabilityError(AccessError):
"""
Access denied because the course is not available on mobile for the user
"""
def __init__(self):
error_code = "mobile_unavailable"
developer_message = "Course is not available on mobile for this user"
user_message = _("You do not have access to this course on a mobile device")
super(MobileAvailabilityError, self).__init__(error_code, developer_message, user_message)
| agpl-3.0 | -3,311,439,934,669,775,000 | 34.690647 | 106 | 0.626487 | false |
aaronboyle/trivial | pygame/faeriewaccor.py | 1 | 7035 |
import pygame,sys,random
from pygame.locals import *
pygame.init()
pygame.font.init()
random.seed()
### Initialize constants
screenResolution = (640,480)
colorBlack = (0,0,0)
colorWhite = (255,255,255)
colorLtGrey = (190,190,190)
colorDkGrey = (80,80,80)
colorGreen = (0,255,0)
colorRed = (255,0,0)
colorBlue = (0,0,255)
colorMagenta = (255,0,255)
colorYellow = (255,255,0)
### initialize game globals
visibleObjects = []
faeries = []
windowLayers = []
### Grid properties - to be moved to class
# grid class will need parameter for location
gridSize = (480,480)
gridCount = 5 # grid is currently always square, this is both x and y size
gridMargin = 20
gridWidth = (((gridSize[0]-(2*gridMargin))/gridCount),((gridSize[1]-(2*gridMargin))/gridCount))
gameSpeed = 5 # number of frames until random item changes
gameCounter = 0
gameScore = 0
spawnCounter = 0
### Infobar properties - todo: move to class
# infobar class will need parameter for location
infobarLoc = (500,20)
infobarSize = (120,440)
infobarMargin = 15
infobarFont = pygame.font.SysFont("arial",20)
class GameSurface(pygame.Surface):
def __init__(self,loc,size):
self.location = loc
super(GameSurface,self).__init__(size)
def draw(self):
gameWindow.blit(self,self.location)
def CoordsToPos(coords):
x = (coords[0] * gridWidth[0]) - (gridWidth[0] / 2) + gridMargin
y = (coords[1] * gridWidth[1]) - (gridWidth[1] / 2) + gridMargin
return(x,y)
def PosToCoords(pos):
x = ((pos[0] - gridMargin) / gridWidth[0]) + 1
y = ((pos[1] - gridMargin) / gridWidth[1]) + 1
return(x,y)
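# Worked example with the 5x5 grid above (gridWidth == (88, 88), gridMargin == 20):
#   CoordsToPos((1, 1)) == (64, 64)   # centre of the top-left cell
#   PosToCoords((64, 64)) == (1, 1)   # and back again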
def RandomCoord():
return ( random.randint(1,gridCount) , random.randint(1,gridCount) )
def Splat():
pass
class ball(pygame.sprite.Sprite):
def __init__(self,rgb,xy,vis=0):
pygame.sprite.Sprite.__init__(self)
self.coords = list(xy)
self.color = rgb
self.visible = vis
self.clock = 0
visibleObjects.append(self)
def place(self,coords):
self.coords = coords
def move(self,direction):
if direction == "right":
self.coords[0]+= 1
elif direction == "left":
self.coords[0]-= 1
elif direction == "up":
self.coords[1]-= 1
elif direction == "down":
self.coords[1]+= 1
def show(self):
self.visible = 1
def hide(self):
self.visible = 0
def toggle(self):
if self.visible == 0:
self.visible = 1
elif self.visible == 1:
self.visible = 0
def draw(self):
if self.visible:
pygame.draw.circle(gameWindow,self.color,CoordsToPos(self.coords),7)
def collide(self,loc):
if loc == self.coords:
return 1
else:
return 0
def tick(self):
self.clock += 1
if self.clock == 20:
faeries.remove(self)
visibleObjects.remove(self)
gameField = GameSurface((0,0),gridSize)
def DrawGrid():
gameField.fill(colorBlack)
for n in range(0,gridCount+1):
#Horizontal lines
lineStart = ( (gridMargin) , (gridMargin + (n * gridWidth[1])) )
lineEnd = ( (gridSize[0]-gridMargin) , (gridMargin + (n * gridWidth[1])) )
pygame.draw.line(gameField, colorDkGrey, lineStart, lineEnd, 2)
#Vertical lines
lineStart = ( (gridMargin + (n * gridWidth[0])) , (gridMargin) )
lineEnd = ( (gridMargin + (n * gridWidth[0])) , (gridSize[1]-gridMargin) )
pygame.draw.line(gameField, colorDkGrey, lineStart, lineEnd, 2)
gameField.draw()
infoBar = GameSurface(infobarLoc,infobarSize)
def DrawInfobar():
infoBar.fill(colorBlack)
pygame.draw.rect(infoBar, colorGreen, ( (0,0),(infobarSize[0],infobarSize[1]) ), 2)
heightOffset = (10)
for text in ["SCORE" , str(gameScore) , " " , "TIME" , str(gameCounter/30)]:
textRender = infobarFont.render(text,1,colorRed)
heightOffset += (textRender.get_height() + 5)
infoBar.blit( textRender , ( 25 , heightOffset ) )
infoBar.draw()
class PopMenu(GameSurface):
def __init__(self,loc,size,font,text):
self.size = size
self.loc = loc
self.font = font
self.text = text
super(PopMenu,self).__init__(loc,size)
def draw(self):
# and now I am box
self.fill(colorBlack)
# and now I has border
pygame.draw.rect( self, colorYellow, ((0,0), self.size) , 1 )
heightOffset = (15)
# and now I texting
for line in self.text:
textRender = self.font.render(line,1,colorRed)
heightOffset += (textRender.get_height() + 5)
# I textings a line
self.blit(textRender , ( 25 , heightOffset ) )
# and I tells the world
gameWindow.blit(self,self.location)
startLoc = (170,100)
startSize = (300,200)
startFont = pygame.font.SysFont("arial",16)
startText = ["FAERYWACCOR"," ","ready to WACC some FAERYS?"," ","press F to F them up, ESC to quit"]
startScreen = PopMenu(startLoc,startSize,startFont,startText)
mallet = ball(colorBlue,(0,0))
# hidden debug balls; PlayGame() toggles them with the "r" and "g" keys
redBall = ball(colorRed,(1,1))
greenBall = ball(colorGreen,(2,2))
def CountdownToGame():
pass
def QuitGame():
pygame.quit()
sys.exit()
def PlayGame():
global gameCounter
global gameScore
global spawnCounter
while 1:
gameWindow.fill(colorBlack)
DrawGrid()
DrawInfobar()
for event in pygame.event.get():
if event.type == MOUSEBUTTONDOWN:
mousex, mousey = event.pos
eventCoords = PosToCoords(event.pos)
if (eventCoords[0] <= gridCount) & (eventCoords[1] <= gridCount):
mallet.place(eventCoords)
print "mallet" , " " , mallet.coords
                    for faery in faeries[:]:  # iterate over a copy; hits remove faeries from the list
print "faery" , " " , faery.coords
if faery.collide( list(mallet.coords) ):
faeries.remove(faery)
visibleObjects.remove(faery)
Splat()
gameScore += 1
print gameScore
mallet.show()
elif event.type == MOUSEBUTTONUP:
mallet.hide()
elif event.type == KEYDOWN:
                if event.key == K_LEFT:
                    mallet.move("left")
                elif event.key == K_RIGHT:
                    mallet.move("right")
                elif event.key == K_UP:
                    mallet.move("up")
                elif event.key == K_DOWN:
                    mallet.move("down")
elif event.key == K_r:
redBall.toggle()
elif event.key == K_g:
greenBall.toggle()
elif event.key == K_ESCAPE:
print "quitting!"
QuitGame()
for object in visibleObjects:
object.draw()
if spawnCounter == gameSpeed:
if random.randint(1,10) == 1:
faeries.append( ball(colorMagenta,RandomCoord(),1) )
            for object in faeries[:]:  # iterate over a copy; tick() removes expired faeries
object.tick()
spawnCounter = 0
pygame.display.flip()
gameCounter += 1
spawnCounter += 1
gameClock.tick(30)
### AND NOW IT STARTS
gameClock = pygame.time.Clock()
gameWindow = pygame.display.set_mode(screenResolution)
def main():
gameWindow.fill(colorBlack)
DrawGrid()
DrawInfobar()
startScreen.draw()
pygame.display.update()
while 1:
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_f:
CountdownToGame()
PlayGame()
if event.key == K_ESCAPE:
QuitGame()
main()
| unlicense | 5,505,095,088,313,380,000 | 16.413366 | 100 | 0.623312 | false |
BrainTech/openbci | obci/utils/tagger_gui/tag_frame.py | 1 | 5016 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore
from timer import Timer
from button import Button
from frame import Frame
from player import Player
import time
class TagFrame(Frame):
def __init__(self):
super(TagFrame, self).__init__()
def init_frame(self, status_bar, tag_signal, params):
self.status_bar = status_bar
self.tag_signal = tag_signal
self.tags = []
self.tag = params['tag_start_name']
self.duration = int(params['duration'])
self.tag_on_end = params['tag_on_end']
self.sound = params['sound']
self.tag_title = params['name']
self.repetitions_number = 0
self.is_first = 0
self.tag_signal = tag_signal
if params['sound']:
self.sound = 1
self.player = Player()
else:
self.sound=0
if self.duration:
self.timer = Timer(self.duration, self)
self.timer.set_position(2)
if self.tag_on_end != '':
self.timer.set_stop_action(self.action_stop)
self.stop_button = Button('stop', self)
self.stop_button.set_position(3)
self.stop_button.connect(self.action_stop)
self.stop_button.set_disable()
elif self.tag_on_end != '':
self.stop_button = Button('stop', self)
self.stop_button.set_position(3)
self.stop_button.connect(self.action_stop)
self.stop_button.set_disable()
self.start_button = Button('start', self)
self.start_button.set_position(1)
self.start_button.connect(self.action_start)
self.clear_button=Button('clear', self)
self.clear_button.set_position(4)
self.clear_button.connect(self.action_clear)
self.tag_title = QtGui.QLabel('{}:'.format(self.tag_title), self)
self.tag_title.move(10, 3)
self.set_off()
def _create_tag(self, tag):
self.tags.append({'timestamp':str(time.time()), 'name':str(tag)})
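    # e.g. a buffered tag looks like {'timestamp': '1400000000.12', 'name': 'trial_start'}
    # (values illustrative); _send_tags() later emits each dict through tag_signal.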
def _create_status_bar_message(self, type_, tag):
        if type_ == 'create':
            return "Tag: {} was created.".format(tag)
        else:
            names = [t['name'] for t in tag]
            if type_ == 'delete':
                if len(names) == 1:
                    return "Tag: {} was deleted.".format(''.join(names))
                else:
                    return "Tags: {} were deleted.".format(', '.join(names))
            elif type_ == 'send':
                if len(names) == 1:
                    return "Tag: {} was sent.".format(''.join(names))
                else:
                    return "Tags: {} were sent.".format(', '.join(names))
def action_start(self):
if self.sound:
self.player.play()
if (not self.repetitions_number) and (not self.is_first):
self.finish_action_elier_frame()
self.start_button.set_disable()
self._create_tag(self.tag)
self.status_bar.show_message(self._create_status_bar_message('create', self.tag))
if self.duration:
self.stop_button.set_enable()
self.timer.clock_start()
elif self.tag_on_end != '':
self.stop_button.set_enable()
else:
self.active_next_frame()
def action_stop(self):
if self.sound:
self.player.play()
if self.duration and self.timer.is_on:
self.timer.clock_stop()
if self.tag_on_end != '':
self._create_tag(self.tag_on_end)
self.status_bar.show_message(self._create_status_bar_message('create', self.tag_on_end))
self.stop_button.set_disable()
self.active_next_frame()
def _update_repetition(self):
self.repetitions_number+=1
def action_clear(self):
self.status_bar.show_message(self._create_status_bar_message('delete', self.tags))
self.tags = []
self.set_on()
self.deactivation_next_frame()
if self.duration:
self.timer.clock_reset()
self._update_repetition()
def set_on(self):
self.start_button.set_enable()
if self.duration:
self.timer.clock_reset()
self.stop_button.set_enable()
self.clear_button.set_enable()
self.set_is_on()
def set_off(self):
self.clear_button.set_disable()
if self.duration:
self.stop_button.set_disable()
self.start_button.set_disable()
self.set_is_off()
def _send_tags(self):
for tag in self.tags:
self.tag_signal.emit(str(tag))
self.tags = []
def finish_frame_action(self):
self.status_bar.show_message(self._create_status_bar_message('send', self.tags))
self._send_tags()
self.set_off()
self.next_frame.remove(self.next_frame[0])
if self.is_first:
self.is_first=0
print self, self.next_frame | gpl-3.0 | -4,335,205,495,966,599,700 | 32.225166 | 100 | 0.555024 | false |
okor/thumbor | tests/loaders/test_file_loader_http_fallback.py | 7 | 1935 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from mock import patch
from os.path import abspath, join, dirname
from unittest import TestCase
from tests.base import TestCase as AsyncTestCase
from preggy import expect
import thumbor
from thumbor.context import Context
from thumbor.config import Config
from thumbor.loaders import LoaderResult
import thumbor.loaders.file_loader_http_fallback as loader
STORAGE_PATH = abspath(join(dirname(__file__), '../fixtures/images/'))
result = LoaderResult()
result.successful = True
def dummy_file_load(context, url, callback, normalize_url_func={}):
result.buffer = 'file'
callback(result)
def dummy_http_load(context, url, callback, normalize_url_func={}):
result.buffer = 'http'
callback(result)
class FileLoaderHttpFallbackFileTestCase(TestCase):
def setUp(self):
config = Config(
FILE_LOADER_ROOT_PATH=STORAGE_PATH
)
self.ctx = Context(config=config)
@patch.object(thumbor.loaders.file_loader, 'load', dummy_file_load)
def test_should_load_file(self):
result = loader.load(self.ctx, 'image.jpg', lambda x: x).result()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('file')
class FileLoaderHttpFallbackHttpTestCase(AsyncTestCase):
@patch.object(thumbor.loaders.http_loader, 'load', dummy_http_load)
def test_should_load_http(self):
url = self.get_url('http:/www.google.com/example_image.png')
config = Config()
ctx = Context(None, config, None)
loader.load(ctx, url, self.stop)
result = self.wait()
expect(result).to_be_instance_of(LoaderResult)
expect(result.buffer).to_equal('http')
| mit | -5,391,886,534,609,443,000 | 27.455882 | 73 | 0.702326 | false |
jungle90/Openstack-Swift-I-O-throttler | test/unit/obj/test_reconstructor.py | 8 | 105935 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import unittest
import os
from hashlib import md5
import mock
import cPickle as pickle
import tempfile
import time
import shutil
import re
import random
from eventlet import Timeout
from contextlib import closing, nested, contextmanager
from gzip import GzipFile
from shutil import rmtree
from swift.common import utils
from swift.common.exceptions import DiskFileError
from swift.obj import diskfile, reconstructor as object_reconstructor
from swift.common import ring
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
POLICIES, EC_POLICY)
from swift.obj.reconstructor import REVERT
from test.unit import (patch_policies, debug_logger, mocked_http_conn,
FabricatedRing, make_timestamp_iter)
@contextmanager
def mock_ssync_sender(ssync_calls=None, response_callback=None, **kwargs):
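    # Replace swift.obj.reconstructor.ssync_sender with a lightweight fake for
    # the duration of the block: every invocation is recorded in ``ssync_calls``
    # (when given) and the callable it returns reports success with an empty
    # map unless ``response_callback`` supplies a different result.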
def fake_ssync(daemon, node, job, suffixes):
if ssync_calls is not None:
ssync_calls.append(
{'node': node, 'job': job, 'suffixes': suffixes})
def fake_call():
if response_callback:
response = response_callback(node, job, suffixes)
else:
response = True, {}
return response
return fake_call
with mock.patch('swift.obj.reconstructor.ssync_sender', fake_ssync):
yield fake_ssync
def make_ec_archive_bodies(policy, test_body):
segment_size = policy.ec_segment_size
# split up the body into buffers
chunks = [test_body[x:x + segment_size]
for x in range(0, len(test_body), segment_size)]
# encode the buffers into fragment payloads
fragment_payloads = []
for chunk in chunks:
fragments = policy.pyeclib_driver.encode(chunk)
if not fragments:
break
fragment_payloads.append(fragments)
# join up the fragment payloads per node
ec_archive_bodies = [''.join(fragments)
for fragments in zip(*fragment_payloads)]
return ec_archive_bodies
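# Pin the reconstructor's notion of its own IPs to 127.0.0.1 so the test rings
# built below always resolve one of their devices as "local".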
def _ips():
return ['127.0.0.1']
object_reconstructor.whataremyips = _ips
def _create_test_rings(path):
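    # Build a pair of tiny object rings (policy 0 and policy 1) under ``path``,
    # with three replicas spread across four loopback devices.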
testgz = os.path.join(path, 'object.ring.gz')
intended_replica2part2dev_id = [
[0, 1, 2],
[1, 2, 3],
[2, 3, 0]
]
intended_devs = [
{'id': 0, 'device': 'sda1', 'zone': 0, 'ip': '127.0.0.0',
'port': 6000},
{'id': 1, 'device': 'sda1', 'zone': 1, 'ip': '127.0.0.1',
'port': 6000},
{'id': 2, 'device': 'sda1', 'zone': 2, 'ip': '127.0.0.2',
'port': 6000},
{'id': 3, 'device': 'sda1', 'zone': 4, 'ip': '127.0.0.3',
'port': 6000}
]
intended_part_shift = 30
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift),
f)
testgz = os.path.join(path, 'object-1.ring.gz')
with closing(GzipFile(testgz, 'wb')) as f:
pickle.dump(
ring.RingData(intended_replica2part2dev_id,
intended_devs, intended_part_shift),
f)
def count_stats(logger, key, metric):
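    # Count how many calls recorded under ``logger.log_dict[key]`` have a first
    # argument matching the ``metric`` pattern.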
count = 0
for record in logger.log_dict[key]:
log_args, log_kwargs = record
m = log_args[0]
if re.match(metric, m):
count += 1
return count
@patch_policies([StoragePolicy(0, name='zero', is_default=True),
ECStoragePolicy(1, name='one', ec_type='jerasure_rs_vand',
ec_ndata=2, ec_nparity=1)])
class TestGlobalSetupObjectReconstructor(unittest.TestCase):
def setUp(self):
self.testdir = tempfile.mkdtemp()
_create_test_rings(self.testdir)
POLICIES[0].object_ring = ring.Ring(self.testdir, ring_name='object')
POLICIES[1].object_ring = ring.Ring(self.testdir, ring_name='object-1')
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = ''
self.devices = os.path.join(self.testdir, 'node')
os.makedirs(self.devices)
os.mkdir(os.path.join(self.devices, 'sda1'))
self.objects = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(POLICIES[0]))
self.objects_1 = os.path.join(self.devices, 'sda1',
diskfile.get_data_dir(POLICIES[1]))
os.mkdir(self.objects)
os.mkdir(self.objects_1)
self.parts = {}
self.parts_1 = {}
self.part_nums = ['0', '1', '2']
for part in self.part_nums:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(self.parts[part])
self.parts_1[part] = os.path.join(self.objects_1, part)
os.mkdir(self.parts_1[part])
self.conf = dict(
swift_dir=self.testdir, devices=self.devices, mount_check='false',
timeout='300', stats_interval='1')
self.logger = debug_logger('test-reconstructor')
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.policy = POLICIES[1]
# most of the reconstructor test methods require that there be
# real objects in place, not just part dirs, so we'll create them
# all here....
        # part 0: 3C1/hash/xxx-1.data <-- job: sync_only - partners (FI 1)
# /xxx.durable <-- included in earlier job (FI 1)
# 061/hash/xxx-1.data <-- included in earlier job (FI 1)
# /xxx.durable <-- included in earlier job (FI 1)
# /xxx-2.data <-- job: sync_revert to index 2
        # part 1: 3C1/hash/xxx-0.data <-- job: sync_only - partners (FI 0)
# /xxx-1.data <-- job: sync_revert to index 1
# /xxx.durable <-- included in earlier jobs (FI 0, 1)
# 061/hash/xxx-1.data <-- included in earlier job (FI 1)
# /xxx.durable <-- included in earlier job (FI 1)
# part 2: 3C1/hash/xxx-2.data <-- job: sync_revert to index 2
# /xxx.durable <-- included in earlier job (FI 2)
# 061/hash/xxx-0.data <-- job: sync_revert to index 0
# /xxx.durable <-- included in earlier job (FI 0)
def _create_frag_archives(policy, obj_path, local_id, obj_set):
# we'll create 2 sets of objects in different suffix dirs
# so we cover all the scenarios we want (3 of them)
# 1) part dir with all FI's matching the local node index
# 2) part dir with one local and mix of others
# 3) part dir with no local FI and one or more others
def part_0(set):
if set == 0:
# just the local
return local_id
else:
                    # one local and all of another
if obj_num == 0:
return local_id
else:
return (local_id + 1) % 3
def part_1(set):
if set == 0:
# one local and all of another
if obj_num == 0:
return local_id
else:
return (local_id + 2) % 3
else:
# just the local node
return local_id
def part_2(set):
# this part is a handoff in our config (always)
                # so let's do a set with indices from different nodes
if set == 0:
return (local_id + 1) % 3
else:
return (local_id + 2) % 3
            # function dictionary for defining test scenarios based on set #
scenarios = {'0': part_0,
'1': part_1,
'2': part_2}
def _create_df(obj_num, part_num):
self._create_diskfile(
part=part_num, object_name='o' + str(obj_set),
policy=policy, frag_index=scenarios[part_num](obj_set),
timestamp=utils.Timestamp(t))
for part_num in self.part_nums:
                # create 3 unique objects per part, each part
# will then have a unique mix of FIs for the
# possible scenarios
for obj_num in range(0, 3):
_create_df(obj_num, part_num)
ips = utils.whataremyips()
for policy in [p for p in POLICIES if p.policy_type == EC_POLICY]:
self.ec_policy = policy
self.ec_obj_ring = self.reconstructor.load_object_ring(
self.ec_policy)
data_dir = diskfile.get_data_dir(self.ec_policy)
for local_dev in [dev for dev in self.ec_obj_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] ==
self.reconstructor.port]:
self.ec_local_dev = local_dev
dev_path = os.path.join(self.reconstructor.devices_dir,
self.ec_local_dev['device'])
self.ec_obj_path = os.path.join(dev_path, data_dir)
# create a bunch of FA's to test
t = 1421181937.70054 # time.time()
with mock.patch('swift.obj.diskfile.time') as mock_time:
# since (a) we are using a fixed time here to create
# frags which corresponds to all the hardcoded hashes and
# (b) the EC diskfile will delete its .data file right
# after creating if it has expired, use this horrible hack
# to prevent the reclaim happening
mock_time.time.return_value = 0.0
_create_frag_archives(self.ec_policy, self.ec_obj_path,
self.ec_local_dev['id'], 0)
_create_frag_archives(self.ec_policy, self.ec_obj_path,
self.ec_local_dev['id'], 1)
break
break
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def _create_diskfile(self, policy=None, part=0, object_name='o',
frag_index=0, timestamp=None, test_data=None):
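        # Write a single fragment archive through the policy's diskfile
        # manager: a .data file carrying the given frag index plus the
        # .durable laid down by commit().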
policy = policy or self.policy
df_mgr = self.reconstructor._df_router[policy]
df = df_mgr.get_diskfile('sda1', part, 'a', 'c', object_name,
policy=policy)
with df.create() as writer:
timestamp = timestamp or utils.Timestamp(time.time())
test_data = test_data or 'test data'
writer.write(test_data)
metadata = {
'X-Timestamp': timestamp.internal,
'Content-Length': len(test_data),
'Etag': md5(test_data).hexdigest(),
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
}
writer.put(metadata)
writer.commit(timestamp)
return df
def assert_expected_jobs(self, part_num, jobs):
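        # Normalize the generated jobs (drop per-run fields, sort suffixes)
        # and compare them field by field against the hand-built expectations
        # for this partition.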
for job in jobs:
del job['path']
del job['policy']
if 'local_index' in job:
del job['local_index']
job['suffixes'].sort()
expected = []
# part num 0
expected.append(
[{
'sync_to': [{
'index': 2,
'replication_port': 6000,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6000,
'replication_ip': '127.0.0.2',
'device': 'sda1',
'id': 2,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061'],
'partition': 0,
'frag_index': 2,
'device': 'sda1',
'local_dev': {
'replication_port': 6000,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1', 'port': 6000,
},
'hashes': {
'061': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}, {
'sync_to': [{
'index': 0,
'replication_port': 6000,
'zone': 0,
'ip': '127.0.0.0',
'region': 1,
'port': 6000,
'replication_ip': '127.0.0.0',
'device': 'sda1', 'id': 0,
}, {
'index': 2,
'replication_port': 6000,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6000,
'replication_ip': '127.0.0.2',
'device': 'sda1',
'id': 2,
}],
'job_type': object_reconstructor.SYNC,
'sync_diskfile_builder': self.reconstructor.reconstruct_fa,
'suffixes': ['061', '3c1'],
'partition': 0,
'frag_index': 1,
'device': 'sda1',
'local_dev': {
'replication_port': 6000,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6000,
},
'hashes':
{
'061': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229',
1: '0e6e8d48d801dc89fd31904ae3b31229'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}]
)
# part num 1
expected.append(
[{
'sync_to': [{
'index': 1,
'replication_port': 6000,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6000,
'replication_ip': '127.0.0.2',
'device': 'sda1',
'id': 2,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061', '3c1'],
'partition': 1,
'frag_index': 1,
'device': 'sda1',
'local_dev': {
'replication_port': 6000,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6000,
},
'hashes':
{
'061': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}, {
'sync_to': [{
'index': 2,
'replication_port': 6000,
'zone': 4,
'ip': '127.0.0.3',
'region': 1,
'port': 6000,
'replication_ip': '127.0.0.3',
'device': 'sda1', 'id': 3,
}, {
'index': 1,
'replication_port': 6000,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6000,
'replication_ip': '127.0.0.2',
'device': 'sda1',
'id': 2,
}],
'job_type': object_reconstructor.SYNC,
'sync_diskfile_builder': self.reconstructor.reconstruct_fa,
'suffixes': ['3c1'],
'partition': 1,
'frag_index': 0,
'device': 'sda1',
'local_dev': {
'replication_port': 6000,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6000,
},
'hashes': {
'061': {
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
'3c1': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5',
1: '0e6e8d48d801dc89fd31904ae3b31229',
},
},
}]
)
# part num 2
expected.append(
[{
'sync_to': [{
'index': 0,
'replication_port': 6000,
'zone': 2,
'ip': '127.0.0.2',
'region': 1,
'port': 6000,
'replication_ip': '127.0.0.2',
'device': 'sda1', 'id': 2,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['061'],
'partition': 2,
'frag_index': 0,
'device': 'sda1',
'local_dev': {
'replication_port': 6000,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6000,
},
'hashes': {
'061': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229'
},
},
}, {
'sync_to': [{
'index': 2,
'replication_port': 6000,
'zone': 0,
'ip': '127.0.0.0',
'region': 1,
'port': 6000,
'replication_ip': '127.0.0.0',
'device': 'sda1',
'id': 0,
}],
'job_type': object_reconstructor.REVERT,
'suffixes': ['3c1'],
'partition': 2,
'frag_index': 2,
'device': 'sda1',
'local_dev': {
'replication_port': 6000,
'zone': 1,
'ip': '127.0.0.1',
'region': 1,
'id': 1,
'replication_ip': '127.0.0.1',
'device': 'sda1',
'port': 6000
},
'hashes': {
'061': {
0: '0e6e8d48d801dc89fd31904ae3b31229',
None: '85b02a5283704292a511078a5c483da5'
},
'3c1': {
None: '85b02a5283704292a511078a5c483da5',
2: '0e6e8d48d801dc89fd31904ae3b31229'
},
},
}]
)
def check_jobs(part_num):
try:
expected_jobs = expected[int(part_num)]
except (IndexError, ValueError):
self.fail('Unknown part number %r' % part_num)
expected_by_part_frag_index = dict(
((j['partition'], j['frag_index']), j) for j in expected_jobs)
for job in jobs:
job_key = (job['partition'], job['frag_index'])
if job_key in expected_by_part_frag_index:
for k, value in job.items():
expected_value = \
expected_by_part_frag_index[job_key][k]
try:
if isinstance(value, list):
value.sort()
expected_value.sort()
self.assertEqual(value, expected_value)
except AssertionError as e:
extra_info = \
'\n\n... for %r in part num %s job %r' % (
k, part_num, job_key)
raise AssertionError(str(e) + extra_info)
else:
self.fail(
'Unexpected job %r for part num %s - '
'expected jobs where %r' % (
job_key, part_num,
expected_by_part_frag_index.keys()))
for expected_job in expected_jobs:
if expected_job in jobs:
jobs.remove(expected_job)
self.assertFalse(jobs) # that should be all of them
check_jobs(part_num)
def test_run_once(self):
with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
with mock_ssync_sender():
self.reconstructor.run_once()
def test_get_response(self):
part = self.part_nums[0]
node = POLICIES[0].object_ring.get_part_nodes(int(part))[0]
for stat_code in (200, 400):
with mocked_http_conn(stat_code):
resp = self.reconstructor._get_response(node, part,
path='nada',
headers={},
policy=POLICIES[0])
if resp:
self.assertEqual(resp.status, 200)
else:
self.assertEqual(
len(self.reconstructor.logger.log_dict['warning']), 1)
def test_reconstructor_skips_bogus_partition_dirs(self):
# A directory in the wrong place shouldn't crash the reconstructor
rmtree(self.objects_1)
os.mkdir(self.objects_1)
os.mkdir(os.path.join(self.objects_1, "burrito"))
jobs = []
for part_info in self.reconstructor.collect_parts():
jobs += self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(len(jobs), 0)
def test_check_ring(self):
testring = tempfile.mkdtemp()
_create_test_rings(testring)
obj_ring = ring.Ring(testring, ring_name='object') # noqa
self.assertTrue(self.reconstructor.check_ring(obj_ring))
orig_check = self.reconstructor.next_check
self.reconstructor.next_check = orig_check - 30
self.assertTrue(self.reconstructor.check_ring(obj_ring))
self.reconstructor.next_check = orig_check
orig_ring_time = obj_ring._mtime
obj_ring._mtime = orig_ring_time - 30
self.assertTrue(self.reconstructor.check_ring(obj_ring))
self.reconstructor.next_check = orig_check - 30
self.assertFalse(self.reconstructor.check_ring(obj_ring))
rmtree(testring, ignore_errors=1)
def test_build_reconstruction_jobs(self):
self.reconstructor.handoffs_first = False
self.reconstructor._reset_stats()
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertTrue(jobs[0]['job_type'] in
(object_reconstructor.SYNC,
object_reconstructor.REVERT))
self.assert_expected_jobs(part_info['partition'], jobs)
self.reconstructor.handoffs_first = True
self.reconstructor._reset_stats()
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertTrue(jobs[0]['job_type'] ==
object_reconstructor.REVERT)
self.assert_expected_jobs(part_info['partition'], jobs)
def test_get_partners(self):
# we're going to perform an exhaustive test of every possible
# combination of partitions and nodes in our custom test ring
# format: [dev_id in question, 'part_num',
# [part_nodes for the given part], left id, right id...]
expected_partners = sorted([
(0, '0', [0, 1, 2], 2, 1), (0, '2', [2, 3, 0], 3, 2),
(1, '0', [0, 1, 2], 0, 2), (1, '1', [1, 2, 3], 3, 2),
(2, '0', [0, 1, 2], 1, 0), (2, '1', [1, 2, 3], 1, 3),
(2, '2', [2, 3, 0], 0, 3), (3, '1', [1, 2, 3], 2, 1),
(3, '2', [2, 3, 0], 2, 0), (0, '0', [0, 1, 2], 2, 1),
(0, '2', [2, 3, 0], 3, 2), (1, '0', [0, 1, 2], 0, 2),
(1, '1', [1, 2, 3], 3, 2), (2, '0', [0, 1, 2], 1, 0),
(2, '1', [1, 2, 3], 1, 3), (2, '2', [2, 3, 0], 0, 3),
(3, '1', [1, 2, 3], 2, 1), (3, '2', [2, 3, 0], 2, 0),
])
got_partners = []
for pol in POLICIES:
obj_ring = pol.object_ring
for part_num in self.part_nums:
part_nodes = obj_ring.get_part_nodes(int(part_num))
primary_ids = [n['id'] for n in part_nodes]
for node in part_nodes:
partners = object_reconstructor._get_partners(
node['index'], part_nodes)
left = partners[0]['id']
right = partners[1]['id']
got_partners.append((
node['id'], part_num, primary_ids, left, right))
self.assertEqual(expected_partners, sorted(got_partners))
def test_collect_parts(self):
parts = []
for part_info in self.reconstructor.collect_parts():
parts.append(part_info['partition'])
self.assertEqual(sorted(parts), [0, 1, 2])
def test_collect_parts_mkdirs_error(self):
def blowup_mkdirs(path):
raise OSError('Ow!')
with mock.patch.object(object_reconstructor, 'mkdirs', blowup_mkdirs):
rmtree(self.objects_1, ignore_errors=1)
parts = []
for part_info in self.reconstructor.collect_parts():
parts.append(part_info['partition'])
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
log_args, log_kwargs = self.logger.log_dict['error'][0]
self.assertEquals(str(log_kwargs['exc_info'][1]), 'Ow!')
def test_removes_zbf(self):
# After running xfs_repair, a partition directory could become a
# zero-byte file. If this happens, the reconstructor should clean it
# up, log something, and move on to the next partition.
# Surprise! Partition dir 1 is actually a zero-byte file.
pol_1_part_1_path = os.path.join(self.objects_1, '1')
rmtree(pol_1_part_1_path)
with open(pol_1_part_1_path, 'w'):
pass
self.assertTrue(os.path.isfile(pol_1_part_1_path)) # sanity check
        # since our collect_parts job is a generator that yields directly
        # into build_jobs and then spawns, it's safe to do the remove_files
        # without making reconstructor startup slow
for part_info in self.reconstructor.collect_parts():
self.assertNotEqual(pol_1_part_1_path, part_info['part_path'])
self.assertFalse(os.path.exists(pol_1_part_1_path))
warnings = self.reconstructor.logger.get_lines_for_level('warning')
self.assertEqual(1, len(warnings))
self.assertTrue('Unexpected entity in data dir:' in warnings[0],
'Warning not found in %s' % warnings)
def _make_fake_ssync(self, ssync_calls):
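        # Build an ssync_sender stand-in that records the node, job, suffixes
        # and the available_map it derives from the local diskfiles, then
        # reports success with that map instead of doing any wire I/O.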
class _fake_ssync(object):
def __init__(self, daemon, node, job, suffixes, **kwargs):
# capture context and generate an available_map of objs
context = {}
context['node'] = node
context['job'] = job
context['suffixes'] = suffixes
self.suffixes = suffixes
self.daemon = daemon
self.job = job
hash_gen = self.daemon._diskfile_mgr.yield_hashes(
self.job['device'], self.job['partition'],
self.job['policy'], self.suffixes,
frag_index=self.job.get('frag_index'))
self.available_map = {}
for path, hash_, ts in hash_gen:
self.available_map[hash_] = ts
context['available_map'] = self.available_map
ssync_calls.append(context)
def __call__(self, *args, **kwargs):
return True, self.available_map
return _fake_ssync
def test_delete_reverted(self):
# verify reconstructor deletes reverted frag indexes after ssync'ing
def visit_obj_dirs(context):
for suff in context['suffixes']:
suff_dir = os.path.join(
context['job']['path'], suff)
for root, dirs, files in os.walk(suff_dir):
for d in dirs:
dirpath = os.path.join(root, d)
files = os.listdir(dirpath)
yield dirpath, files
n_files = n_files_after = 0
# run reconstructor with delete function mocked out to check calls
ssync_calls = []
delete_func =\
'swift.obj.reconstructor.ObjectReconstructor.delete_reverted_objs'
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
with mock.patch(delete_func) as mock_delete:
self.reconstructor.reconstruct()
expected_calls = []
for context in ssync_calls:
if context['job']['job_type'] == REVERT:
for dirpath, files in visit_obj_dirs(context):
# sanity check - expect some files to be in dir,
# may not be for the reverted frag index
self.assertTrue(files)
n_files += len(files)
expected_calls.append(mock.call(context['job'],
context['available_map'],
context['node']['index']))
mock_delete.assert_has_calls(expected_calls, any_order=True)
ssync_calls = []
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
self.reconstructor.reconstruct()
for context in ssync_calls:
if context['job']['job_type'] == REVERT:
data_file_tail = ('#%s.data'
% context['node']['index'])
for dirpath, files in visit_obj_dirs(context):
n_files_after += len(files)
for filename in files:
self.assertFalse(
filename.endswith(data_file_tail))
        # sanity check that some files were deleted
self.assertTrue(n_files > n_files_after)
def test_get_part_jobs(self):
# yeah, this test code expects a specific setup
self.assertEqual(len(self.part_nums), 3)
# OK, at this point we should have 4 loaded parts with one
jobs = []
for partition in os.listdir(self.ec_obj_path):
part_path = os.path.join(self.ec_obj_path, partition)
jobs = self.reconstructor._get_part_jobs(
self.ec_local_dev, part_path, int(partition), self.ec_policy)
self.assert_expected_jobs(partition, jobs)
def assertStatCount(self, stat_method, stat_prefix, expected_count):
count = count_stats(self.logger, stat_method, stat_prefix)
msg = 'expected %s != %s for %s %s' % (
expected_count, count, stat_method, stat_prefix)
self.assertEqual(expected_count, count, msg)
def test_delete_partition(self):
# part 2 is predefined to have all revert jobs
part_path = os.path.join(self.objects_1, '2')
self.assertTrue(os.access(part_path, os.F_OK))
ssync_calls = []
status = [200] * 2
body = pickle.dumps({})
with mocked_http_conn(*status, body=body) as request_log:
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.reconstruct(override_partitions=[2])
        expected_replicate_calls = set([
('127.0.0.0', '/sda1/2/3c1'),
('127.0.0.2', '/sda1/2/061'),
])
found_calls = set((r['ip'], r['path'])
for r in request_log.requests)
        self.assertEqual(expected_replicate_calls, found_calls)
expected_ssync_calls = sorted([
('127.0.0.0', REVERT, 2, ['3c1']),
('127.0.0.2', REVERT, 2, ['061']),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['job_type'],
c['job']['partition'],
c['suffixes'],
) for c in ssync_calls))
expected_stats = {
('increment', 'partition.delete.count.'): 2,
('timing_since', 'partition.delete.timing'): 2,
}
for stat_key, expected in expected_stats.items():
stat_method, stat_prefix = stat_key
self.assertStatCount(stat_method, stat_prefix, expected)
# part 2 should be totally empty
policy = POLICIES[1]
hash_gen = self.reconstructor._df_router[policy].yield_hashes(
'sda1', '2', policy)
for path, hash_, ts in hash_gen:
            self.fail('found %s with %s in %s' % (hash_, ts, path))
# but the partition directory and hashes pkl still exist
self.assertTrue(os.access(part_path, os.F_OK))
hashes_path = os.path.join(self.objects_1, '2', diskfile.HASH_FILE)
self.assertTrue(os.access(hashes_path, os.F_OK))
# ... but on next pass
ssync_calls = []
with mocked_http_conn() as request_log:
with mock.patch('swift.obj.reconstructor.ssync_sender',
self._make_fake_ssync(ssync_calls)):
self.reconstructor.reconstruct(override_partitions=[2])
# reconstruct won't generate any replicate or ssync_calls
self.assertFalse(request_log.requests)
self.assertFalse(ssync_calls)
# and the partition will get removed!
self.assertFalse(os.access(part_path, os.F_OK))
def test_process_job_all_success(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with mocked_http_conn(*[200] * 12, body=pickle.dumps({})):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger._clear()
node_count = len(job['sync_to'])
self.reconstructor.process_job(job)
if job['job_type'] == object_reconstructor.REVERT:
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
else:
self.assertStatCount('update_stats',
'suffix.hashes',
node_count)
self.assertEqual(node_count, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
self.assertEqual(node_count, count_stats(
self.logger, 'update_stats', 'suffix.syncs'))
self.assertFalse('error' in
self.logger.all_log_lines())
self.assertEqual(self.reconstructor.suffix_sync, 8)
self.assertEqual(self.reconstructor.suffix_count, 8)
self.assertEqual(len(found_jobs), 6)
def test_process_job_all_insufficient_storage(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with mocked_http_conn(*[507] * 10):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger._clear()
self.reconstructor.process_job(job)
for line in self.logger.get_lines_for_level('error'):
self.assertTrue('responded as unmounted' in line)
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.syncs'))
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(len(found_jobs), 6)
def test_process_job_all_client_error(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with mocked_http_conn(*[400] * 10):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger._clear()
self.reconstructor.process_job(job)
for line in self.logger.get_lines_for_level('error'):
self.assertTrue('Invalid response 400' in line)
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.hashes'))
self.assertEqual(0, count_stats(
self.logger, 'update_stats', 'suffix.syncs'))
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(len(found_jobs), 6)
def test_process_job_all_timeout(self):
self.reconstructor._reset_stats()
with mock_ssync_sender():
with nested(mocked_http_conn(*[Timeout()] * 10)):
found_jobs = []
for part_info in self.reconstructor.collect_parts():
jobs = self.reconstructor.build_reconstruction_jobs(
part_info)
found_jobs.extend(jobs)
for job in jobs:
self.logger._clear()
self.reconstructor.process_job(job)
for line in self.logger.get_lines_for_level('error'):
self.assertTrue('Timeout (Nones)' in line)
self.assertStatCount(
'update_stats', 'suffix.hashes', 0)
self.assertStatCount(
'update_stats', 'suffix.syncs', 0)
self.assertEqual(self.reconstructor.suffix_sync, 0)
self.assertEqual(self.reconstructor.suffix_count, 0)
self.assertEqual(len(found_jobs), 6)
@patch_policies(with_ec_default=True)
class TestObjectReconstructor(unittest.TestCase):
def setUp(self):
self.policy = POLICIES.default
self.testdir = tempfile.mkdtemp()
self.devices = os.path.join(self.testdir, 'devices')
self.local_dev = self.policy.object_ring.devs[0]
self.ip = self.local_dev['replication_ip']
self.port = self.local_dev['replication_port']
self.conf = {
'devices': self.devices,
'mount_check': False,
'bind_port': self.port,
}
self.logger = debug_logger('object-reconstructor')
self.reconstructor = object_reconstructor.ObjectReconstructor(
self.conf, logger=self.logger)
self.reconstructor._reset_stats()
# some tests bypass build_reconstruction_jobs and go to process_job
# directly, so you end up with a /0 when you try to show the
# percentage of complete jobs as ratio of the total job count
self.reconstructor.job_count = 1
self.policy.object_ring.max_more_nodes = \
self.policy.object_ring.replicas
self.ts_iter = make_timestamp_iter()
def tearDown(self):
self.reconstructor.stats_line()
shutil.rmtree(self.testdir)
def ts(self):
return next(self.ts_iter)
def test_collect_parts_skips_non_ec_policy_and_device(self):
stub_parts = (371, 78, 419, 834)
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for part in stub_parts:
utils.mkdirs(os.path.join(
self.devices, self.local_dev['device'],
datadir, str(part)))
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]):
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
self.assertEqual(found_parts, sorted(stub_parts))
for part_info in part_infos:
self.assertEqual(part_info['local_dev'], self.local_dev)
self.assertEqual(part_info['policy'], self.policy)
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
def test_collect_parts_multi_device_skips_non_ring_devices(self):
device_parts = {
'sda': (374,),
'sdb': (179, 807),
'sdc': (363, 468, 843),
}
for policy in POLICIES:
datadir = diskfile.get_data_dir(policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for dev in local_devs]
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs)):
part_infos = list(self.reconstructor.collect_parts())
found_parts = sorted(int(p['partition']) for p in part_infos)
expected_parts = sorted(itertools.chain(
*(device_parts[d] for d in local_devs)))
self.assertEqual(found_parts, expected_parts)
for part_info in part_infos:
self.assertEqual(part_info['policy'], self.policy)
self.assertTrue(part_info['local_dev'] in stub_ring_devs)
dev = part_info['local_dev']
self.assertEqual(part_info['part_path'],
os.path.join(self.devices,
dev['device'],
diskfile.get_data_dir(self.policy),
str(part_info['partition'])))
def test_collect_parts_mount_check(self):
# each device has one part in it
local_devs = ('sda', 'sdb')
for i, dev in enumerate(local_devs):
datadir = diskfile.get_data_dir(self.policy)
utils.mkdirs(os.path.join(
self.devices, dev, datadir, str(i)))
stub_ring_devs = [{
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for dev in local_devs]
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs)):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(2, len(part_infos)) # sanity
self.assertEqual(set(int(p['partition']) for p in part_infos),
set([0, 1]))
paths = []
def fake_ismount(path):
paths.append(path)
return False
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs),
mock.patch('swift.obj.reconstructor.ismount',
fake_ismount)):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(2, len(part_infos)) # sanity, same jobs
self.assertEqual(set(int(p['partition']) for p in part_infos),
set([0, 1]))
# ... because ismount was not called
self.assertEqual(paths, [])
# ... now with mount check
self.reconstructor.mount_check = True
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs),
mock.patch('swift.obj.reconstructor.ismount',
fake_ismount)):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual([], part_infos) # sanity, no jobs
# ... because fake_ismount returned False for both paths
self.assertEqual(set(paths), set([
os.path.join(self.devices, dev) for dev in local_devs]))
def fake_ismount(path):
if path.endswith('sda'):
return True
else:
return False
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs),
mock.patch('swift.obj.reconstructor.ismount',
fake_ismount)):
part_infos = list(self.reconstructor.collect_parts())
self.assertEqual(1, len(part_infos)) # only sda picked up (part 0)
self.assertEqual(part_infos[0]['partition'], 0)
def test_collect_parts_cleans_tmp(self):
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for dev in local_devs]
fake_unlink = mock.MagicMock()
self.reconstructor.reclaim_age = 1000
now = time.time()
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch('swift.obj.reconstructor.time.time',
return_value=now),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs),
mock.patch('swift.obj.reconstructor.unlink_older_than',
fake_unlink)):
self.assertEqual([], list(self.reconstructor.collect_parts()))
        # each local device has unlink_older_than called on it,
# with now - self.reclaim_age
tmpdir = diskfile.get_tmp_dir(self.policy)
expected = now - 1000
self.assertEqual(fake_unlink.mock_calls, [
mock.call(os.path.join(self.devices, dev, tmpdir), expected)
for dev in local_devs])
def test_collect_parts_creates_datadir(self):
# create just the device path
dev_path = os.path.join(self.devices, self.local_dev['device'])
utils.mkdirs(dev_path)
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]):
self.assertEqual([], list(self.reconstructor.collect_parts()))
datadir_path = os.path.join(dev_path,
diskfile.get_data_dir(self.policy))
self.assertTrue(os.path.exists(datadir_path))
def test_collect_parts_creates_datadir_error(self):
# create just the device path
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
utils.mkdirs(os.path.dirname(datadir_path))
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch('swift.obj.reconstructor.mkdirs',
side_effect=OSError('kaboom!'))):
self.assertEqual([], list(self.reconstructor.collect_parts()))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
line = error_lines[0]
self.assertTrue('Unable to create' in line)
self.assertTrue(datadir_path in line)
def test_collect_parts_skips_invalid_paths(self):
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
utils.mkdirs(os.path.dirname(datadir_path))
with open(datadir_path, 'w') as f:
f.write('junk')
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]):
self.assertEqual([], list(self.reconstructor.collect_parts()))
self.assertTrue(os.path.exists(datadir_path))
error_lines = self.logger.get_lines_for_level('error')
self.assertEqual(len(error_lines), 1)
line = error_lines[0]
self.assertTrue('Unable to list partitions' in line)
self.assertTrue(datadir_path in line)
def test_collect_parts_removes_non_partition_files(self):
# create some junk next to partitions
datadir_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy))
num_parts = 3
for part in range(num_parts):
utils.mkdirs(os.path.join(datadir_path, str(part)))
junk_file = os.path.join(datadir_path, 'junk')
with open(junk_file, 'w') as f:
f.write('junk')
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]):
part_infos = list(self.reconstructor.collect_parts())
# the file is not included in the part_infos map
self.assertEqual(sorted(p['part_path'] for p in part_infos),
sorted([os.path.join(datadir_path, str(i))
for i in range(num_parts)]))
# and gets cleaned up
self.assertFalse(os.path.exists(junk_file))
def test_collect_parts_overrides(self):
# setup multiple devices, with multiple parts
device_parts = {
'sda': (374, 843),
'sdb': (179, 807),
'sdc': (363, 468, 843),
}
datadir = diskfile.get_data_dir(self.policy)
for dev, parts in device_parts.items():
for part in parts:
utils.mkdirs(os.path.join(
self.devices, dev,
datadir, str(part)))
# we're only going to add sda and sdc into the ring
local_devs = ('sda', 'sdc')
stub_ring_devs = [{
'device': dev,
'replication_ip': self.ip,
'replication_port': self.port
} for dev in local_devs]
expected = (
({}, [
('sda', 374),
('sda', 843),
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sda', 'sdc']}, [
('sda', 374),
('sda', 843),
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sdc']}, [
('sdc', 363),
('sdc', 468),
('sdc', 843),
]),
({'override_devices': ['sda']}, [
('sda', 374),
('sda', 843),
]),
({'override_devices': ['sdx']}, []),
({'override_partitions': [374]}, [
('sda', 374),
]),
({'override_partitions': [843]}, [
('sda', 843),
('sdc', 843),
]),
({'override_partitions': [843], 'override_devices': ['sda']}, [
('sda', 843),
]),
)
with nested(mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]),
mock.patch.object(self.policy.object_ring, '_devs',
new=stub_ring_devs)):
for kwargs, expected_parts in expected:
part_infos = list(self.reconstructor.collect_parts(**kwargs))
expected_paths = set(
os.path.join(self.devices, dev, datadir, str(part))
for dev, part in expected_parts)
found_paths = set(p['part_path'] for p in part_infos)
msg = 'expected %r != %r for %r' % (
expected_paths, found_paths, kwargs)
self.assertEqual(expected_paths, found_paths, msg)
def test_build_jobs_creates_empty_hashes(self):
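        # an empty partition dir with no hashes.pkl should still produce a
        # single SYNC job with no suffixes, and building the jobs should
        # create an empty hashes file as a side effect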
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy), '0')
utils.mkdirs(part_path)
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': 0,
'part_path': part_path,
}
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], 0)
self.assertEqual(job['suffixes'], [])
self.assertEqual(len(job['sync_to']), 2)
self.assertEqual(job['partition'], 0)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], {})
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
hashes_file = os.path.join(part_path,
diskfile.HASH_FILE)
self.assertTrue(os.path.exists(hashes_file))
suffixes = self.reconstructor._get_hashes(
self.policy, part_path, do_listdir=True)
self.assertEqual(suffixes, {})
def test_build_jobs_no_hashes(self):
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy), '0')
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': 0,
'part_path': part_path,
}
stub_hashes = {}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], 0)
self.assertEqual(job['suffixes'], [])
self.assertEqual(len(job['sync_to']), 2)
self.assertEqual(job['partition'], 0)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], {})
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
def test_build_jobs_primary(self):
ring = self.policy.object_ring = FabricatedRing()
# find a partition for which we're a primary
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
try:
frag_index = [n['id'] for n in part_nodes].index(
self.local_dev['id'])
except ValueError:
pass
else:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.SYNC)
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(job['suffixes'], stub_hashes.keys())
self.assertEqual(set([n['index'] for n in job['sync_to']]),
set([(frag_index + 1) % ring.replicas,
(frag_index - 1) % ring.replicas]))
self.assertEqual(job['partition'], partition)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['hashes'], stub_hashes)
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
def test_build_jobs_handoff(self):
ring = self.policy.object_ring = FabricatedRing()
# find a partition for which we're a handoff
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
if self.local_dev['id'] not in [n['id'] for n in part_nodes]:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
# since this part doesn't belong on us it doesn't matter what
# frag_index we have
frag_index = random.randint(0, ring.replicas - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(1, len(jobs))
job = jobs[0]
self.assertEqual(job['job_type'], object_reconstructor.REVERT)
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(sorted(job['suffixes']), sorted(stub_hashes.keys()))
self.assertEqual(len(job['sync_to']), 1)
self.assertEqual(job['sync_to'][0]['index'], frag_index)
self.assertEqual(job['path'], part_path)
self.assertEqual(job['partition'], partition)
self.assertEqual(sorted(job['hashes']), sorted(stub_hashes))
self.assertEqual(job['local_dev'], self.local_dev)
def test_build_jobs_mixed(self):
ring = self.policy.object_ring = FabricatedRing()
# find a partition for which we're a primary
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
try:
frag_index = [n['id'] for n in part_nodes].index(
self.local_dev['id'])
except ValueError:
pass
else:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
other_frag_index = random.choice([f for f in range(ring.replicas)
if f != frag_index])
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'456': {other_frag_index: 'hash', None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(2, len(jobs))
sync_jobs, revert_jobs = [], []
for job in jobs:
self.assertEqual(job['partition'], partition)
self.assertEqual(job['path'], part_path)
self.assertEqual(sorted(job['hashes']), sorted(stub_hashes))
self.assertEqual(job['policy'], self.policy)
self.assertEqual(job['local_dev'], self.local_dev)
self.assertEqual(job['device'], self.local_dev['device'])
{
object_reconstructor.SYNC: sync_jobs,
object_reconstructor.REVERT: revert_jobs,
}[job['job_type']].append(job)
self.assertEqual(1, len(sync_jobs))
job = sync_jobs[0]
self.assertEqual(job['frag_index'], frag_index)
self.assertEqual(sorted(job['suffixes']), sorted(['123', 'abc']))
self.assertEqual(len(job['sync_to']), 2)
self.assertEqual(set([n['index'] for n in job['sync_to']]),
set([(frag_index + 1) % ring.replicas,
(frag_index - 1) % ring.replicas]))
self.assertEqual(1, len(revert_jobs))
job = revert_jobs[0]
self.assertEqual(job['frag_index'], other_frag_index)
self.assertEqual(job['suffixes'], ['456'])
self.assertEqual(len(job['sync_to']), 1)
self.assertEqual(job['sync_to'][0]['index'], other_frag_index)
def test_build_jobs_revert_only_tombstones(self):
ring = self.policy.object_ring = FabricatedRing()
# find a partition for which we're a handoff
for partition in range(2 ** ring.part_power):
part_nodes = ring.get_part_nodes(partition)
if self.local_dev['id'] not in [n['id'] for n in part_nodes]:
break
else:
self.fail("the ring doesn't work: %r" % ring._replica2part2dev_id)
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
part_info = {
'local_dev': self.local_dev,
'policy': self.policy,
'partition': partition,
'part_path': part_path,
}
# we have no fragment index to hint the jobs where they belong
stub_hashes = {
'123': {None: 'hash'},
'abc': {None: 'hash'},
}
with mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes)):
jobs = self.reconstructor.build_reconstruction_jobs(part_info)
self.assertEqual(len(jobs), 1)
job = jobs[0]
expected = {
'job_type': object_reconstructor.REVERT,
'frag_index': None,
'suffixes': stub_hashes.keys(),
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
'device': self.local_dev['device'],
}
self.assertEqual(ring.replica_count, len(job['sync_to']))
for k, v in expected.items():
msg = 'expected %s != %s for %s' % (
v, job[k], k)
self.assertEqual(v, job[k], msg)
def test_get_suffix_delta(self):
# different
local_suff = {'123': {None: 'abc', 0: 'def'}}
remote_suff = {'456': {None: 'ghi', 0: 'jkl'}}
local_index = 0
remote_index = 0
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
# now the same
remote_suff = {'123': {None: 'abc', 0: 'def'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, [])
# now with a mis-matched None key (missing durable)
remote_suff = {'123': {None: 'ghi', 0: 'def'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
# now with bogus local index
local_suff = {'123': {None: 'abc', 99: 'def'}}
remote_suff = {'456': {None: 'ghi', 0: 'jkl'}}
suffs = self.reconstructor.get_suffix_delta(local_suff,
local_index,
remote_suff,
remote_index)
self.assertEqual(suffs, ['123'])
def test_process_job_primary_in_sync(self):
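        # both partners already agree on every suffix hash, so process_job
        # should stop after the two suffix-hash requests and never ssync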
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [n for n in self.policy.object_ring.devs
if n != self.local_dev][:2]
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
left_hashes = {
'123': {left_index: 'hash', None: 'hash'},
'abc': {left_index: 'hash', None: 'hash'},
}
right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
right_hashes = {
'123': {right_index: 'hash', None: 'hash'},
'abc': {right_index: 'hash', None: 'hash'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, right_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
('10.0.0.1', '/sdb/0'),
('10.0.0.2', '/sdc/0'),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
self.assertEqual(len(ssync_calls), 0)
def test_process_job_primary_not_in_sync(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [n for n in self.policy.object_ring.devs
if n != self.local_dev][:2]
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
sync_to[0]['index'] = (frag_index - 1) % replicas
left_hashes = {}
sync_to[1]['index'] = (frag_index + 1) % replicas
right_hashes = {}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, left_hashes, right_hashes, right_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
('10.0.0.1', '/sdb/0'),
('10.0.0.1', '/sdb/0/123-abc'),
('10.0.0.2', '/sdc/0'),
('10.0.0.2', '/sdc/0/123-abc'),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
expected_ssync_calls = sorted([
('10.0.0.1', 0, set(['123', 'abc'])),
('10.0.0.2', 0, set(['123', 'abc'])),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['partition'],
set(c['suffixes']),
) for c in ssync_calls))
def test_process_job_sync_missing_durable(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [n for n in self.policy.object_ring.devs
if n != self.local_dev][:2]
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
# left hand side is in sync
left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
left_hashes = {
'123': {left_index: 'hash', None: 'hash'},
'abc': {left_index: 'hash', None: 'hash'},
}
# right hand side has fragment, but no durable (None key is whack)
right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
right_hashes = {
'123': {right_index: 'hash', None: 'hash'},
'abc': {right_index: 'hash', None: 'different-because-durable'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, right_hashes, right_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
('10.0.0.1', '/sdb/0'),
('10.0.0.2', '/sdc/0'),
('10.0.0.2', '/sdc/0/abc'),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
expected_ssync_calls = sorted([
('10.0.0.2', 0, ['abc']),
])
self.assertEqual(expected_ssync_calls, sorted((
c['node']['ip'],
c['job']['partition'],
c['suffixes'],
) for c in ssync_calls))
def test_process_job_primary_some_in_sync(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [n for n in self.policy.object_ring.devs
if n != self.local_dev][:2]
# setup left and right hashes
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
left_index = sync_to[0]['index'] = (frag_index - 1) % replicas
left_hashes = {
'123': {left_index: 'hashX', None: 'hash'},
'abc': {left_index: 'hash', None: 'hash'},
}
right_index = sync_to[1]['index'] = (frag_index + 1) % replicas
right_hashes = {
'123': {right_index: 'hash', None: 'hash'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
responses = [(200, pickle.dumps(hashes)) for hashes in (
left_hashes, left_hashes, right_hashes, right_hashes)]
codes, body_iter = zip(*responses)
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(*codes, body_iter=body_iter) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
('10.0.0.1', '/sdb/0'),
('10.0.0.1', '/sdb/0/123'),
('10.0.0.2', '/sdc/0'),
('10.0.0.2', '/sdc/0/abc'),
])
self.assertEqual(expected_suffix_calls,
set((r['ip'], r['path'])
for r in request_log.requests))
self.assertEqual(len(ssync_calls), 2)
self.assertEqual(set(c['node']['index'] for c in ssync_calls),
set([left_index, right_index]))
for call in ssync_calls:
if call['node']['index'] == left_index:
self.assertEqual(call['suffixes'], ['123'])
elif call['node']['index'] == right_index:
self.assertEqual(call['suffixes'], ['abc'])
else:
self.fail('unexpected call %r' % call)
def test_process_job_primary_down(self):
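        # ssync to the first partner fails, so the reconstructor is expected
        # to pull in a further primary, and three nodes in total see suffix
        # and ssync requests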
replicas = self.policy.object_ring.replicas
partition = 0
frag_index = random.randint(0, replicas - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_nodes = self.policy.object_ring.get_part_nodes(partition)
sync_to = part_nodes[:2]
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'device': self.local_dev['device'],
'local_dev': self.local_dev,
}
non_local = {'called': 0}
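        # Python 2 closures cannot rebind names in the enclosing scope, so the call
        # count is kept in a mutable dict that the callback below updates in place.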
def ssync_response_callback(*args):
# in this test, ssync fails on the first (primary sync_to) node
if non_local['called'] >= 1:
return True, {}
non_local['called'] += 1
return False, {}
expected_suffix_calls = set()
for node in part_nodes[:3]:
expected_suffix_calls.update([
(node['replication_ip'], '/%s/0' % node['device']),
(node['replication_ip'], '/%s/0/123-abc' % node['device']),
])
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(*[200] * len(expected_suffix_calls),
body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
expected_ssync_calls = sorted([
('10.0.0.0', 0, set(['123', 'abc'])),
('10.0.0.1', 0, set(['123', 'abc'])),
('10.0.0.2', 0, set(['123', 'abc'])),
])
found_ssync_calls = sorted((
c['node']['ip'],
c['job']['partition'],
set(c['suffixes']),
) for c in ssync_calls)
self.assertEqual(expected_ssync_calls, found_ssync_calls)
def test_process_job_suffix_call_errors(self):
replicas = self.policy.object_ring.replicas
partition = 0
frag_index = random.randint(0, replicas - 1)
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_nodes = self.policy.object_ring.get_part_nodes(partition)
sync_to = part_nodes[:2]
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.SYNC,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'device': self.local_dev['device'],
'local_dev': self.local_dev,
}
expected_suffix_calls = set((
node['replication_ip'], '/%s/0' % node['device']
) for node in part_nodes)
possible_errors = [404, 507, Timeout(), Exception('kaboom!')]
codes = [random.choice(possible_errors)
for r in expected_suffix_calls]
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(*codes) as request_log:
self.reconstructor.process_job(job)
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
self.assertFalse(ssync_calls)
def test_process_job_handoff(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
expected_suffix_calls = set([
(sync_to[0]['ip'], '/%s/0/123-abc' % sync_to[0]['device']),
])
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
self.assertEqual(len(ssync_calls), 1)
call = ssync_calls[0]
self.assertEqual(call['node'], sync_to[0])
self.assertEqual(set(call['suffixes']), set(['123', 'abc']))
def test_process_job_revert_to_handoff(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
handoff = next(self.policy.object_ring.get_more_nodes(partition))
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': self.local_dev,
}
non_local = {'called': 0}
def ssync_response_callback(*args):
# in this test, ssync fails on the first (primary sync_to) node
if non_local['called'] >= 1:
return True, {}
non_local['called'] += 1
return False, {}
expected_suffix_calls = set([
(node['replication_ip'], '/%s/0/123-abc' % node['device'])
for node in (sync_to[0], handoff)
])
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(*[200] * len(expected_suffix_calls),
body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
self.assertEqual(len(ssync_calls), len(expected_suffix_calls))
call = ssync_calls[0]
self.assertEqual(call['node'], sync_to[0])
self.assertEqual(set(call['suffixes']), set(['123', 'abc']))
def test_process_job_revert_is_handoff(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
handoff_nodes = list(self.policy.object_ring.get_more_nodes(partition))
stub_hashes = {
'123': {frag_index: 'hash', None: 'hash'},
'abc': {frag_index: 'hash', None: 'hash'},
}
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': stub_hashes.keys(),
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': stub_hashes,
'policy': self.policy,
'local_dev': handoff_nodes[-1],
}
def ssync_response_callback(*args):
# in this test ssync always fails, until we encounter ourselves in
            # the list of possible handoffs to sync to
return False, {}
expected_suffix_calls = set([
(sync_to[0]['replication_ip'],
'/%s/0/123-abc' % sync_to[0]['device'])
] + [
(node['replication_ip'], '/%s/0/123-abc' % node['device'])
for node in handoff_nodes[:-1]
])
ssync_calls = []
with nested(
mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback),
mock.patch('swift.obj.diskfile.ECDiskFileManager._get_hashes',
return_value=(None, stub_hashes))):
with mocked_http_conn(*[200] * len(expected_suffix_calls),
body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
found_suffix_calls = set((r['ip'], r['path'])
for r in request_log.requests)
self.assertEqual(expected_suffix_calls, found_suffix_calls)
# this is ssync call to primary (which fails) plus the ssync call to
# all of the handoffs (except the last one - which is the local_dev)
self.assertEqual(len(ssync_calls), len(handoff_nodes))
call = ssync_calls[0]
self.assertEqual(call['node'], sync_to[0])
self.assertEqual(set(call['suffixes']), set(['123', 'abc']))
def test_process_job_revert_cleanup(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts = self.ts()
with df.create() as writer:
test_data = 'test data'
writer.write(test_data)
metadata = {
'X-Timestamp': ts.internal,
'Content-Length': len(test_data),
'Etag': md5(test_data).hexdigest(),
'X-Object-Sysmeta-Ec-Frag-Index': frag_index,
}
writer.put(metadata)
writer.commit(ts)
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': [suffix],
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
}
def ssync_response_callback(*args):
return True, {ohash: ts}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
self.assertEqual([
(sync_to[0]['replication_ip'], '/%s/0/%s' % (
sync_to[0]['device'], suffix)),
], [
(r['ip'], r['path']) for r in request_log.requests
])
# hashpath is still there, but only the durable remains
files = os.listdir(df._datadir)
self.assertEqual(1, len(files))
self.assertTrue(files[0].endswith('.durable'))
# and more to the point, the next suffix recalc will clean it up
df_mgr = self.reconstructor._df_router[self.policy]
df_mgr.get_hashes(self.local_dev['device'], str(partition), [],
self.policy)
self.assertFalse(os.access(df._datadir, os.F_OK))
def test_process_job_revert_cleanup_tombstone(self):
replicas = self.policy.object_ring.replicas
frag_index = random.randint(0, replicas - 1)
sync_to = [random.choice([n for n in self.policy.object_ring.devs
if n != self.local_dev])]
sync_to[0]['index'] = frag_index
partition = 0
part_path = os.path.join(self.devices, self.local_dev['device'],
diskfile.get_data_dir(self.policy),
str(partition))
os.makedirs(part_path)
df_mgr = self.reconstructor._df_router[self.policy]
df = df_mgr.get_diskfile(self.local_dev['device'], partition, 'a',
'c', 'data-obj', policy=self.policy)
ts = self.ts()
df.delete(ts)
ohash = os.path.basename(df._datadir)
suffix = os.path.basename(os.path.dirname(df._datadir))
job = {
'job_type': object_reconstructor.REVERT,
'frag_index': frag_index,
'suffixes': [suffix],
'sync_to': sync_to,
'partition': partition,
'path': part_path,
'hashes': {},
'policy': self.policy,
'local_dev': self.local_dev,
}
def ssync_response_callback(*args):
return True, {ohash: ts}
ssync_calls = []
with mock_ssync_sender(ssync_calls,
response_callback=ssync_response_callback):
with mocked_http_conn(200, body=pickle.dumps({})) as request_log:
self.reconstructor.process_job(job)
self.assertEqual([
(sync_to[0]['replication_ip'], '/%s/0/%s' % (
sync_to[0]['device'], suffix)),
], [
(r['ip'], r['path']) for r in request_log.requests
])
# hashpath is still there, but it's empty
self.assertEqual([], os.listdir(df._datadir))
def test_reconstruct_fa_no_errors(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
metadata = {
'name': '/a/c/o',
'Content-Length': 0,
'ETag': 'etag',
}
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
responses = list((200, body) for body in ec_archive_bodies)
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
codes, body_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, metadata)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
def test_reconstruct_fa_errors_works(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[4]
metadata = {
'name': '/a/c/o',
'Content-Length': 0,
'ETag': 'etag',
}
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(4)
base_responses = list((200, body) for body in ec_archive_bodies)
# since we're already missing a fragment a +2 scheme can only support
# one additional failure at a time
for error in (Timeout(), 404, Exception('kaboom!')):
responses = list(base_responses)
error_index = random.randint(0, len(responses) - 1)
responses[error_index] = (error, '')
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
codes, body_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, dict(metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
def test_reconstruct_parity_fa_with_data_node_failure(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[-4]
metadata = {
'name': '/a/c/o',
'Content-Length': 0,
'ETag': 'etag',
}
# make up some data (trim some amount to make it unaligned with
# segment size)
test_data = ('rebuild' * self.policy.ec_segment_size)[:-454]
etag = md5(test_data).hexdigest()
ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
# the scheme is 10+4, so this gets a parity node
broken_body = ec_archive_bodies.pop(-4)
base_responses = list((200, body) for body in ec_archive_bodies)
for error in (Timeout(), 404, Exception('kaboom!')):
responses = list(base_responses)
# grab a data node index
error_index = random.randint(0, self.policy.ec_ndata - 1)
responses[error_index] = (error, '')
headers = {'X-Object-Sysmeta-Ec-Etag': etag}
codes, body_iter = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter,
headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, dict(metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
def test_reconstruct_fa_errors_fails(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
policy = self.policy
metadata = {
'name': '/a/c/o',
'Content-Length': 0,
'ETag': 'etag',
}
possible_errors = [404, Timeout(), Exception('kaboom!')]
codes = [random.choice(possible_errors) for i in
range(policy.object_ring.replicas - 1)]
with mocked_http_conn(*codes):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, metadata)
def test_reconstruct_fa_with_mixed_old_etag(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
metadata = {
'name': '/a/c/o',
'Content-Length': 0,
'ETag': 'etag',
}
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
# bad response
bad_response = (200, '', {
'X-Object-Sysmeta-Ec-Etag': 'some garbage',
'X-Backend-Timestamp': next(ts).internal,
})
# good responses
headers = {
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': next(ts).internal
}
responses = [(200, body, headers)
for body in ec_archive_bodies]
# mixed together
error_index = random.randint(0, len(responses) - 2)
responses[error_index] = bad_response
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, metadata)
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
def test_reconstruct_fa_with_mixed_new_etag(self):
job = {
'partition': 0,
'policy': self.policy,
}
part_nodes = self.policy.object_ring.get_part_nodes(0)
node = part_nodes[1]
metadata = {
'name': '/a/c/o',
'Content-Length': 0,
'ETag': 'etag',
}
test_data = ('rebuild' * self.policy.ec_segment_size)[:-777]
etag = md5(test_data).hexdigest()
ec_archive_bodies = make_ec_archive_bodies(self.policy, test_data)
broken_body = ec_archive_bodies.pop(1)
ts = (utils.Timestamp(t) for t in itertools.count(int(time.time())))
# good responses
headers = {
'X-Object-Sysmeta-Ec-Etag': etag,
'X-Backend-Timestamp': next(ts).internal
}
responses = [(200, body, headers)
for body in ec_archive_bodies]
codes, body_iter, headers = zip(*responses)
# sanity check before negative test
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
df = self.reconstructor.reconstruct_fa(
job, node, dict(metadata))
fixed_body = ''.join(df.reader())
self.assertEqual(len(fixed_body), len(broken_body))
self.assertEqual(md5(fixed_body).hexdigest(),
md5(broken_body).hexdigest())
# one newer etag can spoil the bunch
new_response = (200, '', {
'X-Object-Sysmeta-Ec-Etag': 'some garbage',
'X-Backend-Timestamp': next(ts).internal,
})
new_index = random.randint(0, len(responses) - self.policy.ec_nparity)
responses[new_index] = new_response
codes, body_iter, headers = zip(*responses)
with mocked_http_conn(*codes, body_iter=body_iter, headers=headers):
self.assertRaises(DiskFileError, self.reconstructor.reconstruct_fa,
job, node, dict(metadata))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,415,196,587,893,767,000 | 41.255684 | 79 | 0.49925 | false |
vincent-noel/libSigNetSim | libsignetsim/sedml/Task.py | 1 | 3714 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel ([email protected])
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file defines the SED-ML Task element, which links a model to a simulation and runs it.
"""
from libsignetsim.sedml.AbstractTask import AbstractTask
from libsignetsim.sedml.OneStep import OneStep
from libsignetsim.sedml.SedmlException import SedmlOneStepTaskException
from libsignetsim.settings.Settings import Settings
class Task(AbstractTask):
def __init__(self, document):
AbstractTask.__init__(self, document)
self.__document = document
self.__modelReference = None
self.__simulationReference = None
self.__simulationObject = None
self.__results = None
def readSedml(self, task, level=Settings.defaultSedmlLevel, version=Settings.defaultSedmlVersion):
AbstractTask.readSedml(self, task, level, version)
if task.isSetModelReference():
self.__modelReference = task.getModelReference()
if task.isSetSimulationReference():
self.__simulationReference = task.getSimulationReference()
def writeSedml(self, task, level=Settings.defaultSedmlLevel, version=Settings.defaultSedmlVersion):
AbstractTask.writeSedml(self, task, level, version)
if self.__modelReference is not None:
task.setModelReference(self.__modelReference)
if self.__simulationReference is not None:
task.setSimulationReference(self.__simulationReference)
def getModelReference(self):
return self.__modelReference
def getSimulationReference(self):
return self.__simulationReference
def getModel(self):
return self.__document.listOfModels.getSbmlModelByReference(self.__modelReference)
def getSimulation(self):
return self.__document.listOfSimulations.getSimulation(self.__simulationReference)
def setModelReference(self, model_reference):
self.__modelReference = model_reference
def setSimulationReference(self, simulation_reference):
self.__simulationReference = simulation_reference
def setModel(self, model):
self.__modelReference = model.getId()
def setSimulation(self, simulation):
self.__simulationReference = simulation.getId()
def run(self, timeout=None):
# One step simulations cannot be executed as a single task, they must be part of a repeated task
# At least, that's what I understand
if isinstance(self.__document.listOfSimulations.getSimulation(self.__simulationReference), OneStep):
raise SedmlOneStepTaskException("One step simulations cannot be executed as a single task")
model = self.__document.listOfModels.getSbmlModelByReference(self.__modelReference)
simulation = self.__document.listOfSimulations.getSimulation(self.__simulationReference)
self.__simulationObject = simulation.run(model, timeout=timeout)
self.__results = self.__simulationObject.getRawData()[0]
def getSimulationObject(self):
return self.__simulationObject
def getResults(self):
return self.__results
def getResultsByVariable(self, variable_sbmlid):
return self.__results[1][variable_sbmlid]
def getTimes(self):
return self.__results[0]
def getDuration(self):
return self.__simulationObject.getSimulationDuration() | gpl-3.0 | 5,935,475,291,927,596,000 | 31.876106 | 102 | 0.772213 | false |
emresefer/Parana | util.py | 1 | 4633 | import networkx as nx
import itertools
def parseHomologyInfo( hfile ):
'''
    Parse the "Emre" format homology file.  This returns a dictionary
where the key, k, is an ancestral protein. The value is a 2-tuple of
sets. The first set contains the homologs of k in G1 and the second
set contains the homologs of k in G2.
'''
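    # A hypothetical tab-separated input line such as
    #   anc1<TAB>['g1a', 'g1b']<TAB>['g2a']
    # would yield the entry  dupDict['anc1'] == ({'g1a', 'g1b'}, {'g2a'})
    # (see the parsing of toks and parseList below).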
def parseList( s ) :
s = s[1:-1]
return [ str(e)[1:-1] for e in s.split(', ') ] if len(s) > 0 else []
dupDict = {}
with open(hfile,'rb') as ifile :
for l in ifile :
toks = l.rstrip().split('\t')
key = toks[0]
g1Homologs = set([ e for e in parseList(toks[1]) ])
g2Homologs = set([ e for e in parseList(toks[2]) ])
dupDict[key] = (g1Homologs, g2Homologs)
return dupDict
def prepareTree(T, rv, lostNodes):
'''
Prepare the tree for various queries we will make later by populating all nodes with
certain information concerning their subtrees. In particular, for each node we will
compute: the leaves beneath it, the set of all subnodes beneath it, and the set of
extant networks beneath it.
'''
# Gather the leaf sets
for n in T.nodes_iter() :
T.node[n] = { 'leaves' : set([]), 'subnodes' : set([]), 'enets' : set([]) }
labelWithSubnodes(T, rv, lostNodes)
def labelWithSubnodes( T, root, lostNodes ):
"""
    For each node in the tree T, we add three vertex properties:
1) leaves -- A set containing all of the leaves rooted at this subtree
2) subnodes -- A set containing all of the nodes (internal & leaves) in this subtree
3) enets -- A set containing network identifiers for all extant networks which exist as some leaf in this subtree
"""
lost = 'LOST'
def enet(nodes):
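        # Extant-network identifiers are taken from the last two characters of a
        # leaf name; leaves flagged as lost map to the 'LOST' sentinel instead.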
r = set([])
for n in nodes:
if n not in lostNodes:
# if n.find("LOST") != -1:
# r.add(n[:2])
#else:
r.add(n[-2:])
else:
r.add(lost)
return r
for n in nx.dfs_postorder_nodes(T, root) :
successors = T.successors(n)
if len(successors) == 0 :
T.node[n]['subnodes'] = set([n])
T.node[n]['leaves'] = T.node[n]['leaves'].union( set([n]) )
T.node[n]['enets'] = enet([n])
else :
subnodes = [ set([n]) ] + [ set([s]) for s in successors ] + [ T.node[s]['subnodes'] for s in successors ]
for sn in subnodes:
T.node[n]['subnodes'] = T.node[n]['subnodes'].union( sn )
leaves = [ T.node[s]['leaves'] for s in successors ]
T.node[n]['leaves'] = T.node[n]['leaves'].union( *leaves )
T.node[n]['enets'] = enet( T.node[n]['leaves'] )
def allPairs( nodes ):
'''
Yield all pairs of nodes as 2-tuples (including every node with itself)
'''
for u,v in itertools.combinations(nodes, 2) :
if u < v :
yield u,v
else :
yield v, u
for u in nodes :
yield u,u
def swap(u,v) :
'''
Yup, just swap u and v
'''
return v, u
def findRoot( G ) :
"""
Given a graph G, which is a rooted tree, return the root node.
"""
import random
rn = random.choice(G.nodes())
pred = G.predecessors(rn)
while len(pred) == 1:
rn = pred[0]
pred = G.predecessors(rn)
return rn
def pairs(lst) :
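    '''
    Yield consecutive (lst[i], lst[i+1]) pairs from the list.
    '''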
n = len(lst)
return itertools.izip(*[itertools.islice(lst,0,n-1,1), itertools.islice(lst,1,n,1)])
def pathToRoot( n, rv, T ) :
'''
Compute the path in the tree T, from node n to the root node, rv.
'''
path = []
if n != rv :
p = n
while p != rv :
path.append(p)
p = T.predecessors(p)[0]
path.append(p)
return path
def edgeCost(edge, direc, cc, dc) :
'''
Convenience function to compute the cost of the edge
between u and v.
'''
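    # A directed edge ('f' or 'r') costs cc; an undirected self-loop also costs cc,
    # while any other undirected edge costs 2*cc.  The dc argument is unused here.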
cost = 0
u,v = edge
    if direc == 'f' or direc == 'r' :
cost = cc
else :
if u == v :
cost = cc
else :
cost = 2*cc
return cost
def treeToGraph(t) :
'''
Convert the Newick format tree, t, into a NetworkX
DiGraph. Tree edges will point from parents to children.
If branch lengths exist in the original tree, they will be
placed as edge weights in the resulting graph.
'''
root = t.Name
G = nx.DiGraph()
G.add_node(root)
for n in t.preorder():
if n.Name != root:
G.add_edge(n.Parent.Name, n.Name, weight=n.Parent.distance(n) )
return G
| apache-2.0 | 374,425,206,605,050,900 | 29.281046 | 118 | 0.548457 | false |
nkgilley/home-assistant | homeassistant/components/elgato/light.py | 6 | 4764 | """Support for LED lights."""
from datetime import timedelta
import logging
from typing import Any, Callable, Dict, List, Optional
from elgato import Elgato, ElgatoError, Info, State
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_ON,
ATTR_SOFTWARE_VERSION,
ATTR_TEMPERATURE,
DATA_ELGATO_CLIENT,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 1
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Elgato Key Light based on a config entry."""
elgato: Elgato = hass.data[DOMAIN][entry.entry_id][DATA_ELGATO_CLIENT]
info = await elgato.info()
async_add_entities([ElgatoLight(entry.entry_id, elgato, info)], True)
class ElgatoLight(LightEntity):
"""Defines a Elgato Key Light."""
def __init__(
self, entry_id: str, elgato: Elgato, info: Info,
):
"""Initialize Elgato Key Light."""
self._brightness: Optional[int] = None
self._info: Info = info
self._state: Optional[bool] = None
self._temperature: Optional[int] = None
self._available = True
self.elgato = elgato
@property
def name(self) -> str:
"""Return the name of the entity."""
# Return the product name, if display name is not set
if not self._info.display_name:
return self._info.product_name
return self._info.display_name
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return self._info.serial_number
@property
def brightness(self) -> Optional[int]:
"""Return the brightness of this light between 1..255."""
return self._brightness
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self._temperature
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 143
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 344
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
@property
def is_on(self) -> bool:
"""Return the state of the light."""
return bool(self._state)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the light."""
await self.async_turn_on(on=False)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on the light."""
data = {}
data[ATTR_ON] = True
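        # Note: async_turn_off re-uses this method and passes on=False through kwargs.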
if ATTR_ON in kwargs:
data[ATTR_ON] = kwargs[ATTR_ON]
if ATTR_COLOR_TEMP in kwargs:
data[ATTR_TEMPERATURE] = kwargs[ATTR_COLOR_TEMP]
if ATTR_BRIGHTNESS in kwargs:
data[ATTR_BRIGHTNESS] = round((kwargs[ATTR_BRIGHTNESS] / 255) * 100)
try:
await self.elgato.light(**data)
except ElgatoError:
_LOGGER.error("An error occurred while updating the Elgato Key Light")
self._available = False
async def async_update(self) -> None:
"""Update Elgato entity."""
try:
state: State = await self.elgato.state()
except ElgatoError:
if self._available:
_LOGGER.error("An error occurred while updating the Elgato Key Light")
self._available = False
return
self._available = True
self._brightness = round((state.brightness * 255) / 100)
self._state = state.on
self._temperature = state.temperature
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this Elgato Key Light."""
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._info.serial_number)},
ATTR_NAME: self._info.product_name,
ATTR_MANUFACTURER: "Elgato",
ATTR_MODEL: self._info.product_name,
ATTR_SOFTWARE_VERSION: f"{self._info.firmware_version} ({self._info.firmware_build_number})",
}
| apache-2.0 | -8,574,963,186,089,603,000 | 29.151899 | 105 | 0.619647 | false |
our-city-app/oca-backend | src/rogerthat/bizz/friends.py | 1 | 83704 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import base64
import hashlib
import itertools
import json
import logging
import uuid
from types import NoneType
from google.appengine.api import memcache, images, search
from google.appengine.ext import db, deferred
from mcfw.cache import cached
from mcfw.properties import azzert
from mcfw.rpc import returns, arguments, serialize_complex_value
from mcfw.utils import normalize_search_string
from rogerthat import consts
from rogerthat.bizz.friend_helper import FriendHelper, FriendCloudStorageHelper
from rogerthat.bizz.i18n import get_translator
from rogerthat.bizz.job import clear_service_inbox
from rogerthat.bizz.location import get_friend_location
from rogerthat.bizz.messaging import sendMessage, dashboardNotification
from rogerthat.capi.friends import becameFriends, updateGroups
from rogerthat.consts import MC_DASHBOARD, WEEK
from rogerthat.dal import parent_key, parent_key_unsafe
from rogerthat.dal.app import get_app_by_id, get_app_name_by_id, get_app_by_user
from rogerthat.dal.friend import get_friends_map, get_friends_friends_maps, get_friend_invitation_history, \
get_do_send_email_invitations
from rogerthat.dal.location import get_user_location
from rogerthat.dal.mobile import get_mobile_key_by_account
from rogerthat.dal.profile import get_profile_info, get_user_profile, is_service_identity_user, get_service_profile, \
get_profile_infos, are_service_identity_users, get_profile_key
from rogerthat.dal.roles import list_service_roles_by_type
from rogerthat.dal.service import get_friend_serviceidentity_connection, get_service_identity
from rogerthat.models import ProfileInfo, ServiceProfile, UserData, FriendServiceIdentityConnection, ServiceRole, \
FriendInvitationHistory, ServiceTranslation, UserInvitationSecret, UserProfile, Message, ServiceIdentity, \
ServiceInteractionDef, App, Group, ProfilePointer, FriendMap
from rogerthat.models.properties.friend import FriendDetailTO
from rogerthat.rpc import users
from rogerthat.rpc.models import RpcCAPICall, ServiceAPICallback, Mobile
from rogerthat.rpc.rpc import mapping, logError, SKIP_ACCOUNTS
from rogerthat.rpc.service import ServiceApiException, logServiceError
from rogerthat.service.api.friends import invited, is_in_roles
from rogerthat.settings import get_server_settings
from rogerthat.templates import render
from rogerthat.to.friends import UpdateFriendResponseTO, UpdateFriendRequestTO, FriendTO, FriendRelationTO, \
BecameFriendsRequestTO, BecameFriendsResponseTO, ServiceFriendStatusTO, UpdateFriendSetResponseTO, GroupTO, \
UpdateGroupsResponseTO, UpdateGroupsRequestTO, FindFriendResponseTO, \
FindFriendItemTO, RegistrationResultTO, FRIEND_TYPE_SERVICE
from rogerthat.to.location import GetLocationRequestTO
from rogerthat.to.messaging import ButtonTO, UserMemberTO
from rogerthat.to.roles import RoleTO
from rogerthat.to.service import UserDetailsTO, ServiceInteractionDefTO
from rogerthat.translations import localize, DEFAULT_LANGUAGE
from rogerthat.utils import channel, now, runeach, ed, foreach, base65, slog, is_clean_app_user_email, \
try_or_defer, bizz_check, base38, send_mail
from rogerthat.utils.app import get_app_user_tuple, get_app_id_from_app_user, get_human_user_from_app_user, \
create_app_user, remove_app_id, create_app_user_by_email, get_app_user_tuple_by_email
from rogerthat.utils.crypto import sha256
from rogerthat.utils.service import get_service_identity_tuple, remove_slash_default, add_slash_default, \
get_service_user_from_service_identity_user, create_service_identity_user
from rogerthat.utils.transactions import on_trans_committed, run_in_transaction
ACCEPT_ID = u"accepted"
ACCEPT_AND_CONNECT_ID = u"accept_and_connect"
DECLINE_ID = u"decline"
ESTABLISHED = u"established"
INVITE_ID = u"invite"
FRIEND_INVITATION_REQUEST = u"fir"
FRIEND_SHARE_SERVICE_REQUEST = u"fssr"
REQUEST_LOCATION_SHARING = u"rls"
INVITE_SERVICE_ADMIN = u"isa"
UPDATE_PROFILE = u"up"
FRIEND_ACCEPT_FAILED = u"faf"
INVITE_FACEBOOK_FRIEND = u"iff"
ORIGIN_USER_POKE = u"user_poke"
ORIGIN_SERVICE_INVITE = u"service_invite"
ORIGIN_USER_INVITE = u"user_invite"
ORIGIN_USER_RECOMMENDED = u"user_recommended"
ORIGIN_YSAAA = u"your_service_as_an_app"
ACTOR_TYPE_INVITOR = u"invitor"
ACTOR_TYPE_INVITEE = u"invitee"
REGISTRATION_ORIGIN_DEFAULT = u'default'
REGISTRATION_ORIGIN_QR = u'qr'
@returns([ButtonTO])
@arguments(language=unicode, accept_ui_flag=int)
def create_accept_decline_buttons(language, accept_ui_flag=0):
buttons = list()
button = ButtonTO()
button.id = ACCEPT_ID
button.caption = localize(language, "Accept invitation")
button.action = None
button.ui_flags = accept_ui_flag
buttons.append(button)
button = ButtonTO()
button.id = DECLINE_ID
button.caption = localize(language, "Decline invitation")
button.action = None
button.ui_flags = 0
buttons.append(button)
return buttons
@returns([ButtonTO])
@arguments(language=unicode)
def create_add_to_services_button(language):
buttons = list()
button = ButtonTO()
button.id = ACCEPT_ID
button.caption = localize(language, "Add to my services")
button.action = None
button.ui_flags = 0
buttons.append(button)
return buttons
UNIT_TEST_REFS = dict()
class PersonInvitationOverloadException(ServiceApiException):
def __init__(self):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 2,
"Person was already invited three times.")
class PersonAlreadyInvitedThisWeekException(ServiceApiException):
def __init__(self):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 1,
"This person was already invited in the last week.")
class InvalidEmailAddressException(ServiceApiException):
def __init__(self):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 0,
"Invalid email address.")
class CannotSelfInviteException(ServiceApiException):
def __init__(self):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 3,
"Can not invite myself.")
class DoesNotWantToBeInvitedViaEmail(ServiceApiException):
def __init__(self):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 4,
"This person does not want to be invited anymore via email.")
class CanNotRequestLocationFromServices(ServiceApiException):
def __init__(self):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 5,
"Cannot request location from service users.")
class UserNotFoundViaUserCode(ServiceApiException):
def __init__(self, user_code):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 6,
"User not found via userCode", user_code=user_code)
class CanNotInviteFriendException(ServiceApiException):
def __init__(self):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 7,
"This person is already your friend")
class CanNotInviteOtherServiceException(ServiceApiException):
def __init__(self):
ServiceApiException.__init__(self, ServiceApiException.BASE_CODE_FRIEND + 8,
"Cannot invite services.")
@returns(NoneType)
@arguments(invitor_user=users.User, invitee_email=unicode, message=unicode, language=unicode,
servicetag=unicode, origin=unicode, app_id=unicode, allow_unsupported_apps=bool)
def invite(invitor_user, invitee_email, message, language, servicetag, origin, app_id, allow_unsupported_apps=False):
from rogerthat.bizz.service import validate_app_id_for_service_identity_user
# Basic validation checks
if ':' in invitee_email:
invitee_user, invitee_email_app_id = get_app_user_tuple_by_email(invitee_email)
azzert(invitee_email_app_id == app_id, "Cannot invite user in other app")
invitee_email = invitee_user.email()
invitee, invitation_history, now_ = _validate_invitation(
invitee_email, invitor_user, servicetag, app_id, allow_unsupported_apps)
# Check whether invitee is a nuntiuz user
invitor_profile_info, invitee_profile_info = get_profile_infos([invitor_user, invitee], allow_none_in_results=True)
if not invitee_profile_info:
if not language:
if invitor_profile_info:
# Default to invitor language
if invitor_profile_info.isServiceIdentity:
language = get_service_profile(invitor_profile_info.service_user).defaultLanguage
else:
language = invitor_profile_info.language
else:
language = DEFAULT_LANGUAGE
logging.info('sending invitation email to %s', invitee)
_send_invitation_email(language, get_human_user_from_app_user(
invitee).email(), invitor_user, invitee, invitation_history, now_, message, servicetag, origin)
return
if not invitee_profile_info.isServiceIdentity:
# invitee is a human user. Send him an email
if is_service_identity_user(invitor_user):
validate_app_id_for_service_identity_user(invitor_user, app_id)
_send_invitation_message(servicetag, message, get_human_user_from_app_user(
invitee).email(), invitor_user, invitee, invitation_history, now_, origin)
else:
# invitee is a service
if is_service_identity_user(invitor_user):
raise CanNotInviteOtherServiceException()
azzert(get_app_id_from_app_user(invitor_user) == app_id)
if "/" not in invitee.email():
invitee = create_service_identity_user(invitee)
if not allow_unsupported_apps:
validate_app_id_for_service_identity_user(invitee, app_id)
if not language:
invitor_profile = get_user_profile(invitor_user)
language = invitor_profile.language
svc_user, identifier = get_service_identity_tuple(invitee)
svc_profile = get_service_profile(svc_user)
invitor_user_profile = get_user_profile(invitor_user)
context = invited(invited_response_receiver, logServiceError, svc_profile,
email=get_human_user_from_app_user(invitor_user).email(), name=invitor_user_profile.name,
message=message, language=language, tag=servicetag, origin=origin,
service_identity=identifier,
user_details=[UserDetailsTO.fromUserProfile(invitor_user_profile)],
DO_NOT_SAVE_RPCCALL_OBJECTS=True)
if context: # None if friend.invited is not implemented
context.invitor = invitor_user
context.invitee = invitee
context.servicetag = servicetag
context.origin = origin
context.allow_unsupported_apps = allow_unsupported_apps
context.put()
else:
deferred.defer(process_invited_response, svc_user, invitee, identifier, invitor_user, invitee, servicetag,
origin, False, None, None, allow_unsupported_apps=allow_unsupported_apps)
UNIT_TEST_REFS["invited"] = context
@returns(bool)
@arguments(profile1=ProfileInfo, profile2=ProfileInfo)
def areFriends(profile1, profile2):
    """Checks whether profile1 is connected to profile2 and profile2 is connected to profile1"""
def check(p1, p2):
if p1.isServiceIdentity:
fsic = get_friend_serviceidentity_connection(p2.user, p1.user)
return fsic is not None and not fsic.deleted
else:
friend_map = get_friends_map(p1.user)
return friend_map is not None and remove_slash_default(p2.user) in friend_map.friends
return check(profile1, profile2) and check(profile2, profile1)
@returns(bool)
@arguments(profile_info1=ProfileInfo, profile_info2=ProfileInfo)
def canBeFriends(profile_info1, profile_info2):
# TODO communities: just refactor to `return True` or remove this function entirely
if profile_info1.isServiceIdentity:
if profile_info2.isServiceIdentity:
return False
else:
return get_app_id_from_app_user(profile_info2.user) in profile_info1.appIds
if profile_info2.isServiceIdentity:
return get_app_id_from_app_user(profile_info1.user) in profile_info2.appIds
return get_app_id_from_app_user(profile_info1.user) == get_app_id_from_app_user(profile_info2.user)
@returns()
@arguments(app_user=users.User, service_profile=ServiceProfile, identifier=unicode, user_details=[UserDetailsTO],
roles=[RoleTO])
def send_is_in_roles(app_user, service_profile, identifier, user_details, roles):
def trans():
context = is_in_roles(is_in_roles_response_receiver, logServiceError, service_profile,
service_identity=identifier, user_details=user_details, roles=roles,
DO_NOT_SAVE_RPCCALL_OBJECTS=True)
if context is not None: # None if friend.is_in_roles not implemented
context.human_user = app_user
context.role_ids = [r.id for r in roles]
context.put()
if roles:
if db.is_in_transaction():
trans()
else:
db.run_in_transaction(trans)
@returns()
@arguments(loyalty_device_user=users.User, scanned_user=users.User)
def connect_by_loyalty_scan(loyalty_device_user, scanned_user):
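    """Connect the (single) loyalty service of the scanning device with the scanned
    user, unless the two are already connected."""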
friends_map = get_friends_map(loyalty_device_user)
if len(friends_map.friends) == 1:
service_user = friends_map.friends[0]
if get_friend_serviceidentity_connection(scanned_user, create_service_identity_user(service_user)):
logging.info('Loyalty service %s and user %s are already connected', service_user, scanned_user)
else:
logging.info('Connecting loyalty service %s with user %s', service_user, scanned_user)
deferred.defer(makeFriends, service_user, scanned_user, None, None, ORIGIN_USER_INVITE, False, False,
allow_unsupported_apps=True, _countdown=2)
else:
logging.error('Unexpected friend count for loyalty device: %s', len(friends_map.friends))
@returns(NoneType)
@arguments(invitor=users.User, invitee=users.User, original_invitee=users.User, servicetag=unicode, origin=unicode,
notify_invitee=bool, notify_invitor=bool, user_data=unicode, allow_unsupported_apps=bool,
service_helper=FriendCloudStorageHelper, skip_callbacks=bool)
def makeFriends(invitor, invitee, original_invitee, servicetag, origin, notify_invitee=False, notify_invitor=True,
user_data=None, allow_unsupported_apps=False, service_helper=None, skip_callbacks=False):
    """ Make friends between invitor and invitee. They can be both human users, or one can be a service_identity_user
    Although we are in the bizz layer, it is possible that the /+default+ suffix is not included! """
    from rogerthat.bizz import log_analysis
from rogerthat.bizz.profile import schedule_re_index
from rogerthat.bizz.service import create_send_user_data_requests
from rogerthat.bizz.job.update_friends import create_update_friend_requests
from rogerthat.bizz.news.matching import setup_notification_settings_for_user
def notifyActorInWebUI(helper, from_, friend_map, actor, to_friendDetail):
        if get_app_id_from_app_user(from_) == App.APP_ID_ROGERTHAT:
if to_friendDetail.type == FriendDetailTO.TYPE_USER:
friends = [FriendRelationTO.fromDBFriendDetail(fd) for fd in friend_map.get_friend_details().values()
if fd.existence == FriendDetailTO.FRIEND_EXISTENCE_ACTIVE]
friends_of_friend = serialize_complex_value(friends, FriendRelationTO, True)
else:
friends_of_friend = []
# Send update request over channel API
friend_dict = FriendTO.fromDBFriendDetail(helper, to_friendDetail, False, True, targetUser=from_).to_dict()
channel.send_message(
from_,
u'rogerthat.friend.ackInvitation',
actor=actor,
friend=friend_dict,
friends=friends_of_friend)
def run(from_profile_info, to_profile_info, to_name, to_shareContacts, from_, to, actor_type):
notifyFriends = None
side_effects = list()
to = remove_slash_default(to)
to_put = []
if not from_profile_info.isServiceIdentity:
# from_ is a human
# to is a human or service
friendMap = get_friends_map(from_)
friendMap.friends.append(to)
friendMap.friends = list(set(friendMap.friends))
if to_profile_info:
profile_type = FriendDetailTO.TYPE_SERVICE if to_profile_info.isServiceIdentity else FriendDetailTO.TYPE_USER
if to_profile_info.isServiceIdentity and user_data is not None:
try:
_ = json.loads(user_data)
except:
logging.warn("Invalid user data JSON string!", exc_info=True)
has_user_data = False
else:
user_data_model_key = UserData.createKey(from_, to_profile_info.user)
user_data_model = UserData(key=user_data_model_key)
user_data_model.data = user_data
user_data_model.put()
has_user_data = True
user_profile = get_user_profile(from_)
mobiles = db.get([get_mobile_key_by_account(mobile_detail.account)
for mobile_detail in user_profile.get_mobiles().values()])
to_put.extend(create_send_user_data_requests(mobiles, user_data_model, from_,
to_profile_info.user))
else:
has_user_data = False
to_friend_detail = FriendDetailTO.create(to, to_name, to_profile_info.avatarId, False, False, to_shareContacts, profile_type, has_user_data)
else:
to_friend_detail = FriendDetailTO.create(to, to_name, -1, False, False, to_shareContacts, FriendDetailTO.TYPE_USER)
friend_details = friendMap.get_friend_details()
friend_details[to_friend_detail.email] = to_friend_detail
friendMap.save_friend_details(friend_details)
friendMap.generation += 1
friendMap.version += 1 # version of the set of friend e-mails
logging.debug('debugging_branding makeFriends friend_map.gen %s friend_map.ver %s', friendMap.generation, friendMap.version)
to_put.append(friendMap)
if to_friend_detail.type == FRIEND_TYPE_SERVICE and service_helper:
helper = service_helper
else:
helper = FriendHelper.from_data_store(users.User(to_friend_detail.email), to_friend_detail.type)
side_effects.append(lambda: notifyActorInWebUI(helper, from_, friendMap, actor_type, to_friend_detail))
to_put.extend(create_update_friend_requests(helper, to, friendMap, UpdateFriendRequestTO.STATUS_ADD))
if not to_profile_info.isServiceIdentity:
notifyFriends = (from_, to, friendMap, to_friend_detail) # both from and to are human users
side_effects.append(lambda: schedule_re_index(from_))
else:
# from_ is a service
# to is a human
from_ = add_slash_default(from_)
svc_user, identifier = get_service_identity_tuple(from_)
svc_profile = service_helper.get_service_profile() if service_helper else get_service_profile(svc_user)
new_fsic = FriendServiceIdentityConnection.create(to,
to_profile_info.name,
to_profile_info.avatarId,
from_,
to_profile_info.app_id)
new_fsic.put()
user_details = [UserDetailsTO.fromUserProfile(to_profile_info)]
# Only used in case a new autoconnected service is added
# Else we get too many transaction collisions because of the service callback models
if not skip_callbacks:
if actor_type == ACTOR_TYPE_INVITOR:
from rogerthat.service.api.friends import invite_result
side_effects.append(lambda: invite_result(invite_result_response_receiver, logServiceError, svc_profile,
email=get_human_user_from_app_user(to).email(),
result=ACCEPT_ID, tag=servicetag, origin=origin,
service_identity=identifier, user_details=user_details))
else:
logging.info('Skipping invite_result service api callback')
unknown_synced_roles = [r for r in list_service_roles_by_type(svc_user, ServiceRole.TYPE_SYNCED)
if not to_profile_info.has_role(from_, u'%s' % r.role_id)]
if unknown_synced_roles:
on_trans_committed(send_is_in_roles, to, svc_profile, identifier, user_details,
map(RoleTO.fromServiceRole, unknown_synced_roles))
on_trans_committed(slog, msg_="Service user gained", function_=log_analysis.SERVICE_STATS,
service=from_.email(), tag=to.email(), type_=log_analysis.SERVICE_STATS_TYPE_GAINED)
on_trans_committed(setup_notification_settings_for_user, to, from_)
if to_put:
db.put(to_put)
db.delete(FriendInvitationHistory.createKey(from_profile_info.user, to_profile_info.user))
return side_effects, notifyFriends # notifyFriends is None, or from_ and to are BOTH humans !
def translate_service_identity_name(service_identity, target_user_profile):
translator = get_translator(
service_identity.service_user, [ServiceTranslation.IDENTITY_TEXT], target_user_profile.language)
return translator.translate(ServiceTranslation.IDENTITY_TEXT, service_identity.name, target_user_profile.language)
logging.info("Storing accepted friend connection from %s to %s" % (invitor, invitee))
invitee_profile_info, invitor_profile_info = get_profile_infos([invitee, invitor])
if invitor_profile_info.isServiceIdentity:
myShareContacts = False
invitor_name = translate_service_identity_name(invitor_profile_info, invitee_profile_info)
else:
myShareContacts = get_friends_map(invitor).shareContacts
invitor_name = invitor_profile_info.name
if invitee_profile_info.isServiceIdentity:
hisShareContacts = False
invitee_name = translate_service_identity_name(invitee_profile_info, invitor_profile_info)
else:
hisShareContacts = get_friends_map(invitee).shareContacts
invitee_name = invitee_profile_info.name
def go():
if not allow_unsupported_apps:
bizz_check(canBeFriends(invitee_profile_info, invitor_profile_info),
"%s and %s can not be friends" % (invitee_profile_info.user, invitor_profile_info.user))
if not areFriends(invitee_profile_info, invitor_profile_info):
runMeResult = run(invitor_profile_info, invitee_profile_info, invitee_name,
hisShareContacts, invitor, invitee, ACTOR_TYPE_INVITOR)
runHimResult = run(invitee_profile_info, invitor_profile_info, invitor_name,
myShareContacts, invitee, invitor, ACTOR_TYPE_INVITEE)
else:
runMeResult = runHimResult = [[], False]
return runMeResult, runHimResult
xg_on = db.create_transaction_options(xg=True)
runMeResult, runHimResult = go() if db.is_in_transaction() else db.run_in_transaction_options(xg_on, go)
runeach(itertools.chain(runMeResult[0], runHimResult[0]))
# Delete invitation history
if original_invitee:
db.delete(FriendInvitationHistory.createKey(invitor, original_invitee))
deferred.defer(_notify_users, invitor_profile_info, invitor, invitee_profile_info, invitee, notify_invitee,
notify_invitor, (runMeResult[1], runHimResult[1]), _transactional=db.is_in_transaction(), _countdown=5)
@returns(NoneType)
@arguments(human_invitor_user=users.User, secret=unicode, phone_number=unicode, email_address=unicode, timestamp=int)
def log_invitation_secret_sent(human_invitor_user, secret, phone_number, email_address, timestamp):
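    """Record when, and to which phone number or e-mail address, an invitation secret
    was sent out."""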
def trans():
uis = UserInvitationSecret.get_by_id(base65.decode_int(secret), parent=parent_key(human_invitor_user))
uis.sent_timestamp = timestamp
uis.phone_number = phone_number
uis.email_address = email_address
uis.put()
db.run_in_transaction(trans)
@returns(NoneType)
@arguments(invitee_user=users.User, invitor_user=users.User, secret=unicode)
def ack_invitation_by_invitation_secret(invitee_user, invitor_user, secret):
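    """Accept an invitation identified by a one-time secret (e.g. from an invitation
    link): on success both users become friends, on failure the invitee receives a
    dashboard message offering to send an invitation himself."""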
if invitee_user == invitor_user:
logging.info("%s clicked his own invitation link" % invitee_user.email())
return
invitee_profile_info, invitor_profile_info = get_profile_infos(
[invitee_user, invitor_user], expected_types=[UserProfile, ProfileInfo])
invitee_friend_map = get_friends_map(invitee_user)
if remove_slash_default(invitor_user) in invitee_friend_map.friends:
logging.info("%s and %s are already friends!" % (invitee_user.email(), invitor_user.email()))
name = invitor_profile_info.name
msg = localize(invitee_profile_info.language, "You are already connected with %(name)s.", name=name)
dashboardNotification(invitee_user, msg)
return
try:
def trans():
invitation_secret = UserInvitationSecret.get_by_id(base65.decode_int(secret),
parent=parent_key_unsafe(remove_slash_default(invitor_user)))
azzert(invitation_secret, "Invitation secret not found")
azzert(invitation_secret.status in (UserInvitationSecret.STATUS_CREATED,
UserInvitationSecret.STATUS_SENT, UserInvitationSecret.STATUS_REDIRECTED), "Invitation secret not found")
invitation_secret.status = UserInvitationSecret.STATUS_USED
invitation_secret.used_timestamp = now()
invitation_secret.used_for_user = invitee_user
invitation_secret.put()
makeFriends(invitor_user, invitee_user, invitation_secret.email, invitation_secret.service_tag,
origin=invitation_secret.origin, notify_invitee=True)
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(xg_on, trans)
except AssertionError:
logging.exception("Failed to accept a secret based invitation.")
invitation_secret = UserInvitationSecret.get_by_id(base65.decode_int(secret),
parent=parent_key_unsafe(remove_slash_default(invitor_user)))
def trans():
button = ButtonTO()
button.id = INVITE_ID
button.caption = localize(invitee_profile_info.language, "Invite %(name)s", name=invitor_profile_info.name)
button.action = None
button.ui_flags = 0
msg = localize(invitee_profile_info.language, "_friendship_request_failed",
name=invitee_profile_info.name, friend_name=invitor_profile_info.name)
message = sendMessage(MC_DASHBOARD, [UserMemberTO(invitee_user)], Message.FLAG_ALLOW_DISMISS, 0, None, msg,
[button], None, get_app_by_user(invitee_user).core_branding_hash,
FRIEND_ACCEPT_FAILED, is_mfr=False)
message.invitor = invitee_user
message.invitee = invitor_user
message.service_tag = invitation_secret.service_tag if invitation_secret else None
message.origin = invitation_secret.origin if invitation_secret else ORIGIN_USER_INVITE
message.put()
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(xg_on, trans)
@returns(NoneType)
@arguments(message=Message)
def ackInvitation(message):
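    """Handle the invitee's answer to a friend invitation message: on accept the two
    parties become friends, on decline a service invitor is notified through the
    invite_result callback."""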
azzert(message.tag == FRIEND_INVITATION_REQUEST)
invitor = message.invitor
invitee = message.invitee
origin = getattr(message, "origin", ORIGIN_USER_INVITE)
servicetag = getattr(message, "servicetag", None)
btn_index = message.get_member_statuses()[message.members.index(invitee)].button_index
if btn_index != message.get_button_by_id(ACCEPT_ID).index:
if is_service_identity_user(invitor):
svc_user, identifier = get_service_identity_tuple(add_slash_default(invitor)) # for old invites by services
invitor_profile = get_service_profile(svc_user)
from rogerthat.service.api.friends import invite_result
invite_result(invite_result_response_receiver, logServiceError, invitor_profile, email=None,
result=DECLINE_ID, tag=servicetag, origin=origin, service_identity=identifier,
user_details=[UserDetailsTO.fromUserProfile(get_user_profile(invitee))])
return
makeFriends(invitor, invitee, invitee, servicetag, origin=origin)
@returns(NoneType)
@arguments(user=users.User, friend=users.User, enabled=bool)
def shareLocation(user, friend, enabled):
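    """Enable or disable sharing of user's location with friend and propagate the
    updated friend details to both parties."""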
from rogerthat.bizz.job.update_friends import create_update_friend_requests
def runMe():
myFriendMap = get_friends_map(user)
friend_details = myFriendMap.get_friend_details()
friend_detail = friend_details[friend.email()]
friend_detail.shareLocation = enabled
friend_detail.relationVersion += 1
myFriendMap.save_friend_details(friend_details)
myFriendMap.generation += 1
logging.debug('debugging_branding shareLocation friend_map.gen %s friend_detail.relv %s', myFriendMap.generation, friend_detail.relationVersion)
myFriendMap.put()
userLocation = get_user_location(user)
if userLocation.members:
if enabled and not friend in userLocation.members:
userLocation.members.append(friend)
userLocation.put()
elif not enabled and friend in userLocation.members:
userLocation.members.remove(friend)
userLocation.put()
helper = FriendHelper.from_data_store(users.User(friend_detail.email), friend_detail.type)
def updateMyWeb(helper):
if get_app_id_from_app_user(user) == App.APP_ID_ROGERTHAT:
channel.send_message(
user,
u'rogerthat.friend.shareLocationUpdate',
friend=FriendTO.fromDBFriendDetail(helper, friend_detail).to_dict())
return (myFriendMap, friend, helper, [updateMyWeb])
def runHim():
hisFriendMap = get_friends_map(friend)
friend_details = hisFriendMap.get_friend_details()
friend_detail = friend_details[friend.email()]
friend_detail.sharesLocation = enabled
friend_detail.relationVersion += 1
hisFriendMap.save_friend_details(friend_details)
hisFriendMap.generation += 1
logging.debug('debugging_branding shareLocation friend_map.gen %s friend_detail.relv %s', hisFriendMap.generation, friend_detail.relationVersion)
hisFriendMap.put()
helper = FriendHelper.from_data_store(users.User(friend_detail.email), friend_detail.type)
def updateWebOfFriend(helper):
if get_app_id_from_app_user(friend) == App.APP_ID_ROGERTHAT:
channel.send_message(
friend,
u'rogerthat.friend.shareLocation',
friend=FriendTO.fromDBFriendDetail(helper, friend_detail).to_dict())
return (hisFriendMap, user, helper, [updateWebOfFriend])
friendmap = get_friends_map(user)
if not friend in friendmap.friends:
logging.warning("ShareLocation performed to a non friend user!")
return
to_put = []
for friendMap, f, helper, functions in [db.run_in_transaction(runMe), db.run_in_transaction(runHim)]:
to_put.extend(create_update_friend_requests(helper, f, friendMap, UpdateFriendRequestTO.STATUS_MODIFIED))
for func in functions:
func(helper)
if to_put:
db.put(to_put)
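# Illustrative sketch (not part of the original module): shareLocation() above
# follows a pattern used throughout this file -- each datastore transaction only
# mutates entities and *returns* the loaded data plus callbacks for the
# non-transactional side effects (channel/web updates), which the caller runs
# after the transactions committed. The shape of that idiom, with made-up
# stand-ins for the real entities and helpers:
def _example_txn_with_deferred_side_effects(run_in_txn, load_entity, notify):
    def txn():
        entity = load_entity()          # read + mutate inside the transaction
        entity.generation += 1
        entity.put()
        return entity, [lambda: notify(entity)]  # callbacks run after commit
    entity, callbacks = run_in_txn(txn)
    for callback in callbacks:          # side effects stay outside the txn
        callback()
    return entity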
@returns(NoneType)
@arguments(user=users.User, friend=users.User, message=unicode)
def requestLocationSharing(user, friend, message):
myFriendMap = get_friends_map(user)
if not friend in myFriendMap.friends:
logging.warning("RequestShareLocation performed to a non friend user!")
return
friend_detail = myFriendMap.get_friend_detail_by_email(friend.email())
if friend_detail.sharesLocation:
return
friend_profile, user_profile = get_profile_infos([friend, user], expected_types=[UserProfile, UserProfile])
devices = list()
if friend_profile.get_mobiles():
for mob in friend_profile.get_mobiles().values():
if mob.type_ in (Mobile.TYPE_ANDROID_FIREBASE_HTTP, Mobile.TYPE_ANDROID_HTTP, Mobile.TYPE_IPHONE_HTTP_APNS_KICK,
Mobile.TYPE_IPHONE_HTTP_XMPP_KICK, Mobile.TYPE_LEGACY_IPHONE_XMPP,
Mobile.TYPE_WINDOWS_PHONE):
devices.append(mob)
name = _get_full_name(user)
if devices:
m = localize(friend_profile.language, "User %(name)s has requested to share your location.", name=name)
if message:
m += "\n" + localize(friend_profile.language, "%(name)s added a personal note:\n%(message)s", name=name,
message=message)
message = sendMessage(MC_DASHBOARD, [UserMemberTO(friend)], Message.FLAG_AUTO_LOCK, 0, None, m,
create_accept_decline_buttons(friend_profile.language,
Message.UI_FLAG_AUTHORIZE_LOCATION), None,
get_app_by_user(friend).core_branding_hash,
REQUEST_LOCATION_SHARING, is_mfr=False)
message.invitor = user
message.invitee = friend
message.put()
else:
m = localize(user_profile.language, "%(name)s does not have a device which supports location tracking.",
name=name)
sendMessage(MC_DASHBOARD, [UserMemberTO(user)], Message.FLAG_AUTO_LOCK | Message.FLAG_ALLOW_DISMISS, 0, None,
m, [], None, get_app_by_user(user).core_branding_hash, None, is_mfr=False)
@returns(NoneType)
@arguments(message=Message)
def ackRequestLocationSharing(message):
from rogerthat.bizz.job.update_friends import create_update_friend_requests
azzert(message.tag == REQUEST_LOCATION_SHARING)
user, friend = message.invitor, message.invitee
user_profile, friend_profile = get_profile_infos([user, friend], expected_types=[UserProfile, UserProfile])
if message.get_member_statuses()[message.members.index(message.invitee)].button_index != message.get_button_by_id(ACCEPT_ID).index:
msg = localize(
user_profile.language, "%(name)s declined your request to track his/her location.", name=friend_profile.name)
dashboardNotification(user, msg)
return
def update_web(helper, friend_detail, method):
if get_app_id_from_app_user(helper.user) == App.APP_ID_ROGERTHAT:
channel.send_message(helper.user, method,
friend=FriendTO.fromDBFriendDetail(helper, friend_detail).to_dict())
def trans():
my_friend_map_key = FriendMap.create_key(user)
his_friend_map_key = FriendMap.create_key(friend)
my_friend_map, his_friend_map = db.get([my_friend_map_key, his_friend_map_key])
my_friend_details = my_friend_map.get_friend_details()
him = my_friend_details[friend.email()]
his_friend_details = his_friend_map.get_friend_details()
me = his_friend_details[user.email()]
him.sharesLocation = me.sharesLocation = True
him.relationVersion += 1
my_friend_map.save_friend_details(my_friend_details)
me.relationVersion += 1
his_friend_map.save_friend_details(his_friend_details)
my_friend_map.generation += 1
his_friend_map.generation += 1
logging.debug('debugging_branding ackRequestLocationSharing my_friend_map.gen %s my_friend_detail.relv %s', my_friend_map.generation, me.relationVersion)
logging.debug('debugging_branding ackRequestLocationSharing his_friend_map.gen %s his_friend_detail.relv %s', his_friend_map.generation, him.relationVersion)
to_put = [my_friend_map, his_friend_map]
his_helper = FriendHelper.from_data_store(users.User(him.email), him.type)
my_helper = FriendHelper.from_data_store(users.User(me.email), me.type)
to_put.extend(create_update_friend_requests(his_helper, friend, my_friend_map,
UpdateFriendRequestTO.STATUS_MODIFIED))
to_put.extend(create_update_friend_requests(my_helper, user, his_friend_map,
UpdateFriendRequestTO.STATUS_MODIFIED))
db.put(to_put)
update_web(my_helper, him, u'rogerthat.friend.ackRequestLocationSharing')
update_web(his_helper, me, u'rogerthat.friend.ackRequestLocationSharingUpdate')
deferred.defer(_get_friend_location, user, friend, _transactional=True)
run_in_transaction(trans, True)
@returns(NoneType)
@arguments(user=users.User, service_identity_user=users.User, recipient_user=users.User)
def share_service_identity(user, service_identity_user, recipient_user):
from rogerthat.bizz import log_analysis
if not is_clean_app_user_email(recipient_user):
logging.warn('Unclean recipient email address in share svc - invitor %s - recipient %s - svcid %s' %
(user, recipient_user, service_identity_user))
return
user_profile, service_identity, recipient_profile = get_profile_infos([user, service_identity_user, recipient_user], allow_none_in_results=True,
expected_types=[UserProfile, ServiceIdentity, UserProfile])
azzert(user_profile)
azzert(service_identity)
azzert(service_identity.shareEnabled)
if not recipient_profile:
sid = ServiceInteractionDef.get(service_identity.shareSIDKey)
share_url = ServiceInteractionDefTO.emailUrl(sid)
deferred.defer(_send_recommendation_email, user_profile.language, user_profile.name, get_human_user_from_app_user(user).email(),
get_human_user_from_app_user(recipient_user).email(), service_identity.name, share_url, get_app_id_from_app_user(user))
slog(msg_="Recommend via email", function_=log_analysis.SERVICE_STATS, service=service_identity_user.email(
), tag=log_analysis.SERVICE_STATS_TYPE_RECOMMEND_VIA_EMAIL, type_=log_analysis.SERVICE_STATS_TYPE_RECOMMEND_VIA_EMAIL)
return
# Share service via Rogerthat
if get_friend_serviceidentity_connection(recipient_user, service_identity_user):
logging.info("Recipient (%s) is already connected to the recommended service (%s)", recipient_user.email(),
service_identity_user.email())
slog(msg_="Recommend via rogerthat (2)", function_=log_analysis.SERVICE_STATS,
service=service_identity_user.email(), tag=log_analysis.SERVICE_STATS_TYPE_RECOMMEND_VIA_ROGERTHAT,
type_=log_analysis.SERVICE_STATS_TYPE_RECOMMEND_VIA_ROGERTHAT)
else:
deferred.defer(_send_recommendation_message, recipient_profile.language, user, user_profile.name, service_identity_user,
service_identity, recipient_user)
slog(msg_="Recommend via rogerthat", function_=log_analysis.SERVICE_STATS, service=service_identity_user.email(
), tag=log_analysis.SERVICE_STATS_TYPE_RECOMMEND_VIA_ROGERTHAT, type_=log_analysis.SERVICE_STATS_TYPE_RECOMMEND_VIA_ROGERTHAT)
@arguments(language=unicode, from_name=unicode, from_email=unicode, to_email=unicode, service_name=unicode, share_url=unicode, app_id=unicode)
def _send_recommendation_email(language, from_name, from_email, to_email, service_name, share_url, app_id=App.APP_ID_ROGERTHAT):
app = get_app_by_id(app_id)
subject = localize(language, "%(user_name)s recommends you to connect to %(service_name)s on %(app_name)s.",
user_name=from_name, service_name=service_name, app_name=app.name)
variables = dict(consts=consts, from_name=from_name, share_url=share_url, service_name=service_name, app_id=app_id,
app_name=app.name)
body = render("recommend_service_email", [language], variables)
html = render("recommend_service_email_html", [language], variables)
if app.is_default:
dashboard_email_address = get_server_settings().senderEmail
else:
dashboard_email_address = ("%s <%s>" % (app.name, app.dashboard_email_address))
send_mail(dashboard_email_address, to_email, subject, body, html=html)
@arguments(language=unicode, from_=users.User, from_name=unicode, service_identity_user=users.User,
service_identity=ServiceIdentity, to=users.User)
def _send_recommendation_message(language, from_, from_name, service_identity_user, service_identity, to):
app_name = get_app_name_by_id(get_app_id_from_app_user(from_))
m = localize(language, "%(user_name)s recommends you to connect to %(service_name)s on %(app_name)s.",
user_name=from_name, app_name=app_name, service_name=service_identity.name)
m += "\n\n%(about)s %(service_name)s:\n%(service_description)s" % {
'about': localize(language, 'About'),
'service_name': service_identity.name,
'service_description': service_identity.description
}
message = sendMessage(MC_DASHBOARD, [UserMemberTO(to)], Message.FLAG_AUTO_LOCK, 0, None, m,
create_accept_decline_buttons(language), None, get_app_by_user(to).core_branding_hash,
FRIEND_SHARE_SERVICE_REQUEST, is_mfr=False)
message.invitor = from_
message.invitee = to
message.recommended_service = service_identity_user
db.put(message)
@returns(NoneType)
@arguments(message=Message)
def ack_share_service(message):
azzert(message.tag == FRIEND_SHARE_SERVICE_REQUEST)
service_identity_user = add_slash_default(message.recommended_service) # for old recommendation before migration
invitee = message.invitee
# If the invitee accepts the recommendation, then we somehow reverse roles, and the invitee invites the service
# This means that the invitee is now an invitor
if message.get_member_statuses()[message.members.index(invitee)].button_index == message.get_button_by_id(ACCEPT_ID).index:
invite(invitee, service_identity_user.email(), None, None,
None, ORIGIN_USER_RECOMMENDED, get_app_id_from_app_user(invitee))
@mapping('com.mobicage.capi.friends.update_friend_response')
@returns(NoneType)
@arguments(context=RpcCAPICall, result=UpdateFriendResponseTO)
def update_friend_response(context, result):
if result is None or (result.updated is False and hasattr(context, 'update_status')):
logging.warn("The updateFriend call was not processed by the phone.\nReason: %s", result and result.reason)
@mapping('com.mobicage.capi.friends.update_friend_set_response')
@returns(NoneType)
@arguments(context=RpcCAPICall, result=UpdateFriendSetResponseTO)
def update_friend_set_response(context, result):
if not result:
logging.warn("The updateFriendSet call return NULL")
elif result.updated is False:
logging.warn("The updateFriendSet call was not processed by the phone.\nReason: %s", result.reason)
@mapping('com.mobicage.capi.friends.became_friends_response')
@returns(NoneType)
@arguments(context=RpcCAPICall, result=BecameFriendsResponseTO)
def became_friends_response(context, result):
pass
@mapping('com.mobicage.capi.friends.update_groups_response')
@returns(NoneType)
@arguments(context=RpcCAPICall, result=UpdateGroupsResponseTO)
def update_groups_response(context, result):
pass
@returns(NoneType)
@arguments(user1=users.User, user2=users.User, current_mobile=Mobile)
def breakFriendShip(user1, user2, current_mobile=None):
""" Break friendship between invitor and invitee. They can be both human users, or one can be a service_identity_user
Although we are in the bizz layer, it is possible that the /+default+ suffix is not included! """
from rogerthat.bizz.profile import schedule_re_index
from rogerthat.bizz import log_analysis
def removeShareLocationAccess(user, friend):
userLocation = get_user_location(user)
if userLocation.members and friend in userLocation.members:
if friend in userLocation.members:
userLocation.members.remove(friend)
userLocation.put()
def updateWeb(from_, helper, friend_detail):
# Send update request over channel API
if get_app_id_from_app_user(from_) == App.APP_ID_ROGERTHAT:
friend_dict = FriendTO.fromDBFriendDetail(helper, friend_detail).to_dict()
channel.send_message(from_, u'rogerthat.friend.breakFriendShip', friend=friend_dict)
to_put = []
def run(from_, to, initiator, from_is_service_identity, to_is_service_identity, current_mobile):
from rogerthat.bizz.roles import ROLES
to = remove_slash_default(to)
email = to.email() # possibly with ":<app id>" or "/<service identifier>"
if not from_is_service_identity:
removeShareLocationAccess(from_, to)
friendMap = get_friends_map(from_)
friend_map_updated = False
if to in friendMap.friends:
friendMap.friends.remove(to)
friend_map_updated = True
friend_details = friendMap.get_friend_details()
if email in friend_details:
friendDetail = friend_details[email]
user_profile = get_user_profile(from_)
if friendDetail.type == FriendDetailTO.TYPE_SERVICE and friendDetail.hasUserData:
user_data = UserData.get(UserData.createKey(from_, add_slash_default(to)))
if user_data:
if user_data.data:
from rogerthat.bizz.service import get_update_userdata_requests
user_data_dict = json.loads(user_data.data)
data_object = {k: None for k in user_data_dict.iterkeys()}
mobiles = db.get([get_mobile_key_by_account(m.account) for m in user_profile.get_mobiles().values()])
rpcs = get_update_userdata_requests(mobiles, from_, to, data_object, data_object.keys())
to_put.extend(rpcs)
user_data.delete()
if to_is_service_identity:
# revoke all service roles for this service identity
service_identity_user = add_slash_default(to)
roles = user_profile.grants.get(service_identity_user.email(), list())
service_roles = [r for r in roles if r not in ROLES]
if service_roles:
for sr in service_roles:
logging.debug("Revoking role %s of %s for %s", sr, service_identity_user, from_)
user_profile.revoke_role(service_identity_user, sr)
user_profile.put()
from rogerthat.bizz.roles import _send_service_role_grants_updates
on_trans_committed(_send_service_role_grants_updates,
get_service_user_from_service_identity_user(service_identity_user))
del friend_details[email]
friend_map_updated = True
def side_effects():
from rogerthat.bizz.job.update_friends import convert_friend, do_update_friend_request
status = UpdateFriendRequestTO.STATUS_DELETE
helper = FriendHelper.from_data_store(users.User(friendDetail.email), friendDetail.type)
def _create_friend_to():
conversion_args = {'existence': FriendTO.FRIEND_EXISTENCE_DELETED}
return convert_friend(helper, from_, friendDetail, status, conversion_args)
yield lambda: updateWeb(from_, helper, friendDetail)
yield lambda: db.put(do_update_friend_request(from_, _create_friend_to(), status, friendMap, helper))
if not to_is_service_identity:
found_member_in_groups = False
for g in Group.gql("WHERE ANCESTOR IS :1 and members = :2", parent_key(from_), to.email()):
found_member_in_groups = True
g.members.remove(to.email())
if len(g.members) == 0:
g.delete()
else:
g.put()
if found_member_in_groups:
extra_kwargs = dict()
if current_mobile is not None and from_ == initiator:
extra_kwargs[SKIP_ACCOUNTS] = [current_mobile.account]
yield lambda: updateGroups(update_groups_response, logError, from_,
request=UpdateGroupsRequestTO(), **extra_kwargs)
yield lambda: schedule_re_index(from_)
else:
def side_effects():
yield lambda: schedule_re_index(from_)
if friend_map_updated:
friendMap.save_friend_details(friend_details)
friendMap.generation += 1
friendMap.version += 1 # version of the set of friend e-mails
friendMap.put()
logging.debug('debugging_branding breakFriendShip friend_map.gen %s friend_map.ver %s', friendMap.generation, friendMap.version)
else:
# from_ is service identity user
# to is human
from_ = add_slash_default(from_)
fsic = db.get(FriendServiceIdentityConnection.createKey(friend_user=to, service_identity_user=from_))
if fsic:
on_trans_committed(slog, msg_="Service user lost", function_=log_analysis.SERVICE_STATS,
service=from_.email(), tag=to.email(), type_=log_analysis.SERVICE_STATS_TYPE_LOST)
db.delete(fsic)
clear_service_inbox.schedule(from_, to)
def side_effects():
if to != initiator:
return
up = get_user_profile(to)
if not up:
return
from rogerthat.service.api.friends import broke_friendship
svc_user, identifier = get_service_identity_tuple(from_)
svc_profile = get_service_profile(svc_user)
yield lambda: broke_friendship(broke_friendship_response_receiver, logServiceError, svc_profile,
email=get_human_user_from_app_user(to).email(),
service_identity=identifier,
user_details=[UserDetailsTO.fromUserProfile(up)])
if to_put:
db.put(to_put)
return side_effects()
# are_service_identity_users barfs if there is no ProfileInfo for a certain user.
# a ProfileInfo for user2 will definitely exist:
# - if user1 uses the Rogerthat App or is a ServiceIdentity (user1 does not contain ':')
# - if user2 contains ':' or '/'
if ':' not in user1.email() or ':' in user2.email() or '/' in user2.email():
try:
user1_is_svc_identity, user2_is_svc_identity = are_service_identity_users([user1, user2])
except AssertionError:
logging.debug('Ignoring breakFriendship request. One of the users does not exist.', exc_info=1)
return
else:
# there is a ':' in user1, and no ':' in user2, and no '/' in user2
user1_is_svc_identity = False
user2_is_svc_identity = is_service_identity_user(user2)
if not user2_is_svc_identity:
# user2 should be appended with the app_id of user1
user2 = create_app_user(user2, get_app_id_from_app_user(user1))
logging.debug("user1 (%s) is a %s", user1, 'ServiceIdentity' if user1_is_svc_identity else 'UserProfile')
logging.debug("user2 (%s) is a %s", user2, 'ServiceIdentity' if user2_is_svc_identity else 'UserProfile')
# every db.run_in_transaction will return a list of side effects
# itertools.chain appends 2 generators to each other
# runeach executes every returned side effect of the 2 transactions
xg_on = db.create_transaction_options(xg=True)
runeach(itertools.chain(db.run_in_transaction_options(xg_on, run, user1, user2, user1, user1_is_svc_identity, user2_is_svc_identity, current_mobile),
db.run_in_transaction_options(xg_on, run, user2, user1, user1, user2_is_svc_identity, user1_is_svc_identity, current_mobile)))
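# Illustrative sketch (not part of the original module): the comments above
# describe the contract of runeach() -- each transaction yields zero-argument
# callables ("side effects"), itertools.chain concatenates the two generators
# and runeach() invokes every callable. Assuming that is all runeach does, an
# equivalent helper would look roughly like this:
def _example_runeach(side_effect_iterables):
    import itertools
    for side_effect in itertools.chain(*side_effect_iterables):
        side_effect()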
@returns(NoneType)
@arguments(user=users.User, enabled=bool)
def setShareContacts(user, enabled):
friendMap = get_friends_map(user)
friendMap.shareContacts = enabled
friendMap.put()
to_put = []
for friendMap in get_friends_friends_maps(user):
friend_details = friendMap.get_friend_details()
if user.email() in friend_details:
friend_details[user.email()].sharesContacts = enabled
friendMap.save_friend_details(friend_details)
to_put.append(friendMap)
else:
logging.error("friendMap of %s is inconsistent" % friendMap.me())
if to_put:
db.put(to_put)
@mapping(u'friend.invite_result.response_receiver')
@returns(NoneType)
@arguments(context=ServiceAPICallback, result=NoneType)
def invite_result_response_receiver(context, result):
capi_call = context
if getattr(capi_call, "poke", False):
from rogerthat.bizz.service import poke_service_with_tag
context = getattr(capi_call, "context", None)
message_flow_run_id = getattr(capi_call, "message_flow_run_id", None)
poke_service_with_tag(capi_call.invitor, capi_call.service_identity_user, capi_call.poke_tag, context,
message_flow_run_id, now())
@mapping(u'friend.broke_up.response_receiver')
@returns(NoneType)
@arguments(context=ServiceAPICallback, result=NoneType)
def broke_friendship_response_receiver(context, result):
pass
@mapping(u'friend.update.response_receiver')
@returns(NoneType)
@arguments(context=ServiceAPICallback, result=NoneType)
def friend_update_response_receiver(context, result):
pass
@mapping(u'friend.is_in_roles.response_receiver')
@returns(NoneType)
@arguments(context=ServiceAPICallback, result=[(int, long)])
def is_in_roles_response_receiver(context, result):
from rogerthat.bizz.job.update_friends import schedule_update_a_friend_of_a_service_identity_user
from rogerthat.bizz.roles import _send_service_role_grants_updates
service_user = context.service_user
service_identity_user = context.service_identity_user
app_user = context.human_user
role_ids = context.role_ids
def trans():
user_profile = get_user_profile(app_user, cached=False)
granted = False
revoked = False
for role_id in role_ids:
if role_id in result:
logging.debug('Granting role %s', role_id)
user_profile.grant_role(service_identity_user, role_id)
granted = True
elif user_profile.has_role(service_identity_user, role_id):
logging.debug('Revoking role %s', role_id)
user_profile.revoke_role(service_identity_user, role_id, skip_warning=True)
revoked = True
if granted or revoked:
user_profile.put()
on_trans_committed(_send_service_role_grants_updates, service_user)
if granted:
on_trans_committed(schedule_update_a_friend_of_a_service_identity_user, service_identity_user,
app_user, force=True)
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(xg_on, trans)
@mapping(u'friend.invited.response_receiver')
@returns(NoneType)
@arguments(context=ServiceAPICallback, result=unicode)
def invited_response_receiver(context, result):
capi_call = context
user_data = None
if result and result.startswith('{'):
try:
json.loads(result)
except:
logging.info("Received weird response in invited_response_receiver: %s" % result)
result = DECLINE_ID
else:
user_data = result
result = ACCEPT_ID
if result != ACCEPT_ID:
return # declined
invitor = capi_call.invitor
tag = capi_call.servicetag
invitee = capi_call.invitee
origin = capi_call.origin
service_user = capi_call.service_user
service_identity_user = capi_call.service_identity_user
service_identifier = capi_call.service_identifier
context = getattr(capi_call, "context", None)
message_flow_run_id = getattr(capi_call, "message_flow_run_id", None)
poke = getattr(capi_call, "poke", False)
allow_unsupported_apps = getattr(capi_call, 'allow_unsupported_apps', False)
process_invited_response(service_user, service_identity_user, service_identifier, invitor, invitee, tag, origin,
poke, context, message_flow_run_id, user_data, allow_unsupported_apps)
@arguments(service_user=users.User, service_identity_user=users.User, service_identifier=unicode, invitor=users.User,
invitee=users.User, tag=unicode, origin=unicode, poke=bool, context=unicode, message_flow_run_id=unicode,
user_data=unicode, allow_unsupported_apps=bool)
def process_invited_response(service_user, service_identity_user, service_identifier, invitor, invitee, tag, origin,
poke, context, message_flow_run_id, user_data=None, allow_unsupported_apps=False):
from rogerthat.service.api.friends import invite_result
makeFriends(invitor, invitee, invitee, None, notify_invitor=False, origin=origin, user_data=user_data,
allow_unsupported_apps=allow_unsupported_apps)
user_details = [UserDetailsTO.fromUserProfile(get_user_profile(invitor))]
def trans():
invite_result_capi_call = invite_result(invite_result_response_receiver, logServiceError,
get_service_profile(service_user), email=get_human_user_from_app_user(invitor).email(),
result=ESTABLISHED, tag=tag, origin=origin,
service_identity=service_identifier, user_details=user_details,
DO_NOT_SAVE_RPCCALL_OBJECTS=True)
if poke:
if not invite_result_capi_call: # None if friend.invite_result is not implemented
return "poke_now"
else:
invite_result_capi_call.poke = True
invite_result_capi_call.poke_tag = tag
invite_result_capi_call.invitor = invitor
invite_result_capi_call.service = invitee
invite_result_capi_call.context = context
invite_result_capi_call.message_flow_run_id = message_flow_run_id
invite_result_capi_call.put()
result = db.run_in_transaction(trans)
if result == "poke_now":
try_or_defer(_poke_service_directly, invitor, tag, context, message_flow_run_id, service_identity_user, now())
@cached(1, memcache=False)
@returns(unicode)
@arguments(user=users.User)
def userCode(user):
server_settings = get_server_settings()
c = base64.b64decode(server_settings.userCodeCipher.encode("utf8"))
user_hash = sha256(c % remove_slash_default(user).email())
return user_code_by_hash(user_hash)
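# Illustrative sketch (not part of the original module): userCode() derives a
# stable, opaque code for a user by interpolating the configured userCodeCipher
# with the user's e-mail, hashing it and encoding the hash. The code can later
# be resolved back through a ProfilePointer. A hedged usage example (the e-mail
# and app id below are made up):
def _example_user_code_lookup():
    app_user = users.User(u'john.doe@example.com:some-app-id')  # hypothetical
    code = userCode(app_user)              # deterministic, cached per user
    return get_profile_info_via_user_code(code)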
@returns(unicode)
@arguments(user_hash=unicode)
def user_code_by_hash(user_hash):
return unicode(ed(user_hash))
@returns(ProfileInfo)
@arguments(code=unicode)
def get_profile_info_via_user_code(code):
pp = ProfilePointer.get(code)
if pp:
return get_profile_info(pp.user)
return None
@returns(UserProfile)
@arguments(code=unicode)
def get_user_profile_via_user_code(code):
pp = ProfilePointer.get(code)
if pp:
return get_user_profile(pp.user)
return None
@returns(ServiceProfile)
@arguments(code=unicode)
def get_service_profile_via_user_code(code):
pp = ProfilePointer.get(code)
if pp:
return get_service_profile(pp.user)
return None
@returns(unicode)
@arguments(code=unicode)
def get_user_invite_url(code):
pp = ProfilePointer.get(code)
if not pp or not pp.short_url_id:
return None
return '%s/M/%s' % (get_server_settings().baseUrl, base38.encode_int(pp.short_url_id))
@returns(tuple)
@arguments(code=unicode)
def get_user_and_qr_code_url(code):
pp = ProfilePointer.get(code)
if not pp or not pp.short_url_id:
return None
url = '%s/S/%s' % (get_server_settings().baseUrl, base38.encode_int(pp.short_url_id))
return url, pp.user
@returns(ServiceFriendStatusTO)
@arguments(service_identity_user=users.User, app_user=users.User)
def get_service_friend_status(service_identity_user, app_user):
human_user, app_id = get_app_user_tuple(app_user)
fsic, friend_profile = db.get([FriendServiceIdentityConnection.createKey(app_user, service_identity_user),
UserProfile.createKey(app_user)])
app = get_app_by_id(app_id)
is_friend = fsic is not None
if is_friend:
last_heartbeat = memcache.get("last_user_heart_beat_%s" % app_user.email()) or 0 # @UndefinedVariable
else:
last_heartbeat = 0
result = ServiceFriendStatusTO()
result.app_id = app_id
result.app_name = app.name
result.devices = list()
result.email = human_user.email()
result.is_friend = is_friend
result.last_heartbeat = last_heartbeat
result.deactivated = False
if friend_profile:
result.avatar = friend_profile.avatarUrl
result.language = friend_profile.language
result.name = friend_profile.name
if friend_profile.get_mobiles():
for m in friend_profile.get_mobiles().values():
result.devices.append(Mobile.typeAsString(m.type_))
else:
result.name = None
result.avatar = None
result.language = None
return result
@returns([unicode])
@arguments(app_user=users.User)
def create_friend_invitation_secrets(app_user):
def trans():
invitations = [UserInvitationSecret(parent=parent_key(app_user), status=UserInvitationSecret.STATUS_CREATED,
creation_timestamp=now(), origin=ORIGIN_USER_INVITE) for _ in xrange(20)]
        async_puts = [db.put_async(i) for i in invitations]
        foreach(lambda i: i.get_result(), async_puts)
return [i.secret for i in invitations]
return db.run_in_transaction(trans)
@returns(NoneType)
@arguments(message=Message)
def ack_invitation_by_secret_failed(message):
azzert(message.tag == FRIEND_ACCEPT_FAILED)
invitor = message.invitor
invitee = message.invitee
if message.get_member_statuses()[message.members.index(invitor)].button_index == message.get_button_by_id(INVITE_ID).index:
invite(invitor, invitee.email(), None, None,
message.service_tag, origin=message.origin, app_id=get_app_id_from_app_user(invitor))
logging.info("Mr %s accepted offer" % invitor.email())
else:
logging.info("Mr %s did not accept offer" % invitor.email())
def _get_friend_location(user, friend):
get_friend_location(user, friend, GetLocationRequestTO.TARGET_MOBILE_FIRST_REQUEST_AFTER_GRANT)
# TODO: only for rogerthat user?
def _notify_users(invitor_profile_info, invitor, invitee_profile_info, invitee, notify_invitee, notify_invitor, notify_friends):
def trans():
if notify_invitor and not invitor_profile_info.isServiceIdentity:
deferred.defer(_notify_invitor, invitor, invitor_profile_info,
invitee, invitee_profile_info, _transactional=True)
if notify_invitee and not invitee_profile_info.isServiceIdentity:
deferred.defer(_notify_invitee, invitee, invitee_profile_info, invitor_profile_info, _transactional=True)
for notify_friend in notify_friends:
if notify_friend:
deferred.defer(_notify_friends, *notify_friend, _transactional=True)
db.run_in_transaction(trans)
def _notify_friends(from_, to, friendMap, friendDetail):
# from and to are human
if not friendMap.shareContacts:
return
# create request
request = BecameFriendsRequestTO()
request.user = remove_app_id(from_).email()
request.friend = FriendRelationTO.fromDBFriendDetail(friendDetail)
request.friend.email = userCode(to) # certainly human user
# assemble recipients
friends = filter(lambda f: f != to, friendMap.friends)
# send
if friends:
_notify_friend_mobiles(friends, request)
slog('T', request.user, "com.mobicage.capi.friends.becameFriends", friend=request.friend.email)
slog('T', request.friend.email, "com.mobicage.capi.friends.becameFriends", friend=request.user)
def _notify_friend_mobiles(friends, request):
def trans():
if len(friends) <= 2:
becameFriends(became_friends_response, logError, friends, request=request)
else:
becameFriends(became_friends_response, logError, friends[:2], request=request)
deferred.defer(_notify_friend_mobiles, friends[2:], request, _transactional=True)
xg_on = db.create_transaction_options(xg=True)
db.run_in_transaction_options(xg_on, trans)
def _notify_invitor(invitor_user, invitor_profile_info, invitee, invitee_profile_info):
xg_on = db.create_transaction_options(xg=True)
msg = localize(invitor_profile_info.language, "_friendship_accepted", name=invitor_profile_info.name,
friend_name=_get_full_name(invitee))
def trans():
dashboardNotification(invitor_user, msg)
db.run_in_transaction_options(xg_on, trans)
def _notify_invitee(invitee_user, invitee_profile_info, invitor_profile_info):
xg_on = db.create_transaction_options(xg=True)
msg = localize(invitee_profile_info.language, "_friendship_established",
name=invitee_profile_info.name, friend_name=invitor_profile_info.name)
def trans():
dashboardNotification(invitee_user, msg)
db.run_in_transaction_options(xg_on, trans)
def _validate_invitation(invitee_email, invitor_user, servicetag, app_id, allow_unsupported_apps=False):
from rogerthat.bizz.service import UnsupportedAppIdException
if not invitee_email:
raise InvalidEmailAddressException()
if "@" in invitee_email:
invitee_user = users.User(invitee_email)
invitee_profile_info = get_profile_info(invitee_user, skip_warning=True)
if not (invitee_profile_info and invitee_profile_info.isServiceIdentity):
invitee_user = create_app_user(invitee_user, app_id)
else:
# invitee_email is in fact a hashed_email of a human user
invitee_email = invitee_email.split('?')[0]
pp = ProfilePointer.get_by_key_name(invitee_email)
if not pp:
raise UserNotFoundViaUserCode(invitee_email)
invitee_user = pp.user
invitee_email = get_human_user_from_app_user(invitee_user).email()
if get_app_id_from_app_user(invitee_user) != app_id:
logging.debug("Expected app_id of invitee to be %s, but was %s",
app_id, get_app_id_from_app_user(invitee_user))
raise UnsupportedAppIdException(app_id)
invitee_profile_info = get_profile_info(invitee_user)
invitor_profile_info = get_profile_info(invitor_user)
if invitor_profile_info.isServiceIdentity:
if not (invitee_profile_info and invitee_profile_info.isServiceIdentity):
if get_app_id_from_app_user(invitee_user) not in invitor_profile_info.appIds:
logging.debug("Expected app_id of invitee to be in [%s], but was %s",
', '.join(invitor_profile_info.appIds), get_app_id_from_app_user(invitee_user))
raise UnsupportedAppIdException(app_id)
# what if invitee is a service identity?
if get_friend_serviceidentity_connection(friend_user=invitee_user, service_identity_user=invitor_user):
raise CanNotInviteFriendException()
else:
if not (invitee_profile_info and invitee_profile_info.isServiceIdentity):
if get_app_id_from_app_user(invitee_user) != get_app_id_from_app_user(invitor_user):
logging.debug("Expected app_id of invitee to be %s, but was %s",
get_app_id_from_app_user(invitor_user), get_app_id_from_app_user(invitee_user))
raise UnsupportedAppIdException(app_id)
else:
if get_app_id_from_app_user(invitor_user) not in invitee_profile_info.appIds:
logging.debug("Expected app_id of invitor to be in [%s], but was %s",
', '.join(invitee_profile_info.appIds), get_app_id_from_app_user(invitor_user))
if not allow_unsupported_apps:
raise UnsupportedAppIdException(app_id)
friend_map = get_friends_map(invitor_user)
if invitee_user in friend_map.friends:
invitee_profile_info = get_profile_info(invitee_user)
msg = localize(invitor_profile_info.language, "_invitee_was_already_friend", name=invitor_profile_info.name,
friend_email=invitee_profile_info.qualifiedIdentifier or invitee_email)
dashboardNotification(invitor_user, msg)
raise CanNotInviteFriendException()
if invitor_user == invitee_user:
if not invitor_profile_info.isServiceIdentity:
msg = localize(invitor_profile_info.language, "_invited_yourself", name=invitor_profile_info.name)
dashboardNotification(invitor_user, msg)
raise CannotSelfInviteException()
now_ = now()
invitation_history = get_friend_invitation_history(invitor_user, invitee_user)
if invitation_history:
app_name = get_app_name_by_id(app_id)
if len(invitation_history.inviteTimestamps) >= 3:
if not invitor_profile_info.isServiceIdentity:
msg = localize(invitor_profile_info.language, "_invited_too_often", name=invitor_profile_info.name,
friend_email=invitee_email, app_name=app_name)
dashboardNotification(invitor_user, msg)
raise PersonInvitationOverloadException()
last_week = now_ - WEEK
if any((ts > last_week for ts in invitation_history.inviteTimestamps)):
server_settings = get_server_settings()
if invitee_email not in server_settings.supportWorkers:
if not invitor_profile_info.isServiceIdentity:
msg = localize(invitor_profile_info.language, "_invited_too_often_this_week",
name=invitor_profile_info.name, friend_email=invitee_email, app_name=app_name)
dashboardNotification(invitor_user, msg)
raise PersonAlreadyInvitedThisWeekException()
else:
invitation_history = FriendInvitationHistory.create(invitor_user, invitee_user)
invitation_history.inviteTimestamps = list()
invitation_history.tag = servicetag
hasher = hashlib.sha256()
hasher.update(str(uuid.uuid4()))
hasher.update(str(uuid.uuid4()))
hasher.update(str(uuid.uuid4()))
hasher.update(str(uuid.uuid4()))
invitation_history.lastAttemptKey = hasher.hexdigest()
return invitee_user, invitation_history, now_
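# Illustrative sketch (not part of the original module): _validate_invitation()
# throttles invitations per (invitor, invitee) pair -- at most 3 ever and at
# most one in the last week. The same decision logic in isolation:
def _example_invitation_allowed(invite_timestamps, now_, week_in_seconds):
    if len(invite_timestamps) >= 3:
        return False    # -> PersonInvitationOverloadException
    if any(ts > now_ - week_in_seconds for ts in invite_timestamps):
        return False    # -> PersonAlreadyInvitedThisWeekException
    return True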
def _send_invitation_email(language, email, invitor_user, invitee, invitation_history, now_, message, service_tag, origin):
if get_do_send_email_invitations(invitee):
raise DoesNotWantToBeInvitedViaEmail()
profile_info = get_profile_info(invitor_user, skip_warning=True)
if profile_info.isServiceIdentity:
invitor_user = add_slash_default(invitor_user)
timestamp = now()
uis = UserInvitationSecret(parent=parent_key_unsafe(remove_slash_default(invitor_user)),
status=UserInvitationSecret.STATUS_SENT, creation_timestamp=timestamp,
sent_timestamp=timestamp, email=invitee, service_tag=service_tag, origin=origin)
uis.put()
secret = uis.secret
short_url = get_user_invite_url(userCode(invitor_user))
_, app_id = get_app_user_tuple(invitee)
app = get_app_by_id(app_id)
variables = dict(profile=profile_info, short_url=short_url, secret=secret, message=message, app=app)
variables['consts'] = consts
if profile_info.isServiceIdentity:
service_profile = get_service_profile(profile_info.service_user)
variables['localized_organization_type'] = service_profile.localizedOrganizationType(language, app_id)
body = render("service_invite_email", [language], variables)
html = render("service_invite_email_html", [language], variables)
si = get_service_identity(invitor_user)
from_ = "%s <%s>" % (
si.name, si.qualifiedIdentifier if si.qualifiedIdentifier else get_service_user_from_service_identity_user(invitor_user).email())
subject = localize(language, "Discover our new app")
else:
body = render("invite_email", [language], variables)
html = render("invite_email_html", [language], variables)
from_user, _ = get_app_user_tuple(invitor_user)
from_ = from_user.email()
subject = localize(language, "%(name)s invites you to the %(app_name)s app!",
name=profile_info.name, app_name=app.name)
send_mail(from_, email, subject, body, html=html)
invitation_history.inviteTimestamps.append(now_)
invitation_history.put()
def _send_invitation_message(servicetag, message, name, user, invitee, invitation_history, now_, origin):
name = _get_full_name(user)
invitor_profile_info, invitee_user_profile = get_profile_infos(
[user, invitee], expected_types=[ProfileInfo, UserProfile])
app_name = get_app_name_by_id(get_app_id_from_app_user(invitee))
if invitor_profile_info.isServiceIdentity:
m = localize(invitee_user_profile.language,
"Service %(name)s wants to connect with you via %(app_name)s.", name=name, app_name=app_name)
else:
m = localize(invitee_user_profile.language,
"User %(name)s wants to get in touch with you via %(app_name)s.", name=name, app_name=app_name)
if message:
m += "\n\n" + localize(invitee_user_profile.language, "%(name_or_email)s added a personal note:\n\n%(message)s",
name_or_email=(invitor_profile_info.name or invitor_profile_info.user.email()), message=message)
logging.info('Sending invitation: %s', m)
message = sendMessage(MC_DASHBOARD, [UserMemberTO(invitee)], Message.FLAG_AUTO_LOCK, 0, None, m,
create_accept_decline_buttons(invitee_user_profile.language), None,
get_app_by_user(invitee).core_branding_hash, FRIEND_INVITATION_REQUEST, is_mfr=False)
message.invitor = user
message.invitee = invitee
message.origin = origin
if servicetag:
message.servicetag = servicetag
invitation_history.inviteTimestamps.append(now_)
db.put([message, invitation_history])
def _send_invitation_message_from_service_to_user(app_user, service_identity_user, message, brandingHash, language,
origin, tag):
m = sendMessage(service_identity_user, [UserMemberTO(app_user)], Message.FLAG_ALLOW_DISMISS, 0, None, message,
create_add_to_services_button(language), None, brandingHash, tag,
is_mfr=False, check_friends=False, allow_reserved_tag=True)
m.invitor = service_identity_user
m.invitee = app_user
m.origin = origin
m.put()
def _get_full_name(user):
profile_info = get_profile_info(user)
name = "%s (%s)" % (profile_info.name, profile_info.qualifiedIdentifier or remove_app_id(
remove_slash_default(user)).email())
return name
def _poke_service_directly(invitor, tag, context, message_flow_run_id, service_identity_user, timestamp):
from rogerthat.bizz.service import poke_service_with_tag
poke_service_with_tag(invitor, service_identity_user, tag, context, message_flow_run_id, timestamp)
@returns([GroupTO])
@arguments(app_user=users.User)
def getGroups(app_user):
return [GroupTO.from_model(g) for g in Group.gql("WHERE ANCESTOR IS :1", parent_key(app_user)).fetch(None)]
@returns(str)
@arguments(app_user=users.User, guid=unicode, name=unicode, members=[unicode], avatar=unicode, current_mobile=Mobile)
def putGroup(app_user, guid, name, members, avatar, current_mobile):
group = Group(key=Group.create_key(app_user, guid))
group.name = name
app_id = get_app_id_from_app_user(app_user)
if App.APP_ID_ROGERTHAT == app_id:
group.members = members
else:
group.members = []
for m in members:
group.members.append(create_app_user(users.User(m), app_id).email())
if avatar:
group.avatar = base64.b64decode(str(avatar))
digester = hashlib.sha256()
digester.update(group.avatar)
group.avatar_hash = digester.hexdigest().upper()
group.put()
extra_kwargs = dict()
if current_mobile is not None:
extra_kwargs[SKIP_ACCOUNTS] = [current_mobile.account]
updateGroups(update_groups_response, logError, app_user, request=UpdateGroupsRequestTO(), **extra_kwargs)
return group.avatar_hash
@returns(NoneType)
@arguments(app_user=users.User, guid=unicode, current_mobile=Mobile)
def deleteGroup(app_user, guid, current_mobile):
db.delete(Group.create_key(app_user, guid))
extra_kwargs = dict()
if current_mobile is not None:
extra_kwargs[SKIP_ACCOUNTS] = [current_mobile.account]
updateGroups(update_groups_response, logError, app_user, request=UpdateGroupsRequestTO(), **extra_kwargs)
@returns(unicode)
@arguments(avatar_hash=unicode, size=long)
def getGroupAvatar(avatar_hash, size):
group = Group.gql("WHERE avatar_hash = :1", avatar_hash).get()
if group:
picture = str(group.avatar)
img = images.Image(picture)
if img.width > size or img.height > size:
img.resize(size, size)
picture = img.execute_transforms(output_encoding=img.format)
return unicode(base64.b64encode(picture))
return None
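# Illustrative sketch (not part of the original module): getGroupAvatar() uses
# the App Engine images API to downscale the stored avatar on demand. The
# resize-only-if-larger idiom in isolation:
def _example_downscale_avatar(picture_bytes, size):
    from google.appengine.api import images
    img = images.Image(picture_bytes)
    if img.width > size or img.height > size:
        img.resize(size, size)
        # execute_transforms() applies the queued resize and re-encodes
        picture_bytes = img.execute_transforms(output_encoding=img.format)
    return picture_bytes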
@returns(FindFriendResponseTO)
@arguments(app_user=users.User, search_string=unicode, cursor_string=unicode, avatar_size=int)
def find_friend(app_user, search_string, cursor_string=None, avatar_size=50):
from rogerthat.bizz.profile import USER_INDEX
def get_name_sort_options():
sort_expr = search.SortExpression(expression='name', direction=search.SortExpression.ASCENDING)
return search.SortOptions(expressions=[sort_expr])
limit = 30
results = []
results_cursor = None
app_id = get_app_id_from_app_user(app_user)
the_index = search.Index(name=USER_INDEX)
try:
query_string = u"%s app_id:%s" % (normalize_search_string(search_string), app_id)
query = search.Query(query_string=query_string,
options=search.QueryOptions(returned_fields=['email'],
sort_options=get_name_sort_options(),
limit=limit - len(results),
cursor=search.Cursor(cursor_string)))
search_result = the_index.search(query)
results.extend(search_result.results)
results_cursor = search_result.cursor.web_safe_string if search_result.cursor else None
except:
logging.error('Search query error while searching friends for search_string "%s"', search_string, exc_info=True)
user_profiles = filter(lambda p: p is not None and isinstance(p, UserProfile),
db.get([get_profile_key(create_app_user_by_email(human_user_email, app_id))
for human_user_email in [result.fields[0].value for result in results]]))
r = FindFriendResponseTO()
r.error_string = None
r.items = list()
r.cursor = results_cursor
for up in user_profiles:
t = FindFriendItemTO()
t.name = up.name
t.email = get_human_user_from_app_user(up.user).email()
t.avatar_url = up.avatarUrl
r.items.append(t)
return r
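# Illustrative sketch (not part of the original module): find_friend() relies on
# the App Engine Search API. Reduced to the essentials (the index name below is
# a placeholder, not necessarily the real one):
def _example_search_users(query_string, cursor_string=None, limit=30):
    from google.appengine.api import search
    index = search.Index(name='example_user_index')  # hypothetical name
    sort_options = search.SortOptions(expressions=[
        search.SortExpression(expression='name',
                              direction=search.SortExpression.ASCENDING)])
    options = search.QueryOptions(returned_fields=['email'],
                                  sort_options=sort_options,
                                  limit=limit,
                                  cursor=search.Cursor(web_safe_string=cursor_string))
    result = index.search(search.Query(query_string=query_string, options=options))
    next_cursor = result.cursor.web_safe_string if result.cursor else None
    return [doc.fields[0].value for doc in result.results], next_cursor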
@mapping(u'friend.register.response_receiver')
@returns(NoneType)
@arguments(context=ServiceAPICallback, result=(unicode, RegistrationResultTO))
def register_response_receiver(context, result):
pass
@mapping(u'friend.register_result.response_receiver')
@returns(NoneType)
@arguments(context=ServiceAPICallback, result=NoneType)
def register_result_response_receiver(context, result):
pass
| apache-2.0 | -2,872,275,547,037,006,000 | 47.949708 | 165 | 0.65562 | false |
mtils/ems | ems/qt4/itemmodel/merged.py | 1 | 15814 | '''
Created on 01.08.2011
@author: michi
'''
from PyQt4.QtCore import QModelIndex, Qt, pyqtSignal, QObject, QVariant
from PyQt4.QtGui import QAbstractProxyModel
from ems.qt4.itemmodel.reflectable_mixin import ReflectableMixin #@UnresolvedImport
from ems.qt4.util import variant_to_pyobject
class SignalTranslator(QObject):
dataChangedWithId = pyqtSignal(int, QModelIndex, QModelIndex)
modelResetWithId = pyqtSignal(int)
rowsInsertedWithId = pyqtSignal(int, QModelIndex, int, int)
rowsRemovedWithId = pyqtSignal(int, QModelIndex, int, int)
columnsInsertedWithId = pyqtSignal(int, QModelIndex, int, int)
columnsRemovedWithId = pyqtSignal(int, QModelIndex, int, int)
headerDataChangedWithId = pyqtSignal(int, int, int, int)
def __init__(self, modelId, sourceModel, parent=None):
QObject.__init__(self, parent)
self.sourceModel = sourceModel
self.modelId = modelId
self.sourceModel.dataChanged.connect(self.onDataChanged)
self.sourceModel.modelReset.connect(self.onModelReset)
self.sourceModel.rowsInserted.connect(self.onRowsInserted)
self.sourceModel.rowsRemoved.connect(self.onRowsRemoved)
self.sourceModel.columnsInserted.connect(self.onColumnsInserted)
self.sourceModel.columnsRemoved.connect(self.onColumnsRemoved)
self.sourceModel.headerDataChanged.connect(self.onHeaderDataChanged)
def onDataChanged(self, topLeft, bottomRight):
self.dataChangedWithId.emit(self.modelId, topLeft, bottomRight)
def onModelReset(self):
self.modelResetWithId.emit(self.modelId)
def onRowsInserted(self, index, start, end):
self.rowsInsertedWithId.emit(self.modelId, index, start, end)
def onRowsRemoved(self, index, start, end):
self.rowsRemovedWithId.emit(self.modelId, index, start, end)
def onColumnsInserted(self, index, start, end):
self.columnsInsertedWithId.emit(self.modelId, index, start, end)
def onColumnsRemoved(self, index, start, end):
self.columnsRemovedWithId.emit(self.modelId, index, start, end)
def onHeaderDataChanged(self, orientation, first, last):
self.headerDataChangedWithId.emit(self.modelId, orientation, first, last)
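# Illustrative usage sketch (not part of the original module): SignalTranslator
# simply prefixes every forwarded model signal with the id of the emitting
# model, so a proxy merging several sources can tell them apart.
# MergedProxyModel.addSourceModel() below performs this wiring for all signals;
# a minimal manual version would be:
def _example_wire_translator(proxy, source_model):
    translator = SignalTranslator(hash(source_model), source_model, proxy)
    translator.dataChangedWithId.connect(proxy.onDataChanged)
    translator.modelResetWithId.connect(proxy.onModelReset)
    return translator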
class MergedProxyModel(QAbstractProxyModel, ReflectableMixin):
#modelReset = pyqtSignal()
#layoutChanged = pyqtSignal()
#headerDataChanged = pyqtSignal(Qt.Orientation, int, int)
def __init__(self, parent):
super(MergedProxyModel, self).__init__(parent)
self._sourceModels = {}
self._sourceModelKeys = []
self._signalEmitters = {}
def index(self, row, column, parentIndex=QModelIndex()):
proxyIndex = self.createIndex(row, column, parentIndex)
return proxyIndex
#return self.sourceModel().index(row, column, parentIndex)
def createIndex(self, row, column, parentIndex=QModelIndex()):
proxyIndex = QAbstractProxyModel.createIndex(self, row, column, parentIndex)
proxyIndex.modelId = self.getModelIdOfProxyColumn(column)
return proxyIndex
def headerData(self, section, orientation, role=Qt.DisplayRole):
if orientation == Qt.Horizontal:
modelId = self.getModelIdOfProxyColumn(section)
sourceSection = self.getSourceModelColumn(modelId, section)
return self._sourceModels[modelId].headerData(sourceSection,
orientation,
role)
return QAbstractProxyModel.headerData(self, section, orientation, role)
def getModelIdOfSourceColumn(self, col):
pass
def getModelIdOfProxyColumn_(self, col):
foundedCols = 0
lastModelId = self._sourceModelKeys[0]
for modelId in self._sourceModelKeys:
#print "modelId", modelId, 'has', self._sourceModels[modelId].columnCount(), 'cols'
foundedCols += self._sourceModels[modelId].columnCount()
if foundedCols > col:
return lastModelId
lastModelId = modelId
if foundedCols > col:
return lastModelId
return -1
def getModelIdOfProxyColumn(self, proxyCol):
col = 0
for modelIdKey in self._sourceModelKeys:
for sCol in range(self._sourceModels[modelIdKey].columnCount()):
if col == proxyCol:
return modelIdKey
col += 1
return -1
def getProxyModelColumn(self, modelId, sourceCol):
col = 0
for modelIdKey in self._sourceModelKeys:
for sCol in range(self._sourceModels[modelIdKey].columnCount()):
if (modelIdKey == modelId) and (sCol == sourceCol):
return col
col += 1
return -1
def getSourceModelColumn(self, modelId, proxyCol):
col = 0
for modelIdKey in self._sourceModelKeys:
for sCol in range(self._sourceModels[modelIdKey].columnCount()):
if (modelIdKey == modelId) and (col == proxyCol):
return sCol
col += 1
return -1
def addSourceModel(self, sourceModel):
modelId = hash(sourceModel)
self._signalEmitters[modelId] = SignalTranslator(modelId, sourceModel,
self)
self._signalEmitters[modelId].dataChangedWithId.connect(self.onDataChanged)
self._signalEmitters[modelId].modelResetWithId.connect(self.onModelReset)
self._signalEmitters[modelId].rowsInsertedWithId.connect(self.onRowsInserted)
self._signalEmitters[modelId].rowsRemovedWithId.connect(self.onRowsRemoved)
self._signalEmitters[modelId].columnsInsertedWithId.connect(self.onColumnsInserted)
self._signalEmitters[modelId].columnsRemovedWithId.connect(self.onColumnsRemoved)
self._signalEmitters[modelId].headerDataChangedWithId.connect(self.onHeaderDataChanged)
self._sourceModels[modelId] = sourceModel
self._sourceModelKeys.append(modelId)
def onDataChanged(self, modelId, topLeft, bottomRight):
self.dataChanged.emit(self.mapFromSource(topLeft), self.mapFromSource(bottomRight))
def onModelReset(self, modelId):
self.modelReset.emit()
def onRowsInserted(self, modelId, index, start, end):
self.rowsInserted.emit(index, start, end)
    def onRowsRemoved(self, modelId, index, start, end):
        # Re-emit as the proxy's own signal; the original referenced the
        # translator's *WithId signals, which do not exist on this class.
        self.rowsRemoved.emit(index, start, end)
    def onColumnsInserted(self, modelId, index, start, end):
        self.columnsInserted.emit(index, start, end)
    def onColumnsRemoved(self, modelId, index, start, end):
        self.columnsRemoved.emit(index, start, end)
    def onHeaderDataChanged(self, modelId, orientation, first, last):
        self.headerDataChanged.emit(orientation, first, last)
def setSourceModel(self, sourceModel):
raise TypeError("Please use addSourceModel")
sourceModel.rowsAboutToBeInserted.connect(self.onSourceModelRowsInserted)
sourceModel.rowsAboutToBeRemoved.connect(self.onSourceModelRowsDeleted)
sourceModel.dataChanged.connect(self.onDataChanged)
sourceModel.modelReset.connect(self.modelReset)
sourceModel.layoutChanged.connect(self.layoutChanged)
sourceModel.headerDataChanged.connect(self.headerDataChanged)
return QAbstractProxyModel.setSourceModel(self, sourceModel)
def onSourceModelRowsInserted(self, parentIndex, start, end):
self.beginInsertRows(parentIndex, start, end)
self.endInsertRows()
def insertRows(self, row, count, parentIndex=QModelIndex()):
return self.sourceModel().insertRows(row, count, parentIndex)
def onSourceModelRowsDeleted(self, parentIndex, start, end):
self.beginRemoveRows(parentIndex, start, end)
self.endRemoveRows()
def removeRows(self, row, count, parentIndex=QModelIndex()):
return self.sourceModel().removeRows(row, count, parentIndex)
def parent(self, index):
return QModelIndex()
def flags(self, index):
modelId = self.getModelIdOfProxyColumn(index.column())
#sourceIndex = self.mapToSource(index)
#print "flags", modelId, sourceIndex.row(), sourceIndex.column()
return self._sourceModels[modelId].flags(self.mapToSource(index))
def mapFromSource(self, sourceIndex):
if not sourceIndex.isValid():
return QModelIndex()
#modelId = self.getModelIdOfProxyColumn(sourceIndex.column())
modelId = hash(sourceIndex.model())
proxyModelColumn = self.getProxyModelColumn(modelId, sourceIndex.column())
# print "mapFromSource: Column", sourceIndex.column(), 'is', proxyModelColumn
return self.index(sourceIndex.row(), proxyModelColumn)
def mapToSource(self, proxyIndex):
if not proxyIndex.isValid():
return QModelIndex()
modelId = self.getModelIdOfProxyColumn(proxyIndex.column())
sourceModelColumn = self.getSourceModelColumn(modelId, proxyIndex.column())
return self._sourceModels[modelId].index(proxyIndex.row(), sourceModelColumn)
def rowCount(self, parentIndex=QModelIndex()):
rows = 1000000
for modelId in self._sourceModelKeys:
rows = min(self._sourceModels[modelId].rowCount(), rows)
return rows
def columnCount(self, parentIndex=QModelIndex()):
cols = 0
for modelId in self._sourceModelKeys:
cols += self._sourceModels[modelId].columnCount()
return cols
def data(self, index, role=Qt.DisplayRole):
#data = QAbstractProxyModel.data(self, index, role)
#index = self.mapToSource(index)
#print "data role:",role
#print "data:", variant_to_pyobject(index.data())
return self.mapToSource(index).data(role)
def setData(self, index, value, role=Qt.EditRole):
return QAbstractProxyModel.setData(self, index, value, role)
def columnType(self, column):
modelId = self.getModelIdOfProxyColumn(column)
srcColumn = self.getSourceModelColumn(modelId, column)
return self._sourceModels[modelId].columnType(srcColumn)
def _getColumnOffsetOfModel(self, modelId):
columns = 0
for modelIdKey in self._sourceModelKeys:
if modelIdKey == modelId:
return columns
columns += self._sourceModels[modelIdKey].columnCount()
return columns
def nameOfColumn(self, column):
modelId = self.getModelIdOfProxyColumn(column)
srcColumn = self.getSourceModelColumn(modelId, column)
return self._sourceModels[modelId].nameOfColumn(srcColumn)
def columnsOfName(self, name):
columns = []
for modelId in self._sourceModelKeys:
try:
cols = self._sourceModels[modelId].columnOfName(name)
if cols != -1:
columns.append(cols + self._getColumnOffsetOfModel(modelId))
except Exception:
continue
return columns
def columnOfName(self, name):
columns = self.columnsOfName(name)
if len(columns):
return columns[0]
return -1
def childModel(self, index):
modelId = self.getModelIdOfProxyColumn(index.column())
srcColumn = self.getSourceModelColumn(modelId, index.column())
return self._sourceModels[modelId].childModel(srcColumn)
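# Illustrative usage sketch (not part of the original module): MergedProxyModel
# lays the columns of its sources side by side (rowCount is the minimum of the
# sources, columnCount the sum). A hedged example of showing two models in one
# view:
def _example_merge_columns(model_a, model_b, parent=None):
    from PyQt4.QtGui import QTableView
    proxy = MergedProxyModel(parent)
    proxy.addSourceModel(model_a)    # columns 0 .. model_a.columnCount()-1
    proxy.addSourceModel(model_b)    # columns appended after model_a's
    view = QTableView()
    view.setModel(proxy)
    return view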
class MergedRowsProxyModel(MergedProxyModel):
def headerData(self, section, orientation, role=Qt.DisplayRole):
if orientation == Qt.Vertical and role == Qt.DisplayRole:
modelId = self.getModelIdOfProxyRow(section)
sourceSection = self.getSourceModelRow(modelId, section)
original = self._sourceModels[modelId].headerData(sourceSection,
orientation,
role)
rowNum = variant_to_pyobject(original)
if isinstance(rowNum, int):
return QVariant(rowNum+self._getRowOffsetOfModel(modelId))
return original
return QAbstractProxyModel.headerData(self, section, orientation, role)
def rowCount(self, parentIndex=QModelIndex()):
rows = 0
for modelId in self._sourceModelKeys:
rows += self._sourceModels[modelId].rowCount()
return rows
def columnCount(self, parentIndex=QModelIndex()):
cols = 1000000
for modelId in self._sourceModelKeys:
cols = min(self._sourceModels[modelId].columnCount(), cols)
return cols
def getProxyModelRow(self, modelId, sourceRow):
row = 0
for modelIdKey in self._sourceModelKeys:
for sCol in range(self._sourceModels[modelIdKey].rowCount()):
if (modelIdKey == modelId) and (sCol == sourceRow):
return row
row += 1
return -1
def getModelIdOfProxyRow(self, proxyRow):
row = 0
for modelIdKey in self._sourceModelKeys:
for sRow in range(self._sourceModels[modelIdKey].rowCount()):
if row == proxyRow:
return modelIdKey
row += 1
return -1
def getSourceModelRow(self, modelId, proxyRow):
row = 0
for modelIdKey in self._sourceModelKeys:
for sRow in range(self._sourceModels[modelIdKey].rowCount()):
if (modelIdKey == modelId) and (row == proxyRow):
return sRow
row += 1
return -1
def mapToSource(self, proxyIndex):
if not proxyIndex.isValid():
return QModelIndex()
modelId = self.getModelIdOfProxyRow(proxyIndex.row())
sourceModelRow = self.getSourceModelRow(modelId, proxyIndex.row())
return self._sourceModels[modelId].index(sourceModelRow, proxyIndex.column())
def mapFromSource(self, sourceIndex):
if not sourceIndex.isValid() or sourceIndex.column() > self.columnCount():
return QModelIndex()
modelId = hash(sourceIndex.model())
        # Map the source *row* (not the column) into the stacked row space.
        proxyModelRow = self.getProxyModelRow(modelId, sourceIndex.row())
return self.index(proxyModelRow, sourceIndex.column())
def flags(self, index):
modelId = self.getModelIdOfProxyRow(index.row())
#sourceIndex = self.mapToSource(index)
#print "flags", modelId, sourceIndex.row(), sourceIndex.column()
return self._sourceModels[modelId].flags(self.mapToSource(index))
def _getRowOffsetOfModel(self, modelId):
rows = 0
for modelIdKey in self._sourceModelKeys:
if modelIdKey == modelId:
return rows
rows += self._sourceModels[modelIdKey].rowCount()
return rows
def rowOfName(self, name):
rows = self.rowsOfName(name)
if not len(rows):
return -1
return rows[0]
def rowsOfName(self, name):
rows = []
for modelId in self._sourceModelKeys:
try:
row = self._sourceModels[modelId].rowOfName(name)
if row != -1:
rows.append(row + self._getRowOffsetOfModel(modelId))
except Exception:
continue
return rows
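# Illustrative usage sketch (not part of the original module):
# MergedRowsProxyModel stacks the rows of its sources instead of the columns
# (rowCount is the sum, columnCount the minimum). Usage mirrors the
# column-merging proxy above:
def _example_merge_rows(model_a, model_b, parent=None):
    proxy = MergedRowsProxyModel(parent)
    proxy.addSourceModel(model_a)    # rows 0 .. model_a.rowCount()-1
    proxy.addSourceModel(model_b)    # rows appended after model_a's
    return proxy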
| mit | -7,199,127,218,735,544,000 | 39.548718 | 95 | 0.640255 | false |
deepsrijit1105/edx-platform | lms/djangoapps/courseware/tests/test_video_handlers.py | 6 | 35856 | # -*- coding: utf-8 -*-
"""Video xmodule tests in mongo."""
import os
import freezegun
import tempfile
import textwrap
import json
import ddt
from nose.plugins.attrib import attr
from datetime import timedelta, datetime
from webob import Request
from mock import MagicMock, Mock, patch
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.x_module import STUDENT_VIEW
from . import BaseTestXmodule
from .test_video_xml import SOURCE_XML
from contentserver.caching import del_cached_content
from xmodule.exceptions import NotFoundError
from xmodule.video_module.transcripts_utils import (
TranscriptException,
TranscriptsGenerationException,
)
TRANSCRIPT = {"start": [10], "end": [100], "text": ["Hi, welcome to Edx."]}
BUMPER_TRANSCRIPT = {"start": [1], "end": [10], "text": ["A bumper"]}
SRT_content = textwrap.dedent("""
0
00:00:00,12 --> 00:00:00,100
Привіт, edX вітає вас.
""")
def _create_srt_file(content=None):
"""
Create srt file in filesystem.
"""
content = content or SRT_content
srt_file = tempfile.NamedTemporaryFile(suffix=".srt")
srt_file.content_type = 'application/x-subrip; charset=utf-8'
srt_file.write(content)
srt_file.seek(0)
return srt_file
def _check_asset(location, asset_name):
"""
Check that asset with asset_name exists in assets.
"""
content_location = StaticContent.compute_location(
location.course_key, asset_name
)
try:
contentstore().find(content_location)
except NotFoundError:
return False
else:
return True
def _clear_assets(location):
"""
Clear all assets for location.
"""
store = contentstore()
assets, __ = store.get_all_content_for_course(location.course_key)
for asset in assets:
asset_location = asset['asset_key']
del_cached_content(asset_location)
store.delete(asset_location)
def _get_subs_id(filename):
basename = os.path.splitext(os.path.basename(filename))[0]
return basename.replace('subs_', '').replace('.srt', '')
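# Illustrative example (added; not in the original file):
#   _get_subs_id('subs_OEoXaMPEzfM.srt.sjson') == 'OEoXaMPEzfM'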
def _create_file(content=''):
"""
Create temporary subs_somevalue.srt.sjson file.
"""
sjson_file = tempfile.NamedTemporaryFile(prefix="subs_", suffix=".srt.sjson")
sjson_file.content_type = 'application/json'
sjson_file.write(textwrap.dedent(content))
sjson_file.seek(0)
return sjson_file
def _upload_sjson_file(subs_file, location, default_filename='subs_{}.srt.sjson'):
filename = default_filename.format(_get_subs_id(subs_file.name))
_upload_file(subs_file, location, filename)
def _upload_file(subs_file, location, filename):
mime_type = subs_file.content_type
content_location = StaticContent.compute_location(
location.course_key, filename
)
content = StaticContent(content_location, filename, mime_type, subs_file.read())
contentstore().save(content)
del_cached_content(content.location)
def attach_sub(item, filename):
"""
Attach `en` transcript.
"""
item.sub = filename
def attach_bumper_transcript(item, filename, lang="en"):
"""
Attach bumper transcript.
"""
item.video_bumper["transcripts"][lang] = filename
@attr(shard=1)
class TestVideo(BaseTestXmodule):
"""Integration tests: web client + mongo."""
CATEGORY = "video"
DATA = SOURCE_XML
METADATA = {}
def test_handle_ajax_wrong_dispatch(self):
responses = {
user.username: self.clients[user.username].post(
self.get_url('whatever'),
{},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
for user in self.users
}
status_codes = {response.status_code for response in responses.values()}
self.assertEqual(status_codes.pop(), 404)
def test_handle_ajax(self):
data = [
{u'speed': 2.0},
{u'saved_video_position': "00:00:10"},
{u'transcript_language': 'uk'},
{u'bumper_do_not_show_again': True},
{u'bumper_last_view_date': True},
{u'demoo�': 'sample'}
]
for sample in data:
response = self.clients[self.users[0].username].post(
self.get_url('save_user_state'),
sample,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(self.item_descriptor.speed, None)
self.item_descriptor.handle_ajax('save_user_state', {'speed': json.dumps(2.0)})
self.assertEqual(self.item_descriptor.speed, 2.0)
self.assertEqual(self.item_descriptor.global_speed, 2.0)
self.assertEqual(self.item_descriptor.saved_video_position, timedelta(0))
self.item_descriptor.handle_ajax('save_user_state', {'saved_video_position': "00:00:10"})
self.assertEqual(self.item_descriptor.saved_video_position, timedelta(0, 10))
self.assertEqual(self.item_descriptor.transcript_language, 'en')
self.item_descriptor.handle_ajax('save_user_state', {'transcript_language': "uk"})
self.assertEqual(self.item_descriptor.transcript_language, 'uk')
self.assertEqual(self.item_descriptor.bumper_do_not_show_again, False)
self.item_descriptor.handle_ajax('save_user_state', {'bumper_do_not_show_again': True})
self.assertEqual(self.item_descriptor.bumper_do_not_show_again, True)
with freezegun.freeze_time(datetime.now()):
self.assertEqual(self.item_descriptor.bumper_last_view_date, None)
self.item_descriptor.handle_ajax('save_user_state', {'bumper_last_view_date': True})
self.assertEqual(self.item_descriptor.bumper_last_view_date, datetime.utcnow())
response = self.item_descriptor.handle_ajax('save_user_state', {u'demoo�': "sample"})
self.assertEqual(json.loads(response)['success'], True)
def tearDown(self):
_clear_assets(self.item_descriptor.location)
super(TestVideo, self).tearDown()
@attr(shard=1)
class TestTranscriptAvailableTranslationsDispatch(TestVideo):
"""
Test video handler that provide available translations info.
Tests for `available_translations` dispatch.
"""
srt_file = _create_srt_file()
DATA = """
<video show_captions="true"
display_name="A Name"
>
<source src="example.mp4"/>
<source src="example.webm"/>
<transcript language="uk" src="{}"/>
</video>
""".format(os.path.split(srt_file.name)[1])
MODEL_DATA = {
'data': DATA
}
def setUp(self):
super(TestTranscriptAvailableTranslationsDispatch, self).setUp()
self.item_descriptor.render(STUDENT_VIEW)
self.item = self.item_descriptor.xmodule_runtime.xmodule_instance
self.subs = {"start": [10], "end": [100], "text": ["Hi, welcome to Edx."]}
def test_available_translation_en(self):
good_sjson = _create_file(json.dumps(self.subs))
_upload_sjson_file(good_sjson, self.item_descriptor.location)
self.item.sub = _get_subs_id(good_sjson.name)
request = Request.blank('/available_translations')
response = self.item.transcript(request=request, dispatch='available_translations')
self.assertEqual(json.loads(response.body), ['en'])
def test_available_translation_non_en(self):
_upload_file(self.srt_file, self.item_descriptor.location, os.path.split(self.srt_file.name)[1])
request = Request.blank('/available_translations')
response = self.item.transcript(request=request, dispatch='available_translations')
self.assertEqual(json.loads(response.body), ['uk'])
def test_multiple_available_translations(self):
good_sjson = _create_file(json.dumps(self.subs))
# Upload english transcript.
_upload_sjson_file(good_sjson, self.item_descriptor.location)
# Upload non-english transcript.
_upload_file(self.srt_file, self.item_descriptor.location, os.path.split(self.srt_file.name)[1])
self.item.sub = _get_subs_id(good_sjson.name)
request = Request.blank('/available_translations')
response = self.item.transcript(request=request, dispatch='available_translations')
self.assertEqual(json.loads(response.body), ['en', 'uk'])
@attr(shard=1)
@ddt.ddt
class TestTranscriptAvailableTranslationsBumperDispatch(TestVideo):
"""
Test video handler that provide available translations info.
Tests for `available_translations_bumper` dispatch.
"""
srt_file = _create_srt_file()
DATA = """
<video show_captions="true"
display_name="A Name"
>
<source src="example.mp4"/>
<source src="example.webm"/>
<transcript language="uk" src="{}"/>
</video>
""".format(os.path.split(srt_file.name)[1])
MODEL_DATA = {
'data': DATA
}
def setUp(self):
super(TestTranscriptAvailableTranslationsBumperDispatch, self).setUp()
self.item_descriptor.render(STUDENT_VIEW)
self.item = self.item_descriptor.xmodule_runtime.xmodule_instance
self.dispatch = "available_translations/?is_bumper=1"
self.item.video_bumper = {"transcripts": {"en": ""}}
@ddt.data("en", "uk")
def test_available_translation_en_and_non_en(self, lang):
filename = os.path.split(self.srt_file.name)[1]
_upload_file(self.srt_file, self.item_descriptor.location, filename)
self.item.video_bumper["transcripts"][lang] = filename
request = Request.blank('/' + self.dispatch)
response = self.item.transcript(request=request, dispatch=self.dispatch)
self.assertEqual(json.loads(response.body), [lang])
def test_multiple_available_translations(self):
en_translation = _create_srt_file()
en_translation_filename = os.path.split(en_translation.name)[1]
uk_translation_filename = os.path.split(self.srt_file.name)[1]
# Upload english transcript.
_upload_file(en_translation, self.item_descriptor.location, en_translation_filename)
# Upload non-english transcript.
_upload_file(self.srt_file, self.item_descriptor.location, uk_translation_filename)
self.item.video_bumper["transcripts"]["en"] = en_translation_filename
self.item.video_bumper["transcripts"]["uk"] = uk_translation_filename
request = Request.blank('/' + self.dispatch)
response = self.item.transcript(request=request, dispatch=self.dispatch)
self.assertEqual(json.loads(response.body), ['en', 'uk'])
class TestTranscriptDownloadDispatch(TestVideo):
"""
Test video handler that provide translation transcripts.
Tests for `download` dispatch.
"""
DATA = """
<video show_captions="true"
display_name="A Name"
sub='OEoXaMPEzfM'
>
<source src="example.mp4"/>
<source src="example.webm"/>
</video>
"""
MODEL_DATA = {
'data': DATA
}
def setUp(self):
super(TestTranscriptDownloadDispatch, self).setUp()
self.item_descriptor.render(STUDENT_VIEW)
self.item = self.item_descriptor.xmodule_runtime.xmodule_instance
def test_download_transcript_not_exist(self):
request = Request.blank('/download')
response = self.item.transcript(request=request, dispatch='download')
self.assertEqual(response.status, '404 Not Found')
@patch('xmodule.video_module.VideoModule.get_transcript', return_value=('Subs!', 'test_filename.srt', 'application/x-subrip; charset=utf-8'))
def test_download_srt_exist(self, __):
request = Request.blank('/download')
response = self.item.transcript(request=request, dispatch='download')
self.assertEqual(response.body, 'Subs!')
self.assertEqual(response.headers['Content-Type'], 'application/x-subrip; charset=utf-8')
self.assertEqual(response.headers['Content-Language'], 'en')
@patch('xmodule.video_module.VideoModule.get_transcript', return_value=('Subs!', 'txt', 'text/plain; charset=utf-8'))
def test_download_txt_exist(self, __):
self.item.transcript_format = 'txt'
request = Request.blank('/download')
response = self.item.transcript(request=request, dispatch='download')
self.assertEqual(response.body, 'Subs!')
self.assertEqual(response.headers['Content-Type'], 'text/plain; charset=utf-8')
self.assertEqual(response.headers['Content-Language'], 'en')
def test_download_en_no_sub(self):
request = Request.blank('/download')
response = self.item.transcript(request=request, dispatch='download')
self.assertEqual(response.status, '404 Not Found')
transcripts = self.item.get_transcripts_info()
with self.assertRaises(NotFoundError):
self.item.get_transcript(transcripts)
@patch('xmodule.video_module.VideoModule.get_transcript', return_value=('Subs!', u"塞.srt", 'application/x-subrip; charset=utf-8'))
def test_download_non_en_non_ascii_filename(self, __):
request = Request.blank('/download')
response = self.item.transcript(request=request, dispatch='download')
self.assertEqual(response.body, 'Subs!')
self.assertEqual(response.headers['Content-Type'], 'application/x-subrip; charset=utf-8')
self.assertEqual(response.headers['Content-Disposition'], 'attachment; filename="塞.srt"')
@attr(shard=1)
@ddt.ddt
class TestTranscriptTranslationGetDispatch(TestVideo):
"""
Test video handler that provide translation transcripts.
Tests for `translation` and `translation_bumper` dispatches.
"""
srt_file = _create_srt_file()
DATA = """
<video show_captions="true"
display_name="A Name"
>
<source src="example.mp4"/>
<source src="example.webm"/>
<transcript language="uk" src="{}"/>
</video>
""".format(os.path.split(srt_file.name)[1])
MODEL_DATA = {
'data': DATA
}
def setUp(self):
super(TestTranscriptTranslationGetDispatch, self).setUp()
self.item_descriptor.render(STUDENT_VIEW)
self.item = self.item_descriptor.xmodule_runtime.xmodule_instance
self.item.video_bumper = {"transcripts": {"en": ""}}
@ddt.data(
# No language
('/translation', 'translation', '400 Bad Request'),
# No videoId - HTML5 video with language that is not in available languages
('/translation/ru', 'translation/ru', '404 Not Found'),
# Language is not in available languages
('/translation/ru?videoId=12345', 'translation/ru', '404 Not Found'),
# Youtube_id is invalid or does not exist
('/translation/uk?videoId=9855256955511225', 'translation/uk', '404 Not Found'),
('/translation?is_bumper=1', 'translation', '400 Bad Request'),
('/translation/ru?is_bumper=1', 'translation/ru', '404 Not Found'),
('/translation/ru?videoId=12345&is_bumper=1', 'translation/ru', '404 Not Found'),
('/translation/uk?videoId=9855256955511225&is_bumper=1', 'translation/uk', '404 Not Found'),
)
@ddt.unpack
def test_translation_fails(self, url, dispatch, status_code):
request = Request.blank(url)
response = self.item.transcript(request=request, dispatch=dispatch)
self.assertEqual(response.status, status_code)
@ddt.data(
('translation/en?videoId={}', 'translation/en', attach_sub),
('translation/en?videoId={}&is_bumper=1', 'translation/en', attach_bumper_transcript))
@ddt.unpack
    def test_translation_en_youtube_success(self, url, dispatch, attach):
subs = {"start": [10], "end": [100], "text": ["Hi, welcome to Edx."]}
good_sjson = _create_file(json.dumps(subs))
_upload_sjson_file(good_sjson, self.item_descriptor.location)
subs_id = _get_subs_id(good_sjson.name)
attach(self.item, subs_id)
request = Request.blank(url.format(subs_id))
response = self.item.transcript(request=request, dispatch=dispatch)
self.assertDictEqual(json.loads(response.body), subs)
def test_translation_non_en_youtube_success(self):
subs = {
u'end': [100],
u'start': [12],
u'text': [
u'\u041f\u0440\u0438\u0432\u0456\u0442, edX \u0432\u0456\u0442\u0430\u0454 \u0432\u0430\u0441.'
]
}
self.srt_file.seek(0)
_upload_file(self.srt_file, self.item_descriptor.location, os.path.split(self.srt_file.name)[1])
subs_id = _get_subs_id(self.srt_file.name)
# youtube 1_0 request, will generate for all speeds for existing ids
self.item.youtube_id_1_0 = subs_id
self.item.youtube_id_0_75 = '0_75'
request = Request.blank('/translation/uk?videoId={}'.format(subs_id))
response = self.item.transcript(request=request, dispatch='translation/uk')
self.assertDictEqual(json.loads(response.body), subs)
        # 0_75 subs already exist
request = Request.blank('/translation/uk?videoId={}'.format('0_75'))
response = self.item.transcript(request=request, dispatch='translation/uk')
calculated_0_75 = {
u'end': [75],
u'start': [9],
u'text': [
u'\u041f\u0440\u0438\u0432\u0456\u0442, edX \u0432\u0456\u0442\u0430\u0454 \u0432\u0430\u0441.'
]
}
self.assertDictEqual(json.loads(response.body), calculated_0_75)
# 1_5 will be generated from 1_0
self.item.youtube_id_1_5 = '1_5'
request = Request.blank('/translation/uk?videoId={}'.format('1_5'))
response = self.item.transcript(request=request, dispatch='translation/uk')
calculated_1_5 = {
u'end': [150],
u'start': [18],
u'text': [
u'\u041f\u0440\u0438\u0432\u0456\u0442, edX \u0432\u0456\u0442\u0430\u0454 \u0432\u0430\u0441.'
]
}
self.assertDictEqual(json.loads(response.body), calculated_1_5)
@ddt.data(
('translation/en', 'translation/en', attach_sub),
('translation/en?is_bumper=1', 'translation/en', attach_bumper_transcript))
@ddt.unpack
    def test_translation_en_html5_success(self, url, dispatch, attach):
good_sjson = _create_file(json.dumps(TRANSCRIPT))
_upload_sjson_file(good_sjson, self.item_descriptor.location)
subs_id = _get_subs_id(good_sjson.name)
attach(self.item, subs_id)
request = Request.blank(url)
response = self.item.transcript(request=request, dispatch=dispatch)
self.assertDictEqual(json.loads(response.body), TRANSCRIPT)
    def test_translation_non_en_html5_success(self):
subs = {
u'end': [100],
u'start': [12],
u'text': [
u'\u041f\u0440\u0438\u0432\u0456\u0442, edX \u0432\u0456\u0442\u0430\u0454 \u0432\u0430\u0441.'
]
}
self.srt_file.seek(0)
_upload_file(self.srt_file, self.item_descriptor.location, os.path.split(self.srt_file.name)[1])
# manually clean youtube_id_1_0, as it has default value
self.item.youtube_id_1_0 = ""
request = Request.blank('/translation/uk')
response = self.item.transcript(request=request, dispatch='translation/uk')
self.assertDictEqual(json.loads(response.body), subs)
def test_translation_static_transcript_xml_with_data_dirc(self):
"""
Test id data_dir is set in XML course.
Set course data_dir and ensure we get redirected to that path
if it isn't found in the contentstore.
"""
# Simulate data_dir set in course.
test_modulestore = MagicMock()
attrs = {'get_course.return_value': Mock(data_dir='dummy/static', static_asset_path='')}
test_modulestore.configure_mock(**attrs)
self.item_descriptor.runtime.modulestore = test_modulestore
# Test youtube style en
request = Request.blank('/translation/en?videoId=12345')
response = self.item.transcript(request=request, dispatch='translation/en')
self.assertEqual(response.status, '307 Temporary Redirect')
self.assertIn(
('Location', '/static/dummy/static/subs_12345.srt.sjson'),
response.headerlist
)
# Test HTML5 video style
self.item.sub = 'OEoXaMPEzfM'
request = Request.blank('/translation/en')
response = self.item.transcript(request=request, dispatch='translation/en')
self.assertEqual(response.status, '307 Temporary Redirect')
self.assertIn(
('Location', '/static/dummy/static/subs_OEoXaMPEzfM.srt.sjson'),
response.headerlist
)
# Test different language to ensure we are just ignoring it since we can't
# translate with static fallback
request = Request.blank('/translation/uk')
response = self.item.transcript(request=request, dispatch='translation/uk')
self.assertEqual(response.status, '404 Not Found')
@ddt.data(
# Test youtube style en
('/translation/en?videoId=12345', 'translation/en', '307 Temporary Redirect', '12345'),
# Test html5 style en
('/translation/en', 'translation/en', '307 Temporary Redirect', 'OEoXaMPEzfM', attach_sub),
# Test different language to ensure we are just ignoring it since we can't
# translate with static fallback
('/translation/uk', 'translation/uk', '404 Not Found'),
(
'/translation/en?is_bumper=1', 'translation/en', '307 Temporary Redirect', 'OEoXaMPEzfM',
attach_bumper_transcript
),
('/translation/uk?is_bumper=1', 'translation/uk', '404 Not Found'),
)
@ddt.unpack
def test_translation_static_transcript(self, url, dispatch, status_code, sub=None, attach=None):
"""
Set course static_asset_path and ensure we get redirected to that path
if it isn't found in the contentstore
"""
self._set_static_asset_path()
if attach:
attach(self.item, sub)
request = Request.blank(url)
response = self.item.transcript(request=request, dispatch=dispatch)
self.assertEqual(response.status, status_code)
if sub:
self.assertIn(
('Location', '/static/dummy/static/subs_{}.srt.sjson'.format(sub)),
response.headerlist
)
@patch('xmodule.video_module.VideoModule.course_id', return_value='not_a_course_locator')
def test_translation_static_non_course(self, __):
"""
Test that get_static_transcript short-circuits in the case of a non-CourseLocator.
This fixes a bug for videos inside of content libraries.
"""
self._set_static_asset_path()
# When course_id is not mocked out, these values would result in 307, as tested above.
request = Request.blank('/translation/en?videoId=12345')
response = self.item.transcript(request=request, dispatch='translation/en')
self.assertEqual(response.status, '404 Not Found')
def _set_static_asset_path(self):
""" Helper method for setting up the static_asset_path information """
self.course.static_asset_path = 'dummy/static'
self.course.save()
store = modulestore()
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.course.id):
store.update_item(self.course, self.user.id)
@attr(shard=1)
class TestStudioTranscriptTranslationGetDispatch(TestVideo):
"""
Test Studio video handler that provide translation transcripts.
Tests for `translation` dispatch GET HTTP method.
"""
srt_file = _create_srt_file()
DATA = """
<video show_captions="true"
display_name="A Name"
>
<source src="example.mp4"/>
<source src="example.webm"/>
<transcript language="uk" src="{}"/>
<transcript language="zh" src="{}"/>
</video>
""".format(os.path.split(srt_file.name)[1], u"塞.srt".encode('utf8'))
MODEL_DATA = {'data': DATA}
def test_translation_fails(self):
# No language
request = Request.blank('')
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation')
self.assertEqual(response.status, '400 Bad Request')
# No filename in request.GET
request = Request.blank('')
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation/uk')
self.assertEqual(response.status, '400 Bad Request')
# Correct case:
filename = os.path.split(self.srt_file.name)[1]
_upload_file(self.srt_file, self.item_descriptor.location, filename)
self.srt_file.seek(0)
request = Request.blank(u'translation/uk?filename={}'.format(filename))
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation/uk')
self.assertEqual(response.body, self.srt_file.read())
self.assertEqual(response.headers['Content-Type'], 'application/x-subrip; charset=utf-8')
self.assertEqual(
response.headers['Content-Disposition'],
'attachment; filename="{}"'.format(filename)
)
self.assertEqual(response.headers['Content-Language'], 'uk')
# Non ascii file name download:
self.srt_file.seek(0)
_upload_file(self.srt_file, self.item_descriptor.location, u'塞.srt')
self.srt_file.seek(0)
request = Request.blank('translation/zh?filename={}'.format(u'塞.srt'.encode('utf8')))
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation/zh')
self.assertEqual(response.body, self.srt_file.read())
self.assertEqual(response.headers['Content-Type'], 'application/x-subrip; charset=utf-8')
self.assertEqual(response.headers['Content-Disposition'], 'attachment; filename="塞.srt"')
self.assertEqual(response.headers['Content-Language'], 'zh')
@attr(shard=1)
class TestStudioTranscriptTranslationPostDispatch(TestVideo):
"""
Test Studio video handler that provide translation transcripts.
Tests for `translation` dispatch with HTTP POST method.
"""
DATA = """
<video show_captions="true"
display_name="A Name"
>
<source src="example.mp4"/>
<source src="example.webm"/>
</video>
"""
MODEL_DATA = {
'data': DATA
}
METADATA = {}
def test_studio_transcript_post(self):
        # Check for exceptions:
        # Language is passed, bad content or filename:
        # should be first, as other tests save transcripts to store.
request = Request.blank('/translation/uk', POST={'file': ('filename.srt', SRT_content)})
with patch('xmodule.video_module.video_handlers.save_to_store'):
with self.assertRaises(TranscriptException): # transcripts were not saved to store for some reason.
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation/uk')
request = Request.blank('/translation/uk', POST={'file': ('filename', 'content')})
with self.assertRaises(TranscriptsGenerationException): # Not an srt filename
self.item_descriptor.studio_transcript(request=request, dispatch='translation/uk')
request = Request.blank('/translation/uk', POST={'file': ('filename.srt', 'content')})
with self.assertRaises(TranscriptsGenerationException): # Content format is not srt.
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation/uk')
request = Request.blank('/translation/uk', POST={'file': ('filename.srt', SRT_content.decode('utf8').encode('cp1251'))})
# Non-UTF8 file content encoding.
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation/uk')
self.assertEqual(response.status_code, 400)
self.assertEqual(response.body, "Invalid encoding type, transcripts should be UTF-8 encoded.")
# No language is passed.
request = Request.blank('/translation', POST={'file': ('filename', SRT_content)})
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation')
self.assertEqual(response.status, '400 Bad Request')
# Language, good filename and good content.
request = Request.blank('/translation/uk', POST={'file': ('filename.srt', SRT_content)})
response = self.item_descriptor.studio_transcript(request=request, dispatch='translation/uk')
self.assertEqual(response.status, '201 Created')
self.assertDictEqual(json.loads(response.body), {'filename': u'filename.srt', 'status': 'Success'})
self.assertDictEqual(self.item_descriptor.transcripts, {})
self.assertTrue(_check_asset(self.item_descriptor.location, u'filename.srt'))
@attr(shard=1)
class TestGetTranscript(TestVideo):
"""
Make sure that `get_transcript` method works correctly
"""
srt_file = _create_srt_file()
DATA = """
<video show_captions="true"
display_name="A Name"
>
<source src="example.mp4"/>
<source src="example.webm"/>
<transcript language="uk" src="{}"/>
<transcript language="zh" src="{}"/>
</video>
""".format(os.path.split(srt_file.name)[1], u"塞.srt".encode('utf8'))
MODEL_DATA = {
'data': DATA
}
METADATA = {}
def setUp(self):
super(TestGetTranscript, self).setUp()
self.item_descriptor.render(STUDENT_VIEW)
self.item = self.item_descriptor.xmodule_runtime.xmodule_instance
def test_good_transcript(self):
"""
Test for download 'en' sub with html5 video and self.sub has correct non-empty value.
"""
good_sjson = _create_file(content=textwrap.dedent("""\
{
"start": [
270,
2720
],
"end": [
2720,
5430
],
"text": [
"Hi, welcome to Edx.",
"Let's start with what is on your screen right now."
]
}
"""))
_upload_sjson_file(good_sjson, self.item.location)
self.item.sub = _get_subs_id(good_sjson.name)
transcripts = self.item.get_transcripts_info()
text, filename, mime_type = self.item.get_transcript(transcripts)
expected_text = textwrap.dedent("""\
0
00:00:00,270 --> 00:00:02,720
Hi, welcome to Edx.
1
00:00:02,720 --> 00:00:05,430
Let's start with what is on your screen right now.
""")
self.assertEqual(text, expected_text)
self.assertEqual(filename[:-4], self.item.sub)
self.assertEqual(mime_type, 'application/x-subrip; charset=utf-8')
def test_good_txt_transcript(self):
good_sjson = _create_file(content=textwrap.dedent("""\
{
"start": [
270,
2720
],
"end": [
2720,
5430
],
"text": [
"Hi, welcome to Edx.",
"Let's start with what is on your screen right now."
]
}
"""))
_upload_sjson_file(good_sjson, self.item.location)
self.item.sub = _get_subs_id(good_sjson.name)
transcripts = self.item.get_transcripts_info()
text, filename, mime_type = self.item.get_transcript(transcripts, transcript_format="txt")
expected_text = textwrap.dedent("""\
Hi, welcome to Edx.
Let's start with what is on your screen right now.""")
self.assertEqual(text, expected_text)
self.assertEqual(filename, self.item.sub + '.txt')
self.assertEqual(mime_type, 'text/plain; charset=utf-8')
def test_en_with_empty_sub(self):
transcripts = {"transcripts": {}, "sub": ""}
        # no self.sub; self.youtube_1_0 exists, but no file in assets
with self.assertRaises(NotFoundError):
self.item.get_transcript(transcripts)
        # no self.sub and no self.youtube_1_0, no non-en transcripts
self.item.youtube_id_1_0 = None
with self.assertRaises(ValueError):
self.item.get_transcript(transcripts)
# no self.sub but youtube_1_0 exists with file in assets
good_sjson = _create_file(content=textwrap.dedent("""\
{
"start": [
270,
2720
],
"end": [
2720,
5430
],
"text": [
"Hi, welcome to Edx.",
"Let's start with what is on your screen right now."
]
}
"""))
_upload_sjson_file(good_sjson, self.item.location)
self.item.youtube_id_1_0 = _get_subs_id(good_sjson.name)
text, filename, mime_type = self.item.get_transcript(transcripts)
expected_text = textwrap.dedent("""\
0
00:00:00,270 --> 00:00:02,720
Hi, welcome to Edx.
1
00:00:02,720 --> 00:00:05,430
Let's start with what is on your screen right now.
""")
self.assertEqual(text, expected_text)
self.assertEqual(filename, self.item.youtube_id_1_0 + '.srt')
self.assertEqual(mime_type, 'application/x-subrip; charset=utf-8')
def test_non_en_with_non_ascii_filename(self):
self.item.transcript_language = 'zh'
self.srt_file.seek(0)
_upload_file(self.srt_file, self.item_descriptor.location, u"塞.srt")
transcripts = self.item.get_transcripts_info()
text, filename, mime_type = self.item.get_transcript(transcripts)
expected_text = textwrap.dedent("""
0
00:00:00,12 --> 00:00:00,100
Привіт, edX вітає вас.
""")
self.assertEqual(text, expected_text)
self.assertEqual(filename, u"塞.srt")
self.assertEqual(mime_type, 'application/x-subrip; charset=utf-8')
def test_value_error(self):
good_sjson = _create_file(content='bad content')
_upload_sjson_file(good_sjson, self.item.location)
self.item.sub = _get_subs_id(good_sjson.name)
transcripts = self.item.get_transcripts_info()
with self.assertRaises(ValueError):
self.item.get_transcript(transcripts)
def test_key_error(self):
good_sjson = _create_file(content="""
{
"start": [
270,
2720
],
"end": [
2720,
5430
]
}
""")
_upload_sjson_file(good_sjson, self.item.location)
self.item.sub = _get_subs_id(good_sjson.name)
transcripts = self.item.get_transcripts_info()
with self.assertRaises(KeyError):
self.item.get_transcript(transcripts)
| agpl-3.0 | 6,408,480,736,475,752,000 | 38.004357 | 145 | 0.619617 | false |
robhudson/zamboni | mkt/downloads/views.py | 5 | 1342 | from django import http
from django.shortcuts import get_object_or_404
import commonware.log
import amo
from access import acl
from amo.utils import HttpResponseSendFile
from files.models import File
from mkt.webapps.models import Webapp
log = commonware.log.getLogger('z.downloads')
def download_file(request, file_id, type=None):
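    # Overview comment (added; not in the original source): public or blocked
    # packaged-app files are served from the (re)signed package path; disabled
    # apps/files require viewer ownership; unsigned packages are only served to
    # developers of the app.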
file = get_object_or_404(File, pk=file_id)
webapp = get_object_or_404(Webapp, pk=file.version.addon_id,
is_packaged=True)
if webapp.is_disabled or file.status == amo.STATUS_DISABLED:
if not acl.check_addon_ownership(request, webapp, viewer=True,
ignore_disabled=True):
raise http.Http404()
# We treat blocked files like public files so users get the update.
if file.status in [amo.STATUS_PUBLIC, amo.STATUS_BLOCKED]:
path = webapp.sign_if_packaged(file.version_id)
else:
# This is someone asking for an unsigned packaged app.
if not acl.check_addon_ownership(request, webapp, dev=True):
raise http.Http404()
path = file.file_path
log.info('Downloading package: %s from %s' % (webapp.id, path))
return HttpResponseSendFile(request, path, content_type='application/zip',
etag=file.hash.split(':')[-1])
| bsd-3-clause | -865,827,683,980,715,100 | 34.315789 | 78 | 0.653502 | false |
shaunaks/python-monasca | monasca/v2/elasticsearch/alarmdefinitions.py | 1 | 14709 | # Copyright 2015 Carnegie Mellon University
#
# Author: Shaunak Shatmanyu <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import falcon
from oslo.config import cfg
from stevedore import driver
import uuid
from monasca.common import alarm_expr_parser
from monasca.common import alarm_expr_validator
from monasca.common import es_conn
from monasca.common import namespace
from monasca.common import resource_api
from monasca.openstack.common import log
try:
import ujson as json
except ImportError:
import json
alarmdefinitions_opts = [
cfg.StrOpt('doc_type', default='alarmdefinitions',
help='The doc_type that alarm definitions will be saved to.'),
cfg.StrOpt('index_strategy', default='fixed',
help='The index strategy used to create index name.'),
cfg.StrOpt('index_prefix', default='data_',
help='The index prefix where metrics were saved to.'),
cfg.IntOpt('size', default=1000,
help=('The query result limit. Any result set more than '
'the limit will be discarded.')),
]
cfg.CONF.register_opts(alarmdefinitions_opts, group='alarmdefinitions')
STATES = ['LOW', 'MEDIUM', 'HIGH', 'CRITICAL']
LOG = log.getLogger(__name__)
class AlarmDefinitionUtil(object):
@staticmethod
def severityparsing(msg):
try:
severity = msg["severity"]
if severity in STATES:
return msg
else:
msg["severity"] = "LOW"
return msg
except Exception:
return msg
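    # Illustrative behaviour (added; not in the original source):
    #   {"severity": "HIGH"}  -> returned unchanged
    #   {"severity": "bogus"} -> severity coerced to "LOW"
    #   {}                    -> returned as-is (the KeyError is swallowed)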
class AlarmDefinitionDispatcher(object):
def __init__(self, global_conf):
LOG.debug('Initializing AlarmDefinition V2API!')
super(AlarmDefinitionDispatcher, self).__init__()
self.doc_type = cfg.CONF.alarmdefinitions.doc_type
self.size = cfg.CONF.alarmdefinitions.size
# load index strategy
if cfg.CONF.alarmdefinitions.index_strategy:
self.index_strategy = driver.DriverManager(
namespace.STRATEGY_NS,
cfg.CONF.alarmdefinitions.index_strategy,
invoke_on_load=True,
invoke_kwds={}).driver
LOG.debug(self.index_strategy)
else:
self.index_strategy = None
self.index_prefix = cfg.CONF.alarmdefinitions.index_prefix
self._es_conn = es_conn.ESConnection(
self.doc_type, self.index_strategy, self.index_prefix)
def _get_alarm_definitions_response(self, res):
if res and res.status_code == 200:
obj = res.json()
if obj:
return obj.get('hits')
return None
def _get_alarm_definitions_helper(self, query_string):
query = {}
queries = []
field_string = 'alarmdefinitions.expression_data.dimensions.'
if query_string:
params = query_string.split('&')
for current_param in params:
current_param_split = current_param.split('=')
if current_param_split[0] == 'dimensions':
current_dimension_split = (
current_param_split[1].split(','))
for current_dimension in current_dimension_split:
current_dimen_data = current_dimension.split(':')
queries.append({
'query_string': {
'default_field': (field_string +
current_dimen_data[0]),
'query': current_dimen_data[1]
}
})
elif current_param_split[0] in ['limit', 'offset']:
# ignore the limit and offset for now.
pass
else:
queries.append({
'query_string': {
'default_field': current_param_split[0],
'query': current_param_split[1]
}
})
LOG.debug(queries)
query = {
'query': {
'bool': {
'must': queries
}
}
}
LOG.debug('Parsed Query: %s' % query)
return query
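    # Illustrative example (added; not in the original source): a query string
    # such as
    #     'name=cpu_alarm&dimensions=hostname:host1,region:east'
    # becomes a bool query whose "must" clauses are query_string filters on
    # 'alarmdefinitions.expression_data.dimensions.hostname' == 'host1',
    # '...dimensions.region' == 'east' and 'name' == 'cpu_alarm';
    # 'limit' and 'offset' parameters are ignored for now.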
@resource_api.Restify('/v2.0/alarm-definitions/', method='post')
def do_post_alarm_definitions(self, req, res):
LOG.debug('Creating the alarm definitions')
msg = req.stream.read()
LOG.debug("Message: %s" % msg)
post_msg = ast.literal_eval(msg)
# random uuid generation for alarm definition
id = str(uuid.uuid4())
post_msg["id"] = id
post_msg = AlarmDefinitionUtil.severityparsing(post_msg)
post_msg_json = json.dumps(post_msg)
LOG.debug("Validating Alarm Definition Data: %s" % post_msg_json)
if alarm_expr_validator.is_valid_alarm_definition(post_msg_json):
LOG.debug("Post Alarm Definition method: %s" % post_msg)
try:
expression_parsed = (
alarm_expr_parser.AlarmExprParser(post_msg["expression"]))
expression_data = expression_parsed.sub_alarm_expressions
expression_data_list = []
for temp in expression_data:
expression_data_list.append(expression_data[temp])
post_msg["expression_data"] = expression_data_list
LOG.debug(post_msg)
es_res = self._es_conn.post_messages(json.dumps(post_msg), id)
LOG.debug('Query to ElasticSearch returned Status: %s' %
es_res)
res.status = getattr(falcon, 'HTTP_%s' % es_res)
except Exception:
LOG.exception('Error occurred while handling '
'Alarm Definition Post Request.')
res.status = getattr(falcon, 'HTTP_400')
else:
LOG.error('Alarm definition is not valid.')
res.status = getattr(falcon, 'HTTP_400')
@resource_api.Restify('/v2.0/alarm-definitions/{id}', method='get')
def do_get_alarm_definitions_by_id(self, req, res, id):
LOG.debug('The alarm definitions GET request is received!')
LOG.debug(id)
es_res = self._es_conn.get_message_by_id(id)
res.status = getattr(falcon, 'HTTP_%s' % es_res.status_code)
LOG.debug('Query to ElasticSearch returned Status: %s' %
es_res.status_code)
es_res = self._get_alarm_definitions_response(es_res)
LOG.debug('Query to ElasticSearch returned: %s' % es_res)
res.body = ''
try:
if es_res["hits"]:
res_data = es_res["hits"][0]
if res_data:
res.body = json.dumps({
"id": id,
"links": [{"rel": "self",
"href": req.uri}],
"name": res_data["_source"]["name"],
"description": res_data["_source"]["description"],
"expression": res_data["_source"]["expression"],
"expression_data":
res_data["_source"]["expression_data"],
"severity": res_data["_source"]["severity"],
"match_by": res_data["_source"]["match_by"],
"alarm_actions": res_data["_source"]["alarm_actions"],
"ok_actions": res_data["_source"]["ok_actions"],
"undetermined_actions": res_data["_source"]
["undetermined_actions"]})
res.content_type = 'application/json;charset=utf-8'
except Exception:
LOG.exception('Error occurred while handling Alarm Definition '
'Get Request.')
@resource_api.Restify('/v2.0/alarm-definitions/{id}', method='put')
def do_put_alarm_definitions(self, req, res, id):
LOG.debug("Put the alarm definitions with id: %s" % id)
es_res = self._es_conn.get_message_by_id(id)
LOG.debug('Query to ElasticSearch returned Status: %s' %
es_res.status_code)
es_res = self._get_alarm_definitions_response(es_res)
LOG.debug('Query to ElasticSearch returned: %s' % es_res)
original_data = {}
try:
if es_res["hits"]:
res_data = es_res["hits"][0]
if res_data:
original_data = json.dumps({
"id": id,
"name": res_data["_source"]["name"],
"description": res_data["_source"]["description"],
"expression": res_data["_source"]["expression"],
"expression_data":
res_data["_source"]["expression_data"],
"severity": res_data["_source"]["severity"],
"match_by": res_data["_source"]["match_by"],
"alarm_actions": res_data["_source"]["alarm_actions"],
"ok_actions": res_data["_source"]["ok_actions"],
"undetermined_actions": res_data["_source"]
["undetermined_actions"]})
msg = req.stream.read()
put_msg = ast.literal_eval(msg)
put_msg = AlarmDefinitionUtil.severityparsing(put_msg)
expression_parsed = (
alarm_expr_parser.AlarmExprParser(put_msg["expression"])
)
expression_data = expression_parsed.sub_alarm_expressions
expression_data_list = []
for temp in expression_data:
expression_data_list.append(expression_data[temp])
put_msg["expression_data"] = expression_data_list
put_msg_json = json.dumps(put_msg)
LOG.debug("Alarm Definition Put Data: %s" % put_msg_json)
if alarm_expr_validator.is_valid_update_alarm_definition(
original_data, put_msg_json):
es_res = self._es_conn.put_messages(put_msg_json, id)
LOG.debug('Query to ElasticSearch returned Status: %s' %
es_res)
res.status = getattr(falcon, 'HTTP_%s' % es_res)
else:
res.status = getattr(falcon, 'HTTP_400')
LOG.debug("Validating Alarm Definition Failed !!")
except Exception:
res.status = getattr(falcon, 'HTTP_400')
LOG.exception('Error occurred while handling Alarm '
'Definition Put Request.')
@resource_api.Restify('/v2.0/alarm-definitions/{id}', method='delete')
def do_delete_alarm_definitions(self, req, res, id):
LOG.debug("Delete the alarm definitions with id: %s" % id)
try:
es_res = self._es_conn.del_messages(id)
LOG.debug('Query to ElasticSearch returned Status: %s' %
es_res)
res.status = getattr(falcon, 'HTTP_%s' % es_res)
except Exception:
res.status = getattr(falcon, 'HTTP_400')
LOG.exception('Error occurred while handling Alarm '
'Definition Delete Request.')
@resource_api.Restify('/v2.0/alarm-definitions/', method='get')
def do_get_alarm_definitions_filtered(self, req, res):
LOG.debug('The alarm definitions GET request is received!')
query_string = req.query_string
LOG.debug('Request Query String: %s' % query_string)
params = self._get_alarm_definitions_helper(query_string)
LOG.debug('Query Data: %s' % params)
es_res = self._es_conn.get_messages(params)
res.status = getattr(falcon, 'HTTP_%s' % es_res.status_code)
LOG.debug('Query to ElasticSearch returned Status: %s' %
es_res.status_code)
es_res = self._get_alarm_definitions_response(es_res)
LOG.debug('Query to ElasticSearch returned: %s' % es_res)
res.body = ''
result_elements = []
try:
if es_res["hits"]:
res_data = es_res["hits"]
for current_alarm in res_data:
if current_alarm:
result_elements.append({
"id": current_alarm["_source"]["id"],
"links": [{"rel": "self",
"href": req.uri}],
"name": current_alarm["_source"]["name"],
"description":
current_alarm["_source"]["description"],
"expression":
current_alarm["_source"]["expression"],
"expression_data":
current_alarm["_source"]["expression_data"],
"severity":
current_alarm["_source"]["severity"],
"match_by":
current_alarm["_source"]["match_by"],
"alarm_actions":
current_alarm["_source"]["alarm_actions"],
"ok_actions":
current_alarm["_source"]["ok_actions"],
"undetermined_actions":
current_alarm["_source"]
["undetermined_actions"]})
res.body = json.dumps({
"links": [{"rel": "self", "href": req.uri}],
"elements": result_elements
})
else:
res.body = ""
res.content_type = 'application/json;charset=utf-8'
except Exception:
res.status = getattr(falcon, 'HTTP_400')
LOG.exception('Error occurred while handling Alarm '
'Definitions Get Request.')
| apache-2.0 | -6,302,949,945,039,539,000 | 40.668555 | 78 | 0.518458 | false |
woutdenolf/spectrocrunch | spectrocrunch/visualization/id21_scanoverlap.py | 1 | 7933 | # -*- coding: utf-8 -*-
from ..io.spec import spec
import h5py
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from ..math.utils import logscale
import warnings
def show(
x,
y,
images,
xp,
yp,
xlabel,
ylabel,
names,
transpose=False,
flipvert=False,
fliphor=False,
color="#ffffff",
defaultorigin=False,
printpos=False,
outname=None,
):
"""
Args:
x(np.array): horizontal coordinates
y(np.array): vertical coordinates
images(np.array): image
xp(np.array): marker horizontal coord.
yp(np.array): marker vertical coord.
xlabel(str):
ylabel(str):
names(list(str)):
"""
# Make monotonically increasing (required by interp2d)
ind = np.argsort(x)
x = x[ind]
images = images[:, :, ind]
ind = np.argsort(y)
y = y[ind]
images = images[:, ind, :]
nimg = images.shape[0]
# New grid
xnew = np.linspace(x[0], x[-1], len(x))
ynew = np.linspace(y[0], y[-1], len(y))
# Interpolate
for i in range(nimg):
# Use another algorithm
f = interpolate.interp2d(x, y, images[i, ...], kind="cubic")
images[i, ...] = np.clip(f(xnew, ynew), 0, 1)
# Plot range
dx = (xnew[1] - xnew[0]) / 2.0
dy = (ynew[1] - ynew[0]) / 2.0
extent = (x[0] - dx, x[-1] + dx, y[0] - dy, y[-1] + dy)
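    # Note (added; not in the original source): the half-pixel margins dx/dy
    # make imshow place pixel centres on the measured coordinates, e.g. for
    # x = [0, 2, 4] um the horizontal extent becomes (-1, 5) um.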
origin = "lower"
# Transpose
if transpose:
extent = (extent[2], extent[3], extent[0], extent[1])
images = images.transpose((0, 2, 1))
xp, yp = yp, xp
xlabel, ylabel = ylabel, xlabel
# Flip vertical
if flipvert:
extent = (extent[0], extent[1], extent[3], extent[2])
images = images[:, ::-1, :]
# Flip horizontal
if fliphor:
extent = (extent[1], extent[0], extent[2], extent[3])
images = images[:, :, ::-1]
# Origin left bottom
if defaultorigin:
ind = [0, 1, 2, 3]
if extent[1] < extent[0]:
# extent[0] ... xp .......... extent[1]
# extent[1] ... xp .......... extent[0]
xp = extent[1] + extent[0] - xp
ind[0] = 1
ind[1] = 0
if extent[3] < extent[2]:
ind[2] = 3
ind[3] = 2
yp = extent[3] + extent[2] - yp
extent = (extent[ind[0]], extent[ind[1]], extent[ind[2]], extent[ind[3]])
# Show
if printpos:
print(extent)
print(np.vstack((xp, yp)).T)
# RGB for plotting
if transpose:
rgb = np.zeros((len(xnew), len(ynew), 3))
else:
rgb = np.zeros((len(ynew), len(xnew), 3))
for i in range(3):
rgb[..., i] = images[i, ...]
# rgb = images[0:3,...].transpose((1,2,0))
# Plot
plt.figure(1)
plt.clf()
im = plt.imshow(
rgb, extent=extent, origin=origin, interpolation="nearest", aspect=1
) # ,cmap=plt.get_cmap("gray")
axes = plt.gca()
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
xlim, ylim = axes.get_xlim(), axes.get_ylim()
fontsize = 12
s = fontsize / 2
axes.scatter(xp, yp, marker="o", s=s, color=color)
for i in range(len(names)):
# try:
# rgbi = rgb[int(np.round(xp[i])),int(np.round(yp[i])),:]*255
# print(rgbi[0]*0.299 + rgbi[1]*0.587 + rgbi[2]*0.114)
# if (rgbi[0]*0.299 + rgbi[1]*0.587 + rgbi[2]*0.114) > 100:
# color = '#000000'
# else:
# color = '#ffffff'
# except:
# color = '#ffffff'
# color = '#%02x%02x%02x' % tuple(255-rgbi)
# axes.scatter(xp[i], yp[i], marker='o',s=s,color = color)
if names[i] is not None:
axes.annotate(
names[i], xy=(xp[i], yp[i]), xytext=(xp[i] + dx, yp[i]), color=color
)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
if outname is None:
plt.show()
else:
plt.savefig(outname, bbox_inches="tight", dpi=300)
def plot(
hdf5filename,
grps,
specfilename,
specnumbers,
offsamy,
offsamz,
transpose=False,
flipvert=True,
fliphor=False,
defaultorigin=False,
showlabels=False,
color="#ffffff",
printpos=False,
outname=None,
log=False,
):
"""
Args:
hdf5filename(str)
grps(dict): keys must be 0, 1 or 2 (r, g, b)
specfilename(str)
specnumbers(list(int))
        offsamy(float or list(float)): offset(s) added to the samy/sampy positions
        offsamz(float or list(float)): offset(s) added to the samz/sampz positions
"""
oh5 = h5py.File(hdf5filename)
# Prepare global coordinates
dim1off = 0.0
dim1name = "samz"
dim1mult = 1
dim2off = 0.0
dim2name = "samy"
dim2mult = 1
try:
ocoord = oh5["stackinfo"]
except KeyError:
warnings.warn(
'"coordinates" is deprecated and should be replaced by "stackinfo"',
DeprecationWarning,
)
ocoord = oh5["coordinates"]
for f in ocoord:
if f == "samz":
dim1off = ocoord[f].value * 1000
dim1name = "sampz"
dim1mult = 1
if f == "sampz":
dim1off = ocoord[f].value
dim1name = "samz"
dim1mult = 1000
if f == "samy":
dim2off = ocoord[f].value * 1000
dim2name = "sampy"
dim2mult = 1
if f == "sampy":
dim2off = ocoord[f].value
dim2name = "samy"
dim2mult = 1000
# Get image with axes in micron
for i in grps:
ogrp = oh5[grps[i]["path"]]
odset = ogrp[ogrp.attrs["signal"]]
dim1 = dim1off[grps[i]["ind"]] + ogrp[dim1name].value * dim1mult
dim2 = dim2off[grps[i]["ind"]] + ogrp[dim2name].value * dim2mult
idim1 = ogrp.attrs[dim1name + "_indices"]
idim2 = ogrp.attrs[dim2name + "_indices"]
if idim2 != 0 and idim1 != 0:
img = odset[grps[i]["ind"], ...]
elif idim2 != 1 and idim1 != 1:
img = odset[:, grps[i]["ind"], :]
else:
img = odset[..., grps[i]["ind"]]
img[np.isnan(img)] = np.nanmin(img)
if idim1 > idim2:
img = img.T
if i == 0:
images = np.zeros((3,) + img.shape, dtype=img.dtype)
if log:
img = logscale(img)
mi = np.min(img)
ma = np.max(img)
d = ma - mi
mi += d * grps[i]["lo"]
ma -= d * (1 - grps[i]["hi"])
img -= mi
img /= ma
img = np.clip(img, 0, 1)
images[i, ...] = img
oh5.close()
# XANES positions
ospec = spec(specfilename)
motors = ["samz", "sampz", "samy", "sampy"]
n = len(specnumbers)
pdim1 = np.empty(n)
pdim2 = np.empty(n)
if not hasattr(offsamz, "__len__"):
offsamz = [offsamz] * n
if not hasattr(offsamy, "__len__"):
offsamy = [offsamy] * n
for i in range(n):
v = ospec.getmotorvalues(specnumbers[i], motors)
if printpos:
print("Spec number {}".format(i))
for a, b in zip(motors, v):
print(" {} = {}".format(a, b))
pdim1[i] = v[0] * 1000 + v[1] + offsamz[i]
pdim2[i] = v[2] * 1000 + v[3] + offsamy[i]
# Make axes values readable
m1 = min(dim1)
m2 = min(dim2)
dim1 -= m1
dim2 -= m2
pdim1 -= m1
pdim2 -= m2
# Plot
if showlabels:
names = [str(i) for i in specnumbers]
else:
names = [None] * n
if defaultorigin:
dim2label = "x ($\mu$m)"
dim1label = "y ($\mu$m)"
else:
dim2label = "y ($\mu$m)"
dim1label = "z ($\mu$m)"
show(
dim2,
dim1,
images,
pdim2,
pdim1,
dim2label,
dim1label,
names,
transpose=transpose,
flipvert=flipvert,
fliphor=fliphor,
color=color,
defaultorigin=defaultorigin,
printpos=printpos,
outname=outname,
)
| mit | -2,253,729,458,052,529,000 | 24.840391 | 84 | 0.499811 | false |
Southpaw-TACTIC/TACTIC | 3rd_party/python3/site-packages/cherrypy-18.1.2/cherrypy/_cpnative_server.py | 4 | 6677 | """Native adapter for serving CherryPy via its builtin server."""
import logging
import sys
import io
import cheroot.server
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
from ._cpcompat import tonative
class NativeGateway(cheroot.server.Gateway):
"""Native gateway implementation allowing to bypass WSGI."""
recursive = False
def respond(self):
"""Obtain response from CherryPy machinery and then send it."""
req = self.req
try:
# Obtain a Request object from CherryPy
local = req.server.bind_addr # FIXME: handle UNIX sockets
local = tonative(local[0]), local[1]
local = httputil.Host(local[0], local[1], '')
remote = tonative(req.conn.remote_addr), req.conn.remote_port
remote = httputil.Host(remote[0], remote[1], '')
scheme = tonative(req.scheme)
sn = cherrypy.tree.script_name(tonative(req.uri or '/'))
if sn is None:
self.send_response('404 Not Found', [], [''])
else:
app = cherrypy.tree.apps[sn]
method = tonative(req.method)
path = tonative(req.path)
qs = tonative(req.qs or '')
headers = (
(tonative(h), tonative(v))
for h, v in req.inheaders.items()
)
rfile = req.rfile
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(
local, remote, scheme, 'HTTP/1.1')
request.multithread = True
request.multiprocess = False
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the
# response
try:
request.run(
method, path, qs,
tonative(req.request_protocol),
headers, rfile,
)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not self.recursive:
if ir.path in redirections:
raise RuntimeError(
'InternalRedirector visited the same '
'URL twice: %r' % ir.path)
else:
# Add the *previous* path_info + qs to
# redirections.
if qs:
qs = '?' + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = 'GET'
path = ir.path
qs = ir.query_string
rfile = io.BytesIO()
self.send_response(
response.output_status, response.header_list,
response.body)
finally:
app.release_serving()
except Exception:
tb = format_exc()
# print tb
cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
s, h, b = bare_error()
self.send_response(s, h, b)
def send_response(self, status, headers, body):
"""Send response to HTTP request."""
req = self.req
# Set response status
req.status = status or b'500 Server Error'
# Set response headers
for header, value in headers:
req.outheaders.append((header, value))
if (req.ready and not req.sent_headers):
req.sent_headers = True
req.send_headers()
# Set response body
for seg in body:
req.write(seg)
class CPHTTPServer(cheroot.server.HTTPServer):
"""Wrapper for cheroot.server.HTTPServer.
cheroot has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications.
Therefore, we wrap it here, so we can apply some attributes
from config -> cherrypy.server -> HTTPServer.
"""
def __init__(self, server_adapter=cherrypy.server):
"""Initialize CPHTTPServer."""
self.server_adapter = server_adapter
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
cheroot.server.HTTPServer.__init__(
self, server_adapter.bind_addr, NativeGateway,
minthreads=server_adapter.thread_pool,
maxthreads=server_adapter.thread_pool_max,
server_name=server_name)
self.max_request_header_size = (
self.server_adapter.max_request_header_size or 0)
self.max_request_body_size = (
self.server_adapter.max_request_body_size or 0)
self.request_queue_size = self.server_adapter.socket_queue_size
self.timeout = self.server_adapter.socket_timeout
self.shutdown_timeout = self.server_adapter.shutdown_timeout
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
if self.server_adapter.ssl_context:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
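# Illustrative wiring sketch (added; not part of this module). The native
# server is typically enabled by swapping CherryPy's default WSGI HTTP server
# for this class on the bus-managed server adapter; ``Root`` below stands for
# any user-supplied application class:
#
#     import cherrypy
#     from cherrypy._cpnative_server import CPHTTPServer
#
#     cherrypy.server.httpserver = CPHTTPServer(cherrypy.server)
#     cherrypy.quickstart(Root())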
| epl-1.0 | 3,932,335,958,504,604,000 | 38.744048 | 78 | 0.510708 | false |
carroarmato0/jenkins-job-builder | tests/cmd/subcommands/test_update.py | 13 | 4433 |
# Joint copyright:
# - Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from jenkins_jobs import cmd
from jenkins_jobs import builder
from tests.base import mock
from tests.cmd.test_cmd import CmdTestsBase
@mock.patch('jenkins_jobs.builder.Jenkins.get_plugins_info', mock.MagicMock)
class UpdateTests(CmdTestsBase):
@mock.patch('jenkins_jobs.cmd.Builder.update_job')
def test_update_jobs(self, update_job_mock):
"""
Test update_job is called
"""
# don't care about the value returned here
update_job_mock.return_value = ([], 0)
path = os.path.join(self.fixtures_path, 'cmd-002.yaml')
args = self.parser.parse_args(['update', path])
cmd.execute(args, self.config)
update_job_mock.assert_called_with([path], [])
@mock.patch('jenkins_jobs.builder.Jenkins.is_job', return_value=True)
@mock.patch('jenkins_jobs.builder.Jenkins.get_jobs')
@mock.patch('jenkins_jobs.builder.Builder.delete_job')
@mock.patch('jenkins_jobs.cmd.Builder')
def test_update_jobs_and_delete_old(self, builder_mock, delete_job_mock,
get_jobs_mock, is_job_mock):
"""
Test update behaviour with --delete-old option
Test update of jobs with the --delete-old option enabled, where only
some jobs result in has_changed() to limit the number of times
update_job is called, and have the get_jobs() method return additional
jobs not in the input yaml to test that the code in cmd will call
delete_job() after update_job() when '--delete-old' is set but only
for the extra jobs.
"""
# set up some test data
jobs = ['old_job001', 'old_job002']
extra_jobs = [{'name': name} for name in jobs]
builder_obj = builder.Builder('http://jenkins.example.com',
'doesnot', 'matter',
plugins_list={})
# get the instance created by mock and redirect some of the method
# mocks to call real methods on a the above test object.
b_inst = builder_mock.return_value
b_inst.plugins_list = builder_obj.plugins_list
b_inst.update_job.side_effect = builder_obj.update_job
b_inst.delete_old_managed.side_effect = builder_obj.delete_old_managed
def _get_jobs():
return builder_obj.parser.jobs + extra_jobs
get_jobs_mock.side_effect = _get_jobs
# override cache to ensure Jenkins.update_job called a limited number
# of times
self.cache_mock.return_value.has_changed.side_effect = (
[True] * 2 + [False] * 2)
path = os.path.join(self.fixtures_path, 'cmd-002.yaml')
args = self.parser.parse_args(['update', '--delete-old', path])
with mock.patch('jenkins_jobs.builder.Jenkins.update_job') as update:
with mock.patch('jenkins_jobs.builder.Jenkins.is_managed',
return_value=True):
cmd.execute(args, self.config)
self.assertEquals(2, update.call_count,
"Expected Jenkins.update_job to be called '%d' "
"times, got '%d' calls instead.\n"
"Called with: %s" % (2, update.call_count,
update.mock_calls))
calls = [mock.call(name) for name in jobs]
self.assertEquals(2, delete_job_mock.call_count,
"Expected Jenkins.delete_job to be called '%d' "
"times got '%d' calls instead.\n"
"Called with: %s" % (2, delete_job_mock.call_count,
delete_job_mock.mock_calls))
delete_job_mock.assert_has_calls(calls, any_order=True)
| apache-2.0 | 232,580,272,836,688,420 | 42.891089 | 78 | 0.607715 | false |
tongpo/Holle-World | py/python3-cookbook/cookbook/c04/p04_iterator_protocol.py | 1 | 2195 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: custom iterator protocol (自定义迭代器协议)
Desc :
"""
class Node:
def __init__(self, value):
self._value = value
self._children = []
def __repr__(self):
return 'Node({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
return iter(self._children)
def depth_first(self):
yield self
for c in self:
yield from c.depth_first()
class Node2:
def __init__(self, value):
self._value = value
self._children = []
def __repr__(self):
return 'Node({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
return iter(self._children)
def depth_first(self):
return DepthFirstIterator(self)
class DepthFirstIterator(object):
'''
Depth-first traversal
'''
def __init__(self, start_node):
self._node = start_node
self._children_iter = None
self._child_iter = None
def __iter__(self):
return self
def __next__(self):
# Return myself if just started; create an iterator for children
if self._children_iter is None:
self._children_iter = iter(self._node)
return self._node
# If processing a child, return its next item
elif self._child_iter:
try:
nextchild = next(self._child_iter)
return nextchild
except StopIteration:
self._child_iter = None
return next(self)
# Advance to the next child and start its iteration
else:
self._child_iter = next(self._children_iter).depth_first()
return next(self)
# Example
if __name__ == '__main__':
root = Node(0)
child1 = Node(1)
child2 = Node(2)
root.add_child(child1)
root.add_child(child2)
child1.add_child(Node(3))
child1.add_child(Node(4))
child2.add_child(Node(5))
for ch in root.depth_first():
print(ch)
# Outputs Node(0), Node(1), Node(3), Node(4), Node(2), Node(5)
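    # A minimal sketch (added for illustration, not part of the original
    # recipe): Node2 yields the same depth-first order through the explicit
    # DepthFirstIterator instead of a generator.
    root2 = Node2(0)
    left, right = Node2(1), Node2(2)
    root2.add_child(left)
    root2.add_child(right)
    left.add_child(Node2(3))
    for ch in root2.depth_first():
        print(ch)
    # Outputs Node(0), Node(1), Node(3), Node(2)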
| gpl-2.0 | 4,999,741,213,663,535,000 | 22.684783 | 72 | 0.548876 | false |
DirkHoffmann/indico | indico/modules/events/reviewing_questions_fields.py | 4 | 2523 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from wtforms.fields import BooleanField
from wtforms.validators import InputRequired
from indico.modules.events.fields import RatingReviewField
from indico.util.i18n import _
from indico.web.fields.base import BaseField, FieldConfigForm
from indico.web.fields.simple import BoolField, TextField
from indico.web.forms.widgets import SwitchWidget
class AbstractRatingReviewingQuestionConfigForm(FieldConfigForm):
no_score = BooleanField(_('Exclude from score'), widget=SwitchWidget())
class PaperRatingReviewingQuestionConfigForm(FieldConfigForm):
pass
class AbstractRatingReviewingQuestion(BaseField):
name = 'rating'
friendly_name = _('Rating')
common_settings = BaseField.common_settings + ('no_score',)
config_form_base = AbstractRatingReviewingQuestionConfigForm
wtf_field_class = RatingReviewField
required_validator = InputRequired
@property
def wtf_field_kwargs(self):
range_ = self.object.event.cfa.rating_range
choices = [(n, str(n)) for n in range(range_[0], range_[1] + 1)]
return {'coerce': int, 'choices': choices, 'rating_range': range_, 'question': self.object}
class PaperRatingReviewingQuestion(BaseField):
name = 'rating'
friendly_name = _('Rating')
config_form_base = PaperRatingReviewingQuestionConfigForm
wtf_field_class = RatingReviewField
required_validator = InputRequired
@property
def wtf_field_kwargs(self):
range_ = self.object.event.cfp.rating_range
choices = [(n, str(n)) for n in range(range_[0], range_[1] + 1)]
return {'coerce': int, 'choices': choices, 'rating_range': range_, 'question': self.object}
class BoolReviewingQuestion(BoolField, BaseField):
pass
class TextReviewingQuestionConfigForm(FieldConfigForm):
_order = ('title', 'is_required', 'description', 'max_length', 'max_words', 'multiline')
class TextReviewingQuestion(TextField, BaseField):
config_form_base = TextReviewingQuestionConfigForm
def get_reviewing_field_types(type_):
if type_ == 'abstracts':
return {f.name: f for f in [AbstractRatingReviewingQuestion, BoolReviewingQuestion, TextReviewingQuestion]}
elif type_ == 'papers':
return {f.name: f for f in [PaperRatingReviewingQuestion, BoolReviewingQuestion, TextReviewingQuestion]}
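# A hedged usage sketch (illustrative only, not part of the original module):
# callers look up the registered field class by name for a given review type,
# e.g. get_reviewing_field_types('abstracts')['rating'] returns
# AbstractRatingReviewingQuestion, whose wtf_field_kwargs builds the rating
# choices from the event's CFA rating range.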
| gpl-3.0 | 1,532,861,685,992,132,900 | 34.535211 | 115 | 0.732461 | false |
Darthkpo/xtt | openpyxl/descriptors/tests/test_excel.py | 1 | 2799 | from __future__ import absolute_import
import pytest
from .. import Strict
@pytest.fixture
def UniversalMeasure():
from ..excel import UniversalMeasure
class Dummy(Strict):
value = UniversalMeasure()
return Dummy()
class TestUniversalMeasure:
@pytest.mark.parametrize("value",
["24.73mm", "0cm", "24pt", '999pc', "50pi"]
)
def test_valid(self, UniversalMeasure, value):
UniversalMeasure.value = value
assert UniversalMeasure.value == value
@pytest.mark.parametrize("value",
[24.73, '24.73zz', "24.73 mm", None, "-24.73cm"]
)
def test_invalid(self, UniversalMeasure, value):
with pytest.raises(ValueError):
UniversalMeasure.value = "{0}".format(value)
@pytest.fixture
def HexBinary():
from ..excel import HexBinary
class Dummy(Strict):
value = HexBinary()
return Dummy()
class TestHexBinary:
@pytest.mark.parametrize("value",
["aa35efd", "AABBCCDD"]
)
def test_valid(self, HexBinary, value):
HexBinary.value = value
assert HexBinary.value == value
@pytest.mark.parametrize("value",
["GGII", "35.5"]
)
def test_invalid(self, HexBinary, value):
with pytest.raises(ValueError):
HexBinary.value = value
@pytest.fixture
def TextPoint():
from ..excel import TextPoint
class Dummy(Strict):
value = TextPoint()
return Dummy()
class TestTextPoint:
@pytest.mark.parametrize("value",
[-400000, "400000", 0]
)
def test_valid(self, TextPoint, value):
TextPoint.value = value
assert TextPoint.value == int(value)
def test_invalid_value(self, TextPoint):
with pytest.raises(ValueError):
TextPoint.value = -400001
def test_invalid_type(self, TextPoint):
with pytest.raises(TypeError):
TextPoint.value = "40pt"
@pytest.fixture
def Percentage():
from ..excel import Percentage
class Dummy(Strict):
value = Percentage()
return Dummy()
class TestPercentage:
@pytest.mark.parametrize("value",
["15%", "15.5%"]
)
def test_valid(self, Percentage, value):
Percentage.value = value
assert Percentage.value == value
@pytest.mark.parametrize("value",
["15", "101%", "-1%"]
)
    def test_invalid(self, Percentage, value):
with pytest.raises(ValueError):
Percentage.value = value
| mit | -9,000,623,897,279,098,000 | 22.521008 | 77 | 0.544837 | false |
drix00/xray_spectrum_analyzer | python/xrayspectrumanalyzer/tools/FitPolynomialFunction.py | 1 | 5364 | #!/usr/bin/env python
"""
.. py:currentmodule:: ProbeBroadening3D.fit.FitPolynomialFunction
.. moduleauthor:: Hendrix Demers <[email protected]>
Fit objects for first-, second- and third-degree polynomial functions, to be
used with the fitTools fitting utilities.
"""
###############################################################################
# Copyright 2016 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
# Third party modules.
import numpy as np
# Local modules.
# Project modules
import xrayspectrumanalyzer.tools.fitTools as fitTools
# Globals and constants variables.
class FitPolynomialFirstDegreeFunction(fitTools._FitObject):
def __init__(self, a=None, b=None):
if a is None:
self._aFixed = False
else:
self._aFixed = True
self._a = a
if b is None:
self._bFixed = False
else:
self._bFixed = True
self._b = b
def evaluation(self, x, parameters):
indexParameters = 0
if not self._aFixed:
self._a = parameters[indexParameters]
indexParameters +=1
if not self._bFixed:
self._b = parameters[indexParameters]
indexParameters +=1
yFit = self.function(x)
return yFit
def function(self, x):
return self._a + self._b*x
def getNumberFitParameters(self):
numberParameters = 0
if not self._aFixed:
numberParameters +=1
if not self._bFixed:
numberParameters +=1
return numberParameters
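# A hedged usage sketch (illustrative, not part of the original module):
# fixing a coefficient in the constructor removes it from the free-parameter
# vector consumed by evaluation(), e.g.
#     fit_func = FitPolynomialFirstDegreeFunction(a=0.0)
#     fit_func.getNumberFitParameters()     # -> 1 (only b is free)
#     fit_func.evaluation(2.0, [3.0])       # -> 0.0 + 3.0*2.0 = 6.0
# The same fixed/free convention applies to the higher-degree classes below.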
class FitPolynomialSecondDegreeFunction(fitTools._FitObject):
def __init__(self, a=None, b=None, c=None):
if a is None:
self._aFixed = False
else:
self._aFixed = True
self._a = a
if b is None:
self._bFixed = False
else:
self._bFixed = True
self._b = b
if c is None:
self._cFixed = False
else:
self._cFixed = True
self._c = c
def evaluation(self, x, parameters):
indexParameters = 0
if not self._aFixed:
self._a = parameters[indexParameters]
indexParameters +=1
if not self._bFixed:
self._b = parameters[indexParameters]
indexParameters +=1
if not self._cFixed:
self._c = parameters[indexParameters]
indexParameters +=1
yFit = self.function(x)
return yFit
def function(self, x):
return self._a + self._b*x + self._c*np.power(x, 2.0)
def getNumberFitParameters(self):
numberParameters = 0
if not self._aFixed:
numberParameters +=1
if not self._bFixed:
numberParameters +=1
if not self._cFixed:
numberParameters +=1
return numberParameters
class FitPolynomialThirdDegreeFunction(fitTools._FitObject):
def __init__(self, a=None, b=None, c=None, d=None):
if a is None:
self._aFixed = False
else:
self._aFixed = True
self._a = a
if b is None:
self._bFixed = False
else:
self._bFixed = True
self._b = b
if c is None:
self._cFixed = False
else:
self._cFixed = True
self._c = c
if d is None:
self._dFixed = False
else:
self._dFixed = True
self._d = d
def evaluation(self, x, parameters):
indexParameters = 0
if not self._aFixed:
self._a = parameters[indexParameters]
indexParameters +=1
if not self._bFixed:
self._b = parameters[indexParameters]
indexParameters +=1
if not self._cFixed:
self._c = parameters[indexParameters]
indexParameters +=1
if not self._dFixed:
self._d = parameters[indexParameters]
indexParameters +=1
yFit = self.function(x)
return yFit
def function(self, x):
return self._a + self._b*x + self._c*np.power(x, 2.0) + self._d*np.power(x, 3.0)
def getNumberFitParameters(self):
numberParameters = 0
if not self._aFixed:
numberParameters +=1
if not self._bFixed:
numberParameters +=1
if not self._cFixed:
numberParameters +=1
if not self._dFixed:
numberParameters +=1
return numberParameters
class PolynomialFirstDegreeFunction(object):
def __init__(self, a, b):
self._a = a
self._b = b
def __call__(self, x):
return self._a + self._b*x
if __name__ == '__main__': #pragma: no cover
import pyHendrixDemersTools.Runner as Runner
Runner.Runner().run(runFunction=None)
| apache-2.0 | 6,428,750,328,526,998,000 | 25.423645 | 88 | 0.557233 | false |
zhongzho/starter-python-bot | bot/eliza2.py | 1 | 7086 | #
# Copyright (C) 2001-2012 NLTK Project
# Authors: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
# Based on an Eliza implementation by Joe Strout <[email protected]>,
# Jeff Epler <[email protected]> and Jez Higgins <mailto:[email protected]>.
# a translation table used to convert things you say into things the
# computer says back, e.g. "I am" --> "you are"
from util import Chat, reflections
# a table of response pairs, where each pair consists of a
# regular expression, and a list of possible responses,
# with group-macros labelled as %1, %2.
pairs = (
(r'I need (.*)',
( "Why do you need %1?",
"Would it really help you to get %1?",
"Are you sure you need %1?")),
(r'Why don\'t you (.*)',
( "Do you really think I don't %1?",
"Perhaps eventually I will %1.",
"Do you really want me to %1?")),
(r'Why can\'t I (.*)',
( "Do you think you should be able to %1?",
"If you could %1, what would you do?",
"I don't know -- why can't you %1?",
"Have you really tried?")),
(r'I can\'t (.*)',
( "How do you know you can't %1?",
"Perhaps you could %1 if you tried.",
"What would it take for you to %1?")),
(r'I am (.*)',
( "Did you come to me because you are %1?",
"How long have you been %1?",
"How do you feel about being %1?")),
(r'I\'m (.*)',
( "How does being %1 make you feel?",
"Do you enjoy being %1?",
"Why do you tell me you're %1?",
"Why do you think you're %1?")),
(r'Are you (.*)',
( "Why does it matter whether I am %1?",
"Would you prefer it if I were not %1?",
"Perhaps you believe I am %1.",
"I may be %1 -- what do you think?")),
(r'What (.*)',
( "Why do you ask?",
"How would an answer to that help you?",
"What do you think?")),
(r'How (.*)',
( "How do you suppose?",
"Perhaps you can answer your own question.",
"What is it you're really asking?")),
(r'Because (.*)',
( "Is that the real reason?",
"What other reasons come to mind?",
"Does that reason apply to anything else?",
"If %1, what else must be true?")),
(r'(.*) sorry (.*)',
( "There are many times when no apology is needed.",
"What feelings do you have when you apologize?")),
(r'Hello(.*)',
( "Hello... I'm glad you could drop by today.",
"Hi there... how are you today?",
"Hello, how are you feeling today?")),
(r'I think (.*)',
( "Do you doubt %1?",
"Do you really think so?",
"But you're not sure %1?")),
(r'(.*) friend (.*)',
( "Tell me more about your friends.",
"When you think of a friend, what comes to mind?",
"Why don't you tell me about a childhood friend?")),
(r'Yes',
( "You seem quite sure.",
"OK, but can you elaborate a bit?")),
(r'(.*) computer(.*)',
( "Are you really talking about me?",
"Does it seem strange to talk to a computer?",
"How do computers make you feel?",
"Do you feel threatened by computers?")),
(r'Is it (.*)',
( "Do you think it is %1?",
"Perhaps it's %1 -- what do you think?",
"If it were %1, what would you do?",
"It could well be that %1.")),
(r'It is (.*)',
( "You seem very certain.",
"If I told you that it probably isn't %1, what would you feel?")),
(r'Can you (.*)',
( "What makes you think I can't %1?",
"If I could %1, then what?",
"Why do you ask if I can %1?")),
(r'Can I (.*)',
( "Perhaps you don't want to %1.",
"Do you want to be able to %1?",
"If you could %1, would you?")),
(r'You are (.*)',
( "Why do you think I am %1?",
"Does it please you to think that I'm %1?",
"Perhaps you would like me to be %1.",
"Perhaps you're really talking about yourself?")),
(r'You\'re (.*)',
( "Why do you say I am %1?",
"Why do you think I am %1?",
"Are we talking about you, or me?")),
(r'I don\'t (.*)',
( "Don't you really %1?",
"Why don't you %1?",
"Do you want to %1?")),
(r'I feel (.*)',
( "Good, tell me more about these feelings.",
"Do you often feel %1?",
"When do you usually feel %1?",
"When you feel %1, what do you do?")),
(r'I have (.*)',
( "Why do you tell me that you've %1?",
"Have you really %1?",
"Now that you have %1, what will you do next?")),
(r'I would (.*)',
( "Could you explain why you would %1?",
"Why would you %1?",
"Who else knows that you would %1?")),
(r'Is there (.*)',
( "Do you think there is %1?",
"It's likely that there is %1.",
"Would you like there to be %1?")),
(r'My (.*)',
( "I see, your %1.",
"Why do you say that your %1?",
"When your %1, how do you feel?")),
(r'You (.*)',
( "We should be discussing you, not me.",
"Why do you say that about me?",
"Why do you care whether I %1?")),
(r'Why (.*)',
( "Why don't you tell me the reason why %1?",
"Why do you think %1?" )),
(r'I want (.*)',
( "What would it mean to you if you got %1?",
"Why do you want %1?",
"What would you do if you got %1?",
"If you got %1, then what would you do?")),
(r'(.*) mother(.*)',
( "Tell me more about your mother.",
"What was your relationship with your mother like?",
"How do you feel about your mother?",
"How does this relate to your feelings today?",
"Good family relations are important.")),
(r'(.*) father(.*)',
( "Tell me more about your father.",
"How did your father make you feel?",
"How do you feel about your father?",
"Does your relationship with your father relate to your feelings today?",
"Do you have trouble showing affection with your family?")),
(r'(.*) child(.*)',
( "Did you have close friends as a child?",
"What is your favorite childhood memory?",
"Do you remember any dreams or nightmares from childhood?",
"Did the other children sometimes tease you?",
"How do you think your childhood experiences relate to your feelings today?")),
(r'(.*)\?',
( "Why do you ask that?",
"Please consider whether you can answer your own question.",
"Perhaps the answer lies within yourself?",
"Why don't you tell me?")),
(r'quit',
( "Thank you for talking with me.",
"Good-bye.",
"Thank you, that will be $150. Have a good day!")),
(r'(.*)',
( "Please tell me more.",
"Let's change focus a bit... Tell me about your family.",
"Can you elaborate on that?",
"Why do you say that %1?",
"I see.",
"Very interesting.",
"%1.",
"I see. And what does that tell you?",
"How does that make you feel?",
"How do you feel when you say that?"))
)
eliza_chatbot = Chat(pairs, reflections)
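# A hedged illustration (assuming util.Chat mirrors NLTK's Chat API with a
# respond() method): the %1 macro in a response is filled from the matched
# regex group, e.g.
#     eliza_chatbot.respond("I need a holiday")
#     # -> "Why do you need a holiday?" (or another response from that pair)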
def eliza_chat():
print "Therapist\n---------"
print "Talk to the program by typing in plain English, using normal upper-"
print 'and lower-case letters and punctuation. Enter "quit" when done.'
print '='*72
print "Hello. How are you feeling today?"
eliza_chatbot.converse()
def demo():
eliza_chat()
if __name__ == "__main__":
demo()
| mit | 25,046,015,231,620,012 | 28.280992 | 83 | 0.5779 | false |
arummler/eudaq | legacy/etc/logtools/MagicLogBookToGoogleSpreadsheet.py | 14 | 2708 | #!/usr/bin/python
# Authors Mathieu Benoit ([email protected])
# Samir Arfaoui ([email protected])
import time
#You need to have the gdata (google docs) libraries in your PYTHONPATH
import gdata.spreadsheet.service
import getpass
import os
import subprocess
#UGLY HACK, adding check_output for python version lacking it
if "check_output" not in dir( subprocess ): # duck punch it in!
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
# Function parsing the magic log book for run times, number of events etc ...
def ParseMagicLogBook(FileLocation,runs) :
cmd = "cd ~/workspace/eudaq/bin;./MagicLogBook.exe %s/run%s.raw -p full" % ( FileLocation, runs )
result = subprocess.check_output(cmd, shell=True)
datastr = result.split()[13:]
print len(datastr) / 9
nlines = len(datastr) / 9
rundatalist = []
for i in range(nlines) :
l=datastr[i*9 :i*9+8]
print datastr[i*9 :i*9+8]
dico = {}
dico['run'] = l[0]
dico['time'] = l[2] + ' ' + l[3]
dico['events'] = l[4]
rundatalist.append(dico)
return rundatalist
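# For illustration (the values below are made up), each entry returned above
# looks like:
#     {'run': '00319001', 'time': '2014-02-06 10:12:33', 'events': '54321'}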
# Parsing of the logbook output for the files in your raw file folder; the run string supports wildcards
#runlist = ParseMagicLogBook('/mnt/datura_raid/data/2014w6_CLIC','00319*')
runlist = ParseMagicLogBook('/Your/Raw/Folder','00*')
# Find this value in the google doc url with 'key=XXX' and copy XXX below
spreadsheet_key = '0AkFaasasfEWsddfegEWrgeRGergGRErgerGERG3c'
# All spreadsheets have worksheets. I think worksheet #1 by default always
# has a value of 'od6'
worksheet_id = 'od6'
spr_client = gdata.spreadsheet.service.SpreadsheetsService()
spr_client.email = 'yourusername'
spr_client.password = ' youruniquegeneratedpassword '
spr_client.source = 'Example Spreadsheet Writing Application'
spr_client.ClientLogin(spr_client.email,spr_client.password)
# Prepare the dictionary to write
# Columns are associated with dictionary keys, so you need to put a title on each column in your spreadsheet
for dico in runlist :
entry = spr_client.InsertRow(dico, spreadsheet_key, worksheet_id)
if isinstance(entry, gdata.spreadsheet.SpreadsheetsList):
print "Insert row succeeded."
else:
print "Insert row failed."
# Eh Voila you are done !
| lgpl-3.0 | 7,582,569,043,515,473,000 | 29.772727 | 111 | 0.702733 | false |
jcoady9/youtube-dl | youtube_dl/extractor/hearthisat.py | 23 | 5242 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
HEADRequest,
KNOWN_EXTENSIONS,
sanitized_Request,
str_to_int,
urlencode_postdata,
urlhandle_detect_ext,
)
class HearThisAtIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hearthis\.at/(?P<artist>[^/]+)/(?P<title>[A-Za-z0-9\-]+)/?$'
_PLAYLIST_URL = 'https://hearthis.at/playlist.php'
_TESTS = [{
'url': 'https://hearthis.at/moofi/dr-kreep',
'md5': 'ab6ec33c8fed6556029337c7885eb4e0',
'info_dict': {
'id': '150939',
'ext': 'wav',
'title': 'Moofi - Dr. Kreep',
'thumbnail': 're:^https?://.*\.jpg$',
'timestamp': 1421564134,
'description': 'Listen to Dr. Kreep by Moofi on hearthis.at - Modular, Eurorack, Mutable Intruments Braids, Valhalla-DSP',
'upload_date': '20150118',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 71,
'categories': ['Experimental'],
}
}, {
# 'download' link redirects to the original webpage
'url': 'https://hearthis.at/twitchsf/dj-jim-hopkins-totally-bitchin-80s-dance-mix/',
'md5': '5980ceb7c461605d30f1f039df160c6e',
'info_dict': {
'id': '811296',
'ext': 'mp3',
'title': 'TwitchSF - DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix!',
'description': 'Listen to DJ Jim Hopkins - Totally Bitchin\' 80\'s Dance Mix! by TwitchSF on hearthis.at - Dance',
'upload_date': '20160328',
'timestamp': 1459186146,
'thumbnail': 're:^https?://.*\.jpg$',
'comment_count': int,
'view_count': int,
'like_count': int,
'duration': 4360,
'categories': ['Dance'],
},
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
display_id = '{artist:s} - {title:s}'.format(**m.groupdict())
webpage = self._download_webpage(url, display_id)
track_id = self._search_regex(
r'intTrackId\s*=\s*(\d+)', webpage, 'track ID')
payload = urlencode_postdata({'tracks[]': track_id})
req = sanitized_Request(self._PLAYLIST_URL, payload)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
track = self._download_json(req, track_id, 'Downloading playlist')[0]
title = '{artist:s} - {title:s}'.format(**track)
categories = None
if track.get('category'):
categories = [track['category']]
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
meta_span = r'<span[^>]+class="%s".*?</i>([^<]+)</span>'
view_count = str_to_int(self._search_regex(
meta_span % 'plays_count', webpage, 'view count', fatal=False))
like_count = str_to_int(self._search_regex(
meta_span % 'likes_count', webpage, 'like count', fatal=False))
comment_count = str_to_int(self._search_regex(
meta_span % 'comment_count', webpage, 'comment count', fatal=False))
duration = str_to_int(self._search_regex(
r'data-length="(\d+)', webpage, 'duration', fatal=False))
timestamp = str_to_int(self._search_regex(
r'<span[^>]+class="calctime"[^>]+data-time="(\d+)', webpage, 'timestamp', fatal=False))
formats = []
mp3_url = self._search_regex(
r'(?s)<a class="player-link"\s+(?:[a-zA-Z0-9_:-]+="[^"]+"\s+)*?data-mp3="([^"]+)"',
webpage, 'mp3 URL', fatal=False)
if mp3_url:
formats.append({
'format_id': 'mp3',
'vcodec': 'none',
'acodec': 'mp3',
'url': mp3_url,
})
download_path = self._search_regex(
r'<a class="[^"]*download_fct[^"]*"\s+href="([^"]+)"',
webpage, 'download URL', default=None)
if download_path:
download_url = compat_urlparse.urljoin(url, download_path)
ext_req = HEADRequest(download_url)
ext_handle = self._request_webpage(
ext_req, display_id, note='Determining extension')
ext = urlhandle_detect_ext(ext_handle)
if ext in KNOWN_EXTENSIONS:
formats.append({
'format_id': 'download',
'vcodec': 'none',
'ext': ext,
'url': download_url,
'preference': 2, # Usually better quality
})
self._sort_formats(formats)
return {
'id': track_id,
'display_id': display_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'timestamp': timestamp,
'view_count': view_count,
'comment_count': comment_count,
'like_count': like_count,
'categories': categories,
}
| unlicense | -3,868,348,171,557,122,600 | 37.82963 | 134 | 0.523846 | false |
pnedunuri/scipy | scipy/stats/_distn_infrastructure.py | 15 | 113133 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_
import sys
import keyword
import re
import inspect
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, gammaln, entr, kl_div, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _EPS, _XMAX
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative density function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
    Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (``1 - cdf`` --- sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._ctor_param)
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None,
conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
if kwds:
raise ValueError("Discrete expect does not accept **kwds.")
return self.dist.expect(func, a, loc, lb, ub, conditional)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def valarray(shape, value=nan, typecode=None):
"""Return an array of all value.
"""
out = ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
    ...     return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
out = valarray(shape(arrays[0]), value=fillvalue)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor
# of exp(-xs*ns) into the ive function to improve numerical stability
# at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
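# A hedged note: these helpers evaluate the noncentral chi-squared pdf/cdf,
# with `nc` the noncentrality parameter; e.g. _ncx2_cdf(x, 2, 1.0) should
# agree with scipy.stats.ncx2.cdf(x, 2, 1.0).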
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = inspect.getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments
shapes_list = []
for meth in meths_to_inspect:
shapes_args = inspect.getargspec(meth)
shapes_list.append(shapes_args.args)
# *args or **kwargs are not allowed w/automatic shapes
# (generic methods have 'self, x' only)
if len(shapes_args.args) > 2:
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
shapes = max(shapes_list, key=lambda x: len(x))
shapes = shapes[2:] # remove self, x,
# make sure the signatures are consistent
# (generic methods have 'self, x' only)
for item in shapes_list:
if len(item) > 2 and item[2:] != shapes:
raise TypeError('Shape arguments are inconsistent.')
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Central moments
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = np.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if np.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
# np.vectorize doesn't work when numargs == 0 in numpy 1.6.2. Once the
# lowest supported numpy version is >= 1.7.0, this special case can be
# removed (see gh-4314).
if self.numargs == 0:
place(output, cond0, self._entropy() + log(scale))
else:
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
        The value in the result arrays that indicates a value for which
        some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in all
    cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
    scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
      Only the components appearing in the string should be computed and
      returned, in the order "m", "v", "s", "k", with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
    >>> import numpy as np
    >>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
        if left is None:  # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
        if right is None:  # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
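        Examples
        --------
        A small illustrative example (result rounded; the standard normal
        density at 0 is ``1/sqrt(2*pi)``, approximately 0.3989):
        >>> from scipy.stats import norm
        >>> round(norm.pdf(0.0), 4)
        0.3989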
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
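        Examples
        --------
        A small illustrative example; by symmetry the standard normal CDF
        at 0 is one half:
        >>> from scipy.stats import norm
        >>> norm.cdf(0.0)
        0.5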
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
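        Examples
        --------
        A small illustrative example (results rounded); ``ppf`` inverts
        ``cdf``, and the 97.5th percentile of the standard normal is
        approximately 1.96:
        >>> from scipy.stats import norm
        >>> norm.ppf(0.5)
        0.0
        >>> round(norm.ppf(0.975), 2)
        1.96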
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (self.b <= x)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N * log(scale)
def _penalized_nnlf(self, theta, x):
''' Return negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0)
where theta are the parameters (including loc and scale)
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
loginf = log(_XMAX)
if np.isneginf(self.a).all() and np.isinf(self.b).all():
Nbad = 0
else:
cond0 = (x <= self.a) | (self.b <= x)
Nbad = sum(cond0)
if Nbad > 0:
x = argsreduce(~cond0, x)[0]
N = len(x)
return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
# First of all, convert fshapes params to fnum: eg for stats.beta,
# shapes='a, b'. To fix `a`, can specify either `f1` or `fa`.
# Convert the latter into the former.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)
if val is not None:
key = 'f%d' % j
if key in kwds:
raise ValueError("Duplicate entry for %s." % key)
else:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
            For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE, it
may only be locally optimal, or the optimization may fail altogether.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
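        Examples
        --------
        A small illustrative sketch: for a distribution without shape
        parameters the estimates reduce to the sample mean and standard
        deviation (the data below are random draws, so results are only
        approximate):
        >>> from scipy.stats import norm
        >>> data = norm.rvs(loc=3., scale=2., size=1000, random_state=123)
        >>> loc_hat, scale_hat = norm.fit_loc_scale(data)
        >>> # loc_hat should be close to 3 and scale_hat close to 2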
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead.
"""
return self.fit_loc_scale(data, *args)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ubound
            E[f(x)] = Integral(f(x) * dist.pdf(x))
lbound
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
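        Examples
        --------
        A small illustrative sketch (up to numerical integration error); the
        second moment of the standard normal equals 1:
        >>> from scipy.stats import norm
        >>> m = norm.expect(lambda x: x**2)
        >>> abs(m - 1.0) < 1e-6
        True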
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
## Handlers for generic case where xk and pk are given
## The _drv prefix probably means discrete random variable.
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk), axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q), axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
# many changes, originally not even a return
tot = 0.0
diff = 1e100
# pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
# handle cases with infinite support
ulimit = max(1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
llimit = min(-1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
while (pos <= self.b) and ((pos <= ulimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# use pmf because _pmf does not check support in randint and there
# might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
                    qa = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
# python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
# python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
# python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
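    Examples
    --------
    A small illustrative example (result rounded); a fair coin has entropy
    ``log(2)`` and the relative entropy of a distribution from itself is 0:
    >>> from scipy.stats import entropy
    >>> round(entropy([0.5, 0.5]), 4)
    0.6931
    >>> entropy([0.5, 0.5], qk=[0.5, 0.5])
    0.0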
"""
pk = asarray(pk)
pk = 1.0*pk / sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk, axis=0)
vec = kl_div(pk, qk)
S = sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers with non-zero
probabilities ``pk`` with ``sum(pk) = 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
        The value in the result arrays that indicates a value for which
        some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`, the main differences being:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
    >>> from numpy import exp
    >>> from scipy.special import factorial
    >>> from scipy.stats import rv_discrete
    >>> class poisson_gen(rv_discrete):
    ...     "Poisson distribution"
    ...     def _pmf(self, k, mu):
    ...         return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
    >>> import numpy as np
    >>> from scipy import stats
    >>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk), indx, 0)
self.pk = take(ravel(self.pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = dict(zip(self.xk, self.pk))
self.qvals = np.cumsum(self.pk, axis=0)
self.F = dict(zip(self.xk, self.qvals))
decreasing_keys = sorted(self.F.keys(), reverse=True)
self.Finv = dict((self.F[k], k) for k in decreasing_keys)
self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self._construct_argparser(meths_to_inspect=[_drv_pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
else:
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# backwards compat. was removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vec_generic_moment = np.deprecate(_vec_generic_moment,
"vec_generic_moment",
"generic_moment")
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
#discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
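        Examples
        --------
        A small illustrative example (result rounded); the probability of
        zero events under a Poisson rate of 2 is ``exp(-2)``:
        >>> from scipy.stats import poisson
        >>> round(poisson.pmf(0, 2), 6)
        0.135335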
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments': 'm'}))
val = self.pmf(mu, *args)
ent = entr(val)
k = 1
term = 1.0
while (abs(term) > _EPS):
val = self.pmf(mu+k, *args)
term = entr(val)
val = self.pmf(mu-k, *args)
term += entr(val)
k += 1
ent += term
return ent
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for integration, default is set to the
            support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
Default is False.
Returns
-------
expect : float
Expected value.
Notes
-----
* function is not vectorized
        * accuracy: uses self.moment_tol as the stopping criterion;
            for a heavy-tailed distribution such as zipf(4), the accuracy of
            the mean and variance in the example is only 1e-5, and
            increasing the precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to
evaluate could be added as keyword parameter, to evaluate functions
with non-monotonic shapes, points include integers in (-suppnmin,
suppnmin)
        * maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
integers are evaluated)
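        Examples
        --------
        A small illustrative sketch (up to summation error); the expectation
        of the identity under Poisson(mu=2) is its rate parameter, 2:
        >>> from scipy.stats import poisson
        >>> m = poisson.expect(args=(2.0,))
        >>> abs(m - 2.0) < 1e-6
        True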
"""
# moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
# avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 # minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
# work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1, *args)
else:
invfac = 1 - self.cdf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) # check limits
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
# handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while ((pos >= lb) and (diff > self.moment_tol) and
count <= maxcount):
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot/invfac
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| bsd-3-clause | 5,672,481,910,288,009,000 | 33.397385 | 103 | 0.547515 | false |
rukku/inasafe | realtime/test_sftp_client.py | 3 | 1487 | """
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Ftp Client Test Cases.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__version__ = '0.5.0'
__date__ = '10/01/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
from sftp_client import SFtpClient
import os
class SFtpClientTest(unittest.TestCase):
def test_get_list_events(self):
"""Test to get all event ids
"""
my_ssh_client = SFtpClient()
assert(my_ssh_client is not None)
def test_download_path(self):
"""Test to download all directories and files under a path
"""
my_ssh_client = SFtpClient(the_working_dir='shakemaps')
assert(my_ssh_client is not None)
remote_path = os.path.join(
my_ssh_client.sftp.getcwd(), '20130113003746/output/grid.xml')
local_path = '/tmp/inasafe/'
print local_path
my_ssh_client.download_path(remote_path, local_path)
if __name__ == '__main__':
suite = unittest.makeSuite(SFtpClientTest, 'test')
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| gpl-3.0 | -6,185,071,771,989,733,000 | 31.326087 | 78 | 0.661063 | false |
hydroffice/hyo_ssp | hydroffice/ssp/io/udpio.py | 1 | 3813 | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import socket
import threading
from abc import ABCMeta
log = logging.getLogger(__name__)
from .base_io import BaseIo
class UdpIO(BaseIo):
__metaclass__ = ABCMeta
@classmethod
def replace_non_ascii_byte(cls, txt):
return ''.join([i if ord(i) < 128 else ',' for i in txt])
def __init__(self, listen_port, desired_datagrams, timeout):
super(UdpIO, self).__init__()
self.listen_port = listen_port
self.desired_datagrams = desired_datagrams
self.timeout = timeout
self.data = None
self.sender = None
self.sock_in = None
# A few controls on behaviour
self.do_listen = False
self.listening = False
# Goodies for logging to memory
self.logging_to_memory = False
self.logged_data = []
# Goodies for logging to file
self.logging_to_file = False
self.logfile = None
self.logfile_name = None
def start_logging(self):
""" This method is meant to be over-ridden """
log.error("to be overloaded")
def stop_logging(self):
""" This method is meant to be over-ridden """
log.error("to be overloaded")
def clear_logged_data(self):
self.logged_data = []
def open_log_file(self, fname):
self.logfile_name = fname
self.logfile = open(fname, "wb")
return
def close_log_file(self):
self.logfile.close()
self.logfile = None
self.logfile_name = None
return
def start_listen(self, logfilename=''):
if logfilename != '':
self.open_log_file(logfilename)
self.logging_to_file = True
self.listening = True
log.info("starting listen thread")
threading.Thread(target=self.listen).start()
log.info("started listen thread")
def listen(self):
self.sock_in = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock_in.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock_in.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2 ** 16)
if self.timeout > 0:
self.sock_in.settimeout(self.timeout)
try:
self.sock_in.bind(("0.0.0.0", self.listen_port))
except socket.error as e:
self.listening = False
self.sock_in.close()
try:
log.warning("port %d already bound? Not listening anymore. Error: %s" % (self.listen_port, e))
except UnicodeDecodeError:
log.warning("port %d already bound? Not listening anymore." % self.listen_port)
log.warning("issue: %s" % self.replace_non_ascii_byte(e))
return
log.info("going to listen on port %s for datagrams %s" % (self.listen_port, self.desired_datagrams))
self.do_listen = True
self.listening = True
while self.do_listen:
try:
self.data, self.sender = self.sock_in.recvfrom(2 ** 16)
except socket.timeout:
# log.info("socket timeout")
continue
if self.logging_to_file and self.logfile:
log.info("going to write to output file %s length is %s bytes"
% (self.logfile_name, len(self.data)))
self.log_to_file(self.data)
if self.logging_to_memory:
self.logged_data.append(self.data)
self.parse()
self.sock_in.close()
log.info("%s done listening!" % self.name)
def stop_listen(self):
self.do_listen = False
def parse(self):
return
def log_to_file(self, data):
self.logfile.write(data)
| lgpl-3.0 | 8,809,588,958,966,625,000 | 28.10687 | 110 | 0.579072 | false |
levelrf/level_basestation | grc/python/FlowGraph.py | 1 | 5095 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import expr_utils
from .. base.FlowGraph import FlowGraph as _FlowGraph
from .. gui.FlowGraph import FlowGraph as _GUIFlowGraph
import re
_variable_matcher = re.compile('^(variable\w*)$')
_parameter_matcher = re.compile('^(parameter)$')
class FlowGraph(_FlowGraph, _GUIFlowGraph):
def __init__(self, **kwargs):
_FlowGraph.__init__(self, **kwargs)
_GUIFlowGraph.__init__(self)
self._eval_cache = dict()
def _eval(self, code, namespace, namespace_hash):
"""
Evaluate the code with the given namespace.
Args:
code: a string with python code
namespace: a dict representing the namespace
namespace_hash: a unique hash for the namespace
Returns:
the resultant object
"""
if not code: raise Exception, 'Cannot evaluate empty statement.'
my_hash = hash(code) ^ namespace_hash
#cache if does not exist
if not self._eval_cache.has_key(my_hash):
self._eval_cache[my_hash] = eval(code, namespace, namespace)
#return from cache
return self._eval_cache[my_hash]
def get_io_signaturev(self, direction):
"""
Get a list of io signatures for this flow graph.
Args:
direction: a string of 'in' or 'out'
Returns:
a list of dicts with: type, label, vlen, size
"""
sorted_pads = {
'in': self.get_pad_sources(),
'out': self.get_pad_sinks(),
}[direction]
#load io signature
return [{
'label': str(pad.get_param('label').get_evaluated()),
'type': str(pad.get_param('type').get_evaluated()),
'vlen': str(pad.get_param('vlen').get_evaluated()),
'size': pad.get_param('type').get_opt('size'),
'optional': bool(pad.get_param('optional').get_evaluated()),
} for pad in sorted_pads]
def get_pad_sources(self):
"""
Get a list of pad source blocks sorted by id order.
Returns:
a list of pad source blocks in this flow graph
"""
pads = filter(lambda b: b.get_key() == 'pad_source', self.get_enabled_blocks())
return sorted(pads, lambda x, y: cmp(x.get_id(), y.get_id()))
def get_pad_sinks(self):
"""
Get a list of pad sink blocks sorted by id order.
Returns:
a list of pad sink blocks in this flow graph
"""
pads = filter(lambda b: b.get_key() == 'pad_sink', self.get_enabled_blocks())
return sorted(pads, lambda x, y: cmp(x.get_id(), y.get_id()))
def get_imports(self):
"""
Get a set of all import statments in this flow graph namespace.
Returns:
a set of import statements
"""
imports = sum([block.get_imports() for block in self.get_enabled_blocks()], [])
imports = sorted(set(imports))
return imports
def get_variables(self):
"""
Get a list of all variables in this flow graph namespace.
Exclude paramterized variables.
Returns:
a sorted list of variable blocks in order of dependency (indep -> dep)
"""
variables = filter(lambda b: _variable_matcher.match(b.get_key()), self.get_enabled_blocks())
return expr_utils.sort_objects(variables, lambda v: v.get_id(), lambda v: v.get_var_make())
def get_parameters(self):
"""
Get a list of all paramterized variables in this flow graph namespace.
Returns:
a list of paramterized variables
"""
parameters = filter(lambda b: _parameter_matcher.match(b.get_key()), self.get_enabled_blocks())
return parameters
def rewrite(self):
"""
Flag the namespace to be renewed.
"""
self._renew_eval_ns = True
_FlowGraph.rewrite(self)
def evaluate(self, expr):
"""
Evaluate the expression.
Args:
expr: the string expression
@throw Exception bad expression
Returns:
the evaluated data
"""
if self._renew_eval_ns:
self._renew_eval_ns = False
#reload namespace
n = dict()
#load imports
for imp in self.get_imports():
try: exec imp in n
except: pass
#load parameters
np = dict()
for parameter in self.get_parameters():
try:
e = eval(parameter.get_param('value').to_code(), n, n)
np[parameter.get_id()] = e
except: pass
n.update(np) #merge param namespace
#load variables
for variable in self.get_variables():
try:
e = eval(variable.get_param('value').to_code(), n, n)
n[variable.get_id()] = e
except: pass
#make namespace public
self.n = n
self.n_hash = hash(str(n))
#evaluate
e = self._eval(expr, self.n, self.n_hash)
return e
| gpl-3.0 | 6,510,629,608,634,344,000 | 28.114286 | 97 | 0.674975 | false |
plq/spyne | examples/django/rpctest/core/models.py | 2 | 2571 | # coding: utf-8
"""Rpc test models."""
from django.core.validators import MinLengthValidator, MaxLengthValidator
from django.db import models
class FieldContainer(models.Model):
"""Test model for ``DjangoMapper``."""
char_field = models.CharField(max_length=32, default='test')
char_field_nullable = models.CharField(max_length=32, null=True)
slug_field = models.SlugField(max_length=32, unique=True)
text_field = models.TextField(default='text_field')
email_field = models.EmailField()
boolean_field = models.BooleanField(default=True)
integer_field = models.IntegerField(default=1)
positive_integer_field = models.PositiveIntegerField(default=1)
float_field = models.FloatField(default=1)
decimal_field = models.DecimalField(max_digits=10, decimal_places=4,
default=1)
time_field = models.TimeField(auto_now_add=True)
date_field = models.DateField(auto_now_add=True)
datetime_field = models.DateTimeField(auto_now_add=True)
foreign_key = models.ForeignKey('self', null=True,
related_name='related_containers', on_delete=models.CASCADE)
one_to_one_field = models.OneToOneField('self', null=True,
on_delete=models.CASCADE)
custom_foreign_key = models.ForeignKey('RelatedFieldContainer', null=True,
related_name='related_fieldcontainers', on_delete=models.CASCADE)
custom_one_to_one_field = models.OneToOneField('RelatedFieldContainer',
null=True, on_delete=models.CASCADE)
url_field = models.URLField(default='http://example.com')
file_field = models.FileField(upload_to='test_file', null=True)
excluded_field = models.CharField(max_length=32, default='excluded')
blank_field = models.CharField(max_length=32, blank=True)
length_validators_field = models.CharField(
max_length=32, null=True, validators=[MinLengthValidator(3),
MaxLengthValidator(10)])
class RelatedFieldContainer(models.Model):
"""Related container model to test related fields."""
id = models.CharField(max_length=30, primary_key=True)
class User(models.Model):
"""Model for tests of relation field mapper."""
name = models.CharField(max_length=50)
class UserProfile(models.Model):
"""Related model for tests of relation field mapper."""
user = models.ForeignKey(User, on_delete=models.CASCADE)
data = models.CharField(max_length=50)
| lgpl-2.1 | 1,756,771,281,544,145,400 | 37.954545 | 80 | 0.669778 | false |
openqt/algorithms | leetcode/python/ac/lc438-find-all-anagrams-in-a-string.py | 1 | 2271 | # coding=utf-8
import unittest
"""438. Find All Anagrams in a String
https://leetcode.com/problems/find-all-anagrams-in-a-string/description/
Given a string **s** and a **non-empty** string **p** , find all the start
indices of **p** 's anagrams in **s**.
Strings consists of lowercase English letters only and the length of both
strings **s** and **p** will not be larger than 20,100.
The order of output does not matter.
**Example 1:**
**Input:**
s: "cbaebabacd" p: "abc"
**Output:**
[0, 6]
**Explanation:**
The substring with start index = 0 is "cba", which is an anagram of "abc".
The substring with start index = 6 is "bac", which is an anagram of "abc".
**Example 2:**
**Input:**
s: "abab" p: "ab"
**Output:**
[0, 1, 2]
**Explanation:**
The substring with start index = 0 is "ab", which is an anagram of "ab".
The substring with start index = 1 is "ba", which is an anagram of "ab".
The substring with start index = 2 is "ab", which is an anagram of "ab".
Similar Questions:
Valid Anagram (valid-anagram)
Permutation in String (permutation-in-string)
"""
class Solution(object):
def findAnagrams(self, s, p):
"""
:type s: str
:type p: str
:rtype: List[int]
"""
if len(s) < len(p): return []
n, hp, ht = 0, {}, {}
while n < len(p):
hp[p[n]] = hp.setdefault(p[n], 0) + 1
ht[s[n]] = ht.setdefault(s[n], 0) + 1
n += 1
vals = [0] if hp == ht else []
while n < len(s):
a, b = s[n - len(p)], s[n]
if ht[a] <= 1:
del ht[a]
else:
ht[a] -= 1
ht[b] = ht.setdefault(b, 0) + 1
if hp == ht:
vals.append(n - len(p) + 1)
n += 1
return vals
class T(unittest.TestCase):
def test(self):
s = Solution()
self.assertEqual(s.findAnagrams("", 'a'), [])
self.assertEqual(s.findAnagrams("af", 'be'), [])
self.assertEqual(s.findAnagrams("cbaebabacd", 'abc'), [0, 6])
self.assertEqual(s.findAnagrams("abab", 'ab'), [0, 1, 2])
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 5,922,783,463,375,500,000 | 23.684783 | 78 | 0.521797 | false |
oaubert/advene | lib/advene/plugins/hpi_motion_dynamics.py | 1 | 8797 | # Motion Dynamics Extractor plugin - estimate perceived motion changes based on optical flow computation.
#
# Copyright (C) 2021 Christian Hentschel ([email protected]), Jacob Löbkens ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gettext import gettext as _
try:
import cv2
import numpy as np
missing_modules = False
except:
missing_modules = True
from advene.util.gstimporter import GstImporter
name = "Motion Dynamics Extractor"
def register(controller=None):
if not missing_modules:
controller.register_importer(MotionDynamicsExtractor)
return True
class MotionDynamicsExtractor(GstImporter):
name = _("Motion Dynamics Extractor")
annotation_filter = True
def __init__(self, *p, **kw):
super(MotionDynamicsExtractor, self).__init__(*p, **kw)
self.frame_width = 240
self.begin_timestamps = []
self.end_timestamps = []
self.begin_frame_pixbufs = []
self.end_frame_pixbuf = None
self.last_frame_pixbuf = None
self.last_frame_timestamp = 0
self.magnitudes = []
self.step_size = 500
self.window_size = 500
self.clip_percentile = 100
self.segment_timestamps = list()
self.optionparser.add_option(
"--step_size", action="store", type="int",
dest="step_size", default=500,
help=_(
"The start frames for optical flow computation are extracted at `stepsize` intervals (in ms)."),
)
self.optionparser.add_option(
"--window_size", action="store", type="int",
dest="window_size", default=500,
help=_("The end frames for optical flow computation are extracted at "
"`stepsize+windowsize` intervals (in ms)."),
)
self.optionparser.add_option(
"--clip_percentile", action="store", type=int,
dest="clip_percentile", default=100,
help=_("Magnitudes above the given percentile are clipped before scaling."),
)
def check_requirements(self):
"""Check if external requirements for the importers are met.
It returns a list of strings describing the unmet
requirements. If the list is empty, then all requirements are
met.
"""
unmet_requirements = []
if self.step_size <= 0:
unmet_requirements.append(_("stepsize must be > 0"))
if self.window_size <= 0:
unmet_requirements.append(_("windowsize must be > 0"))
if self.clip_percentile <= 0 or self.clip_percentile > 100:
unmet_requirements.append(_("clip_percentile must be in ]0;100]"))
return unmet_requirements
def normalize_and_clip(self):
segment_scores = list()
for i, start in enumerate(self.begin_timestamps):
end = start + self.step_size
idx = np.logical_or(
np.logical_and(np.asarray(self.begin_timestamps) >= start, np.asarray(self.begin_timestamps) < end),
np.logical_and(np.asarray(self.end_timestamps) >= start, np.asarray(self.end_timestamps) < end))
segment_scores.append(np.mean(np.asarray(self.magnitudes)[idx]))
# normalize
scores = segment_scores / np.percentile(segment_scores, self.clip_percentile)
scores = np.clip(scores, a_min=0, a_max=1) * 100.
scores = list(np.round(scores, decimals=2))
self.convert([{'begin': self.begin_timestamps[0],
'end': self.end_timestamps[-1],
'content': " ".join(["{s:.2f}".format(s=s) for s in scores])}])
# def generate_normalized_annotations(self):
# segment_scores = list()
#
# self.progress(0, _("Generating annotations"))
# for i, start in enumerate(self.begin_timestamps):
# self.progress(i / len(self.begin_timestamps))
# end = start + self.step_size
# idx = np.logical_or(
# np.logical_and(np.asarray(self.begin_timestamps) >= start, np.asarray(self.begin_timestamps) < end),
# np.logical_and(np.asarray(self.end_timestamps) >= start, np.asarray(self.end_timestamps) < end))
# segment_scores.append(np.mean(np.asarray(self.magnitudes)[idx]))
#
# # normalize
# segment_scores /= np.max(segment_scores)
# segment_scores *= 100.
#
# self.convert([{'begin': self.begin_timestamps[0],
# 'end': self.end_timestamps[-1],
# 'content': " ".join(["{s:.2f}".format(s=s) for s in segment_scores])}])
def do_finalize(self):
while len(self.end_timestamps) < len(self.begin_timestamps):
begin_frame = self.begin_frame_pixbufs[len(self.end_timestamps)]
flow = cv2.calcOpticalFlowFarneback(begin_frame, self.last_frame_pixbuf,
flow=None,
pyr_scale=0.5,
levels=3,
winsize=15,
iterations=3,
poly_n=5,
poly_sigma=1.2,
flags=0)
# mag and ang are matrices with the shape of the frame
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
self.magnitudes.append(np.sum(mag))
self.end_timestamps.append(self.last_frame_timestamp)
assert len(self.begin_timestamps) == len(self.end_timestamps) == len(self.magnitudes)
self.normalize_and_clip()
return True
def process_frame(self, frame):
cur_ts = int(frame['date'])
self.last_frame_timestamp = cur_ts
channels = 1
width = int(self.frame_width)
height = int(len(frame['data']) / channels / width)
cur_pixbuf = np.frombuffer(frame['data'], dtype=np.uint8).reshape((height, width, channels))
# always keep the last frame in the video in order to compute the last chunks:
self.last_frame_pixbuf = cur_pixbuf
if not len(self.begin_timestamps):
self.begin_timestamps.append(cur_ts)
self.begin_frame_pixbufs.append(cur_pixbuf)
elif cur_ts >= self.begin_timestamps[-1] + self.step_size:
self.begin_timestamps.append(cur_ts)
self.begin_frame_pixbufs.append(cur_pixbuf)
if len(self.begin_timestamps) > len(self.end_timestamps):
if cur_ts >= self.begin_timestamps[len(self.end_timestamps)] + self.window_size:
begin_frame = self.begin_frame_pixbufs[len(self.end_timestamps)]
flow = cv2.calcOpticalFlowFarneback(begin_frame, cur_pixbuf,
flow=None,
pyr_scale=0.5,
levels=3,
winsize=15,
iterations=3,
poly_n=5,
poly_sigma=1.2,
flags=0)
# mag and ang are matrices with shape of the frame
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
self.magnitudes.append(np.sum(mag))
self.end_timestamps.append(cur_ts)
return True
def setup_importer(self, filename):
self.ensure_new_type('motiondynamics',
title=_("Motion Dynamics Extractor"),
mimetype='application/x-advene-values',
description=_("Motion dynamics from optical flow extraction"))
return "videoconvert ! videoscale ! video/x-raw,width={frame_width},pixel-aspect-ratio=(fraction)1/1,format=GRAY8".format(
frame_width=self.frame_width)
| gpl-2.0 | 5,403,479,900,569,239,000 | 43.424242 | 130 | 0.559231 | false |
utkbansal/kuma | kuma/wiki/views/utils.py | 5 | 2432 | # -*- coding: utf-8 -*-
from datetime import datetime
import newrelic.agent
from kuma.core.cache import memcache
from ..constants import DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL
from ..models import Document
def split_slug(slug):
"""
Utility function to do basic slug splitting
"""
slug_split = slug.split('/')
length = len(slug_split)
root = None
seo_root = ''
bad_seo_roots = ['Web']
if length > 1:
root = slug_split[0]
if root in bad_seo_roots:
if length > 2:
seo_root = root + '/' + slug_split[1]
else:
seo_root = root
specific = slug_split.pop()
parent = '/'.join(slug_split)
return { # with this given: "some/kind/of/Path"
'specific': specific, # 'Path'
'parent': parent, # 'some/kind/of'
'full': slug, # 'some/kind/of/Path'
'parent_split': slug_split, # ['some', 'kind', 'of']
'length': length, # 4
'root': root, # 'some'
'seo_root': seo_root, # 'some'
}
@newrelic.agent.function_trace()
def document_last_modified(request, document_slug, document_locale):
"""
Utility function to derive the last modified timestamp of a document.
Mainly for the @condition decorator.
"""
# build an adhoc natural cache key to not have to do DB query
adhoc_natural_key = (document_locale, document_slug)
natural_key_hash = Document.natural_key_hash(adhoc_natural_key)
cache_key = DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL % natural_key_hash
try:
last_mod = memcache.get(cache_key)
if last_mod is None:
doc = Document.objects.get(locale=document_locale,
slug=document_slug)
last_mod = doc.fill_last_modified_cache()
# Convert the cached Unix epoch seconds back to Python datetime
return datetime.fromtimestamp(float(last_mod))
except Document.DoesNotExist:
return None
def document_form_initial(document):
"""
Return a dict with the document data pertinent for the form.
"""
return {
'title': document.title,
'slug': document.slug,
'category': document.category,
'is_localizable': document.is_localizable,
'tags': list(document.tags.values_list('name', flat=True))
}
| mpl-2.0 | -968,280,042,417,950,200 | 29.024691 | 75 | 0.582237 | false |
saimn/doit | tests/test_cmd_auto.py | 7 | 4873 | import time
from multiprocessing import Process
import pytest
from doit.cmdparse import DefaultUpdate
from doit.task import Task
from doit.cmd_base import TaskLoader
from doit import filewatch
from doit import cmd_auto
from .conftest import CmdFactory
# skip all tests in this module if platform not supported
platform = filewatch.get_platform_system()
pytestmark = pytest.mark.skipif(
'platform not in filewatch.FileModifyWatcher.supported_platforms')
class TestFindFileDeps(object):
def find_deps(self, sel_tasks):
tasks = {
't1': Task("t1", [""], file_dep=['f1']),
't2': Task("t2", [""], file_dep=['f2'], task_dep=['t1']),
't3': Task("t3", [""], file_dep=['f3'], setup=['t1']),
}
return cmd_auto.Auto._find_file_deps(tasks, sel_tasks)
def test_find_file_deps(self):
assert set(['f1']) == self.find_deps(['t1'])
assert set(['f1', 'f2']) == self.find_deps(['t2'])
assert set(['f1', 'f3']) == self.find_deps(['t3'])
class TestDepChanged(object):
def test_changed(self, dependency1):
started = time.time()
assert not cmd_auto.Auto._dep_changed([dependency1], started, [])
assert cmd_auto.Auto._dep_changed([dependency1], started-100, [])
assert not cmd_auto.Auto._dep_changed([dependency1], started-100,
[dependency1])
class FakeLoader(TaskLoader):
def __init__(self, task_list, dep_file):
self.task_list = task_list
self.dep_file = dep_file
def load_tasks(self, cmd, params, args):
return self.task_list, {'verbosity':2, 'dep_file':self.dep_file}
class TestAuto(object):
def test_invalid_args(self, dependency1, depfile_name):
t1 = Task("t1", [""], file_dep=[dependency1])
task_loader = FakeLoader([t1], depfile_name)
cmd = CmdFactory(cmd_auto.Auto, task_loader=task_loader)
# terminates with error number
assert cmd.parse_execute(['t2']) == 3
def test_run_callback(self, monkeypatch):
result = []
def mock_cmd(callback, shell=None):
result.append(callback)
monkeypatch.setattr(cmd_auto, 'call', mock_cmd)
# success
result = []
cmd_auto.Auto._run_callback(0, 'success', 'failure')
assert 'success' == result[0]
# failure
result = []
cmd_auto.Auto._run_callback(3, 'success', 'failure')
assert 'failure' == result[0]
# nothing executed
result = []
cmd_auto.Auto._run_callback(0, None , None)
assert 0 == len(result)
cmd_auto.Auto._run_callback(1, None , None)
assert 0 == len(result)
def test_run_wait(self, dependency1, target1, depfile_name):
def ok():
with open(target1, 'w') as fp:
fp.write('ok')
t1 = Task("t1", [ok], file_dep=[dependency1])
cmd = CmdFactory(cmd_auto.Auto,
task_loader=FakeLoader([t1], depfile_name))
run_wait_proc = Process(target=cmd.run_watch,
args=(DefaultUpdate(), []))
run_wait_proc.start()
# wait until task is executed
for x in range(5):
try:
got = open(target1, 'r').read()
print(got)
if got == 'ok':
break
except:
print('busy')
time.sleep(0.1)
else: # pragma: no cover
raise Exception("target not created")
# write on file to terminate process
fd = open(dependency1, 'w')
fd.write("hi" + str(time.asctime()))
fd.close()
run_wait_proc.join(.5)
if run_wait_proc.is_alive(): # pragma: no cover
# this test is very flaky so we give it one more chance...
# write on file to terminate process
fd = open(dependency1, 'w')
fd.write("hi" + str(time.asctime()))
fd.close()
run_wait_proc.join(1)
if run_wait_proc.is_alive(): # pragma: no cover
run_wait_proc.terminate()
raise Exception("process not terminated")
assert 0 == run_wait_proc.exitcode
def test_execute(self, monkeypatch):
# use dumb operation instead of executing RUN command and waiting event
def fake_run(self, params, args): # pragma: no cover
5 + 2
monkeypatch.setattr(cmd_auto.Auto, 'run_watch', fake_run)
# after join raise exception to stop AUTO command
original = cmd_auto.Process.join
def join_interrupt(self):
original(self)
raise KeyboardInterrupt()
monkeypatch.setattr(cmd_auto.Process, 'join', join_interrupt)
cmd = CmdFactory(cmd_auto.Auto)
cmd.execute(None, None)
| mit | -1,066,652,798,924,691,000 | 31.925676 | 79 | 0.567617 | false |
fvpolpeta/devide | modules/user/muscleLinesToSurface.py | 7 | 12661 | # muscleLinesToSurface copyright (c) 2003 Charl P. Botha http://cpbotha.net/
# $Id$
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import module_utils
import vtk
class muscleLinesToSurface(ModuleBase, NoConfigModuleMixin):
"""Given muscle centre lines marked with 0-valued voxels, calculate a
continuous surface through these marked lines.
Make sure that ONLY the desired voxels have 0 value. You can do this
with for instance the doubleThreshold DeVIDE module. This module
calculates a 3D distance field, processes this field to yield a signed
distance field, extracts an isosurface and then clips off extraneous
surfaces.
NOTE: there should be SOME voxels on ALL slices, i.e. black slices are
not allowed. Handling this graciously would add far too much complexity
to this code. We're already handling breaks in the x-y plane.
$Revision: 1.1 $
"""
def __init__(self, module_manager):
# initialise our base class
ModuleBase.__init__(self, module_manager)
# initialise any mixins we might have
NoConfigModuleMixin.__init__(self)
self._distance = vtk.vtkImageEuclideanDistance()
# this seems to yield more accurate results in this case
# it would probably be better to calculate only 2d distance fields
self._distance.ConsiderAnisotropyOff()
self._xEndPoints = []
self._noFlipXes = []
self._pf1 = vtk.vtkProgrammableFilter() # yeah
self._pf1.SetInput(self._distance.GetOutput())
self._pf1.SetExecuteMethod(self.pf1Execute)
self._pf2 = vtk.vtkProgrammableFilter()
self._pf2.SetInput(self._pf1.GetOutput())
self._pf2.SetExecuteMethod(self.pf2Execute)
self._mc = vtk.vtkMarchingCubes()
self._mc.SetInput(self._pf1.GetOutput())
self._mc.SetValue(0,0.1)
self._iv = vtk.vtkImplicitVolume()
self._iv.SetVolume(self._pf2.GetOutput())
self._cpd = vtk.vtkClipPolyData()
self._cpd.SetClipFunction(self._iv)
self._cpd.SetInput(self._mc.GetOutput())
#self._cpd.InsideOutOn()
module_utils.setup_vtk_object_progress(self, self._distance,
'Calculating distance field...')
module_utils.setup_vtk_object_progress(self, self._pf1,
'Signing distance field...')
module_utils.setup_vtk_object_progress(self, self._pf2,
'Creating implicit volume...')
module_utils.setup_vtk_object_progress(self, self._mc,
'Extracting isosurface...')
module_utils.setup_vtk_object_progress(self, self._cpd,
'Clipping isosurface...')
self._iObj = self._distance
self._oObj = self._cpd
#self._oObj = self._pf2
self._viewFrame = self._createViewFrame({'distance' :
self._distance,
'pf1' :
self._pf1,
'pf2' :
self._pf2,
'mc' :
self._mc,
'cpd' :
self._cpd})
def pf1Execute(self):
inputData = self._pf1.GetStructuredPointsInput()
outputData = self._pf1.GetOutput()
# we would like to operate on the WHOLE shebang
inputData.UpdateInformation() # SetUpdateExtentToWholeExtent precond
inputData.SetUpdateExtentToWholeExtent()
inputData.Update()
#print "Extent: %s" % (inputData.GetUpdateExtent(),)
dimx, dimy, dimz = inputData.GetDimensions()
#print "Dimensions: %s" % ((dimx, dimy, dimz),)
if dimx == 0 or dimy == 0 or dimz == 0:
# FIXME: say something about what went wrong
outputData.SetExtent(0, -1, 0, -1, 0, -1)
outputData.SetUpdateExtent(0, -1, 0, -1, 0, -1)
outputData.SetWholeExtent(0, -1, 0, -1, 0, -1)
outputData.AllocateScalars()
return
outputData.DeepCopy(inputData)
xdim = inputData.GetWholeExtent()[1]
ydim = inputData.GetWholeExtent()[3]
zdim = inputData.GetWholeExtent()[5]
self._xEndPoints = [[] for dummy in range(zdim + 1)]
self._noFlipXes = [{} for dummy in range(zdim + 1)]
for z in xrange(zdim + 1):
x = 0
startPointFound = False
while not startPointFound and x != xdim + 1:
for y in xrange(ydim + 1):
val = inputData.GetScalarComponentAsDouble(x,y,z,0)
if val == 0:
startPointFound = True
self._xEndPoints[z].append((x,y))
# this will break out of the for loop (no else clause
# will be executed)
break
x += 1
if not startPointFound:
wx.LogError("ERROR: startPoint not found on slice %d." % (z,))
return
x = xdim
endPointFound = False
while not endPointFound and x != -1:
for y in xrange(ydim + 1):
val = inputData.GetScalarComponentAsDouble(x,y,z,0)
if val == 0:
endPointFound = True
self._xEndPoints[z].append((x,y))
break
x -= 1
if not endPointFound:
wx.LogError("ERROR: endPoint not found on slice %d." % (z,))
return
prevFlipy = -1
for x in xrange(self._xEndPoints[z][0][0],
self._xEndPoints[z][1][0] + 1):
signFlip = False
signFlipped = False
prevVal = -1
for y in xrange(ydim + 1):
val = inputData.GetScalarComponentAsDouble(x,y,z,0)
if val == 0 and prevVal != 0:
signFlip = not signFlip
signFlipped = True
prevFlipy = y
if signFlip:
outputData.SetScalarComponentFromDouble(x,y,z,0,-val)
else:
# this is necessary (CopyStructure doesn't do it)
outputData.SetScalarComponentFromDouble(x,y,z,0,val)
prevVal = val
if not signFlipped:
# if we went right through without striking a voxel,
# note the x position - we should not correct it in
# our correction step!
self._noFlipXes[z][x] = prevFlipy
elif x - 1 in self._noFlipXes[z]:
# the sign has flipped again, the previous col was a
# noflip,
# so adjust the LAST flipped X's y coord (for the masking
# in the implicitVolume)
self._noFlipXes[z][x-1] = prevFlipy
# now check the bottom row of the distance field!
for x in xrange(self._xEndPoints[z][0][0],
self._xEndPoints[z][1][0] + 1):
val = outputData.GetScalarComponentAsDouble(x,ydim,z,0)
if val > 0 and x not in self._noFlipXes[z]:
# this means it's screwed, we have to redo from bottom up
# first make all positive until we reach 0 again
y = ydim
while val != 0 and y != -1:
val = outputData.GetScalarComponentAsDouble(x,y,z,0)
if val > 0:
outputData.SetScalarComponentFromDouble(
x,y,z,0,-val)
y -= 1
# FIXME: continue here... past the first 0, we have to
# check for each voxel whether it's inside or outside
self._pf1.UpdateProgress(z / float(zdim))
# end for z
def pf2Execute(self):
"""Mask unwanted surface out with negative numbers. I'm evil.
"""
inputData = self._pf2.GetStructuredPointsInput()
outputData = self._pf2.GetOutput()
# we would like to operate on the WHOLE shebang
inputData.UpdateInformation()
inputData.SetUpdateExtentToWholeExtent()
inputData.Update()
dimx, dimy, dimz = inputData.GetDimensions()
if dimx == 0 or dimy == 0 or dimz == 0:
# FIXME: say something about what went wrong
outputData.SetExtent(0, -1, 0, -1, 0, -1)
outputData.SetUpdateExtent(0, -1, 0, -1, 0, -1)
outputData.SetWholeExtent(0, -1, 0, -1, 0, -1)
outputData.AllocateScalars()
return
outputData.DeepCopy(inputData)
xdim = inputData.GetWholeExtent()[1]
ydim = inputData.GetWholeExtent()[3]
zdim = inputData.GetWholeExtent()[5]
for z in xrange(zdim + 1):
x0 = self._xEndPoints[z][0][0]
y0 = self._xEndPoints[z][0][1]
for y in xrange(y0, ydim + 1):
for x in xrange(0, x0):
val = inputData.GetScalarComponentAsDouble(x,y,z,0)
# make this negative as well, so that the surface will
# get nuked by this implicitvolume
outputData.SetScalarComponentFromDouble(x,y,z,0,-val)
x1 = self._xEndPoints[z][1][0]
y1 = self._xEndPoints[z][1][1]
for y in xrange(y1, ydim + 1):
for x in xrange(x1 + 1, xdim + 1):
val = inputData.GetScalarComponentAsDouble(x,y,z,0)
# make this negative as well, so that the surface will
# get nuked by this implicitvolume
outputData.SetScalarComponentFromDouble(x,y,z,0,-val)
self._pf2.UpdateProgress(z / float(zdim))
for xf,yf in self._noFlipXes[z].items():
for y in xrange(yf, ydim + 1):
val = inputData.GetScalarComponentAsDouble(xf,y,z,0)
# this was noflip data, so it used to be positive
# we now make it negative, to get rid of all
# surfaces that so originated
outputData.SetScalarComponentFromDouble(xf,y,z,0,-val)
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# don't forget to call the close() method of the vtkPipeline mixin
NoConfigModuleMixin.close(self)
# get rid of our reference
del self._distance
del self._mc
del self._iv
del self._cpd
del self._pf1
del self._pf2
del self._iObj
del self._oObj
def get_input_descriptions(self):
return ('vtkImageData',)
def set_input(self, idx, inputStream):
self._iObj.SetInput(inputStream)
if inputStream:
# we need the poor old doubleThreshold to give us
# everything that it has. It's quite stingy with
# its UpdateExtent
inputStream.SetUpdateExtentToWholeExtent()
def get_output_descriptions(self):
return (self._oObj.GetOutput().GetClassName(),)
def get_output(self, idx):
return self._oObj.GetOutput()
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
pass
def config_to_view(self):
pass
def execute_module(self):
self._oObj.GetOutput().Update()
#print str(self._pf2.GetOutput().GetPointData().GetScalars())
def view(self, parent_window=None):
self._viewFrame.Show(True)
self._viewFrame.Raise()
| bsd-3-clause | 4,655,593,225,886,525,000 | 37.60061 | 78 | 0.515599 | false |
maxwhosevillage/django-simple-utilities | utilities/admin/__init__.py | 1 | 36603 | # coding: utf-8
import re
import StringIO
import pickle
import json
from django.contrib import admin
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
from django.utils.html import escape, escapejs, strip_spaces_between_tags
from django import forms
from django.http import HttpResponseRedirect
from django.utils.encoding import force_unicode, smart_str
from django.contrib.admin.views.main import ChangeList
from django.contrib import messages
from django.db import transaction, router
from django.http import Http404
from django.core.exceptions import PermissionDenied
from django.contrib.admin.util import get_deleted_objects
from django.contrib.admin.util import unquote
from django.contrib.admin.options import csrf_protect_m
from django.template.defaultfilters import slugify
from django.core.files.uploadedfile import UploadedFile
from django.utils import translation
try:
from django.utils import simplejson
except ImportError:
import simplejson
try:
from django.utils.functional import update_wrapper
except ImportError:
from functools import update_wrapper
from django.shortcuts import render_to_response
from django.core.files.base import ContentFile
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.utils import six
try:
from django.utils.text import truncate_words
except ImportError:
# django >=1.5
from django.utils.text import Truncator
from django.utils.functional import allow_lazy
def truncate_words(s, num, end_text='...'):
truncate = end_text and ' %s' % end_text or ''
return Truncator(s).words(num, truncate=truncate)
truncate_words = allow_lazy(truncate_words, six.text_type)
from utilities.deep_copy import deep_copy
from utilities.csv_generator import CsvGenerator
from utilities.models import HtmlMail, Recipient, Image, SiteEmail, GeneratedFile
from utilities.templatetags.generated_file import file_image, filename, sizify, is_error
from widgets import UpdateRelatedFieldWidgetWrapper
from django.core.urlresolvers import reverse
class RecipientInLine(admin.TabularInline):
model = Recipient
class ImageInLine(admin.TabularInline):
model = Image
class HtmlMailAdmin(admin.ModelAdmin):
inlines = [RecipientInLine, ImageInLine]
list_display = ('datetime', 'subject', 'recipients', 'status')
def recipients(self, obj):
recipitents = Recipient.objects.filter(htmlmail=obj)
return truncate_words(u', '.join([force_unicode(recipient) for recipient in recipitents]), 10)
recipients.short_description = _('Recipients')
def status(self, obj):
waiting_recipitents = Recipient.objects.filter(htmlmail=obj, sent=False)
sent_recipitents = Recipient.objects.filter(htmlmail=obj, sent=True)
if waiting_recipitents and sent_recipitents:
background = '#FAE087'
border = '#B0A16D'
color = '#575755'
status = _('Sending')
elif sent_recipitents:
background = '#C8DE96'
border = '#94AC5E'
color = '#585A56'
status = _('Sent')
else:
background = '#BC3238'
border = '#873034'
color = '#FFFFFF'
status = _('Waiting')
return '<span style="display: block; text-align: center; width: 60px; padding: 1px 5px; background:%s;border-radius:3px;border:1px solid %s; color:%s;">%s</span>' % (background, border, color, force_unicode(status))
status.short_description = _('State')
status.allow_tags = True
admin.site.register(HtmlMail, HtmlMailAdmin)
admin.site.register(SiteEmail)
def get_related_delete(deleted_objects):
if not isinstance(deleted_objects, list):
return [deleted_objects, ]
out = []
for url in deleted_objects:
out.extend(get_related_delete(url))
return out
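# Rough illustration (hypothetical data) of the helper above: get_deleted_objects()
# yields a nested list of admin links, which get_related_delete() flattens, e.g.
#   nested = [u'<a href="/admin/shop/order/1/">Order 1</a>',
#             [u'<a href="/admin/shop/item/7/">Item 7</a>']]
#   get_related_delete(nested)
#   # -> [u'<a href="/admin/shop/order/1/">Order 1</a>',
#   #     u'<a href="/admin/shop/item/7/">Item 7</a>']
# The "shop" app and its models are invented for the example; delete_view below only
# relies on the "/admin/<app>/<model>/<pk>/" pattern inside each link.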
class RelatedToolsAdmin(admin.ModelAdmin):
delete_confirmation_template = 'admin/delete_confirmation.html'
@csrf_protect_m
@transaction.atomic
def delete_view(self, request, object_id, extra_context={}):
if request.POST and "_popup" in request.POST:
opts = self.model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
using = router.db_for_write(self.model)
(deleted_objects, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if perms_needed:
raise PermissionDenied
obj_display = force_unicode(obj)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
del_objects = []
for url in get_related_delete(deleted_objects):
url = unquote(url)
                m = re.match('.*href="/admin/([^/]*)/([^/]*)/([^/]*)/".*', unicode(url))
                # for objects whose model is not registered in the admin no change url exists,
                # so the match fails and they are left out. What should we do with those?
if m:
del_objects.append({'app': smart_str(m.group(1)), 'model': smart_str(m.group(2)), 'id':smart_str(m.group(3))})
return HttpResponse(u'<script type="text/javascript">opener.dismissDeletePopup(window, %s);</script>' % \
del_objects)
extra_context['is_popup'] = "_popup" in request.REQUEST
return super(RelatedToolsAdmin, self).delete_view(request, object_id, extra_context=extra_context)
def formfield_for_dbfield(self, db_field, **kwargs):
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
request = kwargs.pop("request", None)
if db_field.__class__ in self.formfield_overrides:
kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
if formfield and db_field.name not in self.raw_id_fields and (not hasattr(db_field.rel.to._meta, 'admin_foreign_key_tools') or db_field.rel.to._meta.admin_foreign_key_tools) and (not hasattr(db_field, 'admin_foreign_key_tools') or db_field.admin_foreign_key_tools):
related_modeladmin = self.admin_site._registry.get(
db_field.rel.to)
can_add_related = bool(related_modeladmin and
related_modeladmin.has_add_permission(request))
formfield.widget = UpdateRelatedFieldWidgetWrapper(
formfield.widget, db_field.rel, self.admin_site,
can_add_related=can_add_related)
return formfield
return super(RelatedToolsAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def response_add(self, request, obj, post_url_continue='../%s/'):
if "_popup" in request.POST:
pk_value = obj._get_pk_val()
return HttpResponse('<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s", %s);</script>' % \
# escape() calls force_unicode.
(escape(pk_value), escapejs(obj), json.dumps(self.popup_attrs(obj))))
return super(RelatedToolsAdmin, self).response_add(request, obj, post_url_continue)
def response_change(self, request, obj):
if "_popup" in request.POST:
pk_value = obj._get_pk_val()
return HttpResponse('<script type="text/javascript">opener.dismissEditPopup(window, "%s", "%s", %s);</script>' % \
# escape() calls force_unicode.
(escape(pk_value), escapejs(obj), json.dumps(self.popup_attrs(obj))))
return super(RelatedToolsAdmin, self).response_change(request, obj)
def popup_attrs(self, obj):
return {}
def _media(self):
media = super(RelatedToolsAdmin, self)._media()
js = []
js.append('%sutilities/js/jquery-1.6.4.min.js' % settings.STATIC_URL)
js.append('%sutilities/admin/js/RelatedObjectLookups.js' % settings.STATIC_URL)
media.add_js(js)
return media
media = property(_media)
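# RelatedToolsAdmin is a drop-in replacement for admin.ModelAdmin: it wraps ForeignKey
# and ManyToMany widgets with UpdateRelatedFieldWidgetWrapper so related objects can be
# added, edited and deleted through popups. A related model (or a single field) can opt
# out by providing a falsy `admin_foreign_key_tools` attribute, as checked in
# formfield_for_dbfield above.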
class HiddenModelMixin(object):
def get_model_perms(self, *args, **kwargs):
perms = super(HiddenModelMixin, self).get_model_perms(*args, **kwargs)
perms['list_hide'] = True
return perms
class HiddenModelAdmin(HiddenModelMixin, RelatedToolsAdmin):
pass
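# Minimal usage sketch (hypothetical model): registering with HiddenModelAdmin keeps the
# model out of the admin index while it stays editable through the related-field popups.
#   from myapp.models import Attachment      # hypothetical
#
#   class AttachmentAdmin(HiddenModelAdmin):
#       list_display = ('name',)
#
#   admin.site.register(Attachment, AttachmentAdmin)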
from django.contrib.admin.util import quote
class MarshallingChangeList(ChangeList):
def url_for_result(self, result):
return "../%s/%s/" % (getattr(result, self.model_admin.real_type_field).model, quote(getattr(result, self.pk_attname)))
class MarshallingAdmin(RelatedToolsAdmin):
real_type_field = 'real_type'
parent = None
childs = []
change_form_template = 'admin/marshalling_change_form.html'
change_list_template = 'admin/marshalling_change_list.html'
delete_confirmation_template = 'admin/marshalling_delete_confirmation.html'
def get_changelist(self, request, **kwargs):
return MarshallingChangeList
def get_model_perms(self, *args, **kwargs):
perms = super(MarshallingAdmin, self).get_model_perms(*args, **kwargs)
if (self.parent != self.model):
perms['list_hide'] = True
perms['hide_add'] = True
return perms
def queryset(self, request, parent=False):
if not parent:
return super(MarshallingAdmin, self).queryset(request)
qs = self.parent._default_manager.get_query_set()
ordering = self.ordering or ()
if ordering:
qs = qs.order_by(*ordering)
return qs
def add_view(self, request, form_url='', extra_context={}):
from django.contrib.contenttypes.models import ContentType
if self.parent:
extra_context['parent'] = self.parent.__name__.lower()
return super(MarshallingAdmin, self).add_view(request, form_url, extra_context=extra_context)
def change_view(self, request, object_id, extra_context={}):
from django.contrib.contenttypes.models import ContentType
if object_id:
obj = self.get_object(request, object_id)
if ContentType.objects.get_for_model(type(obj)) != getattr(obj, self.real_type_field):
return HttpResponseRedirect('../../%s/%s' % (getattr(obj, self.real_type_field).model, object_id))
if self.parent:
extra_context['parent'] = self.parent.__name__.lower()
return super(MarshallingAdmin, self).change_view(request, object_id, extra_context=extra_context)
def changelist_view(self, request, extra_context={}):
if self.childs:
childs = []
for obj in self.childs:
childs.append({'name': obj.__name__.lower(), 'verbose_name': obj._meta.verbose_name})
extra_context['childs'] = childs
return super(MarshallingAdmin, self).changelist_view(request, extra_context=extra_context)
@csrf_protect_m
@transaction.atomic
def delete_view(self, request, object_id, extra_context={}):
if request.POST and not "_popup" in request.POST:
opts = self.model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
using = router.db_for_write(self.model)
(deleted_objects, perms_needed, protected) = get_deleted_objects(
[obj], opts, request.user, self.admin_site, using)
if perms_needed:
raise PermissionDenied
obj_display = force_unicode(obj)
self.log_deletion(request, obj, obj_display)
self.delete_model(request, obj)
self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
if not self.has_change_permission(request, None):
return HttpResponseRedirect("../../../../")
return HttpResponseRedirect("../../../%s/" % self.parent.__name__.lower())
if self.parent:
extra_context['parent'] = self.parent.__name__.lower()
return super(MarshallingAdmin, self).delete_view(request, object_id, extra_context=extra_context)
def response_change(self, request, obj):
if "_save" in request.POST:
opts = obj._meta
verbose_name = opts.verbose_name
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(verbose_name), 'obj': force_unicode(obj)}
self.message_user(request, msg)
if self.has_change_permission(request, None):
return HttpResponseRedirect('../../%s' % self.parent.__name__.lower())
else:
return HttpResponseRedirect('../../../')
return super(MarshallingAdmin, self).response_change(request, obj)
def response_add(self, request, obj, post_url_continue='../%s/'):
if "_save" in request.POST:
opts = obj._meta
msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
self.message_user(request, msg)
if self.has_change_permission(request, None):
post_url = '../../%s' % self.parent.__name__.lower()
else:
post_url = '../../../'
return HttpResponseRedirect(post_url)
return super(MarshallingAdmin, self).response_add(request, obj, post_url_continue)
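# Usage sketch for the marshalling admin (hypothetical inheritance tree): the same admin
# class is registered for the base model and every subclass; `parent`/`childs` describe
# the hierarchy, so the change list is built from the base queryset and row links jump to
# the concrete subclass via the `real_type` content-type field.
#   class VehicleAdmin(MarshallingAdmin):
#       parent = Vehicle          # hypothetical base model carrying the real_type FK
#       childs = [Car, Truck]     # hypothetical subclasses of Vehicle
#
#   admin.site.register(Vehicle, VehicleAdmin)
#   admin.site.register(Car, VehicleAdmin)
#   admin.site.register(Truck, VehicleAdmin)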
class MultipleFilesImportMixin(object):
change_form_template = 'admin/multiple_file_upload_change_form.html'
multiple_files_inline = None
max_file_size = 5000000
accept_file_types = []
max_number_of_files = None
def response_add(self, request, obj, post_url_continue='../%s/'):
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
if "_continue_before_upload" in request.POST:
self.message_user(request, msg + ' ' + force_unicode(_("You may edit it again below.")))
post_url_continue += '?_upload=1'
if "_popup" in request.POST:
post_url_continue += "&_popup=1"
return HttpResponseRedirect(post_url_continue % pk_value)
return super(MultipleFilesImportMixin, self).response_add(request, obj, post_url_continue)
def add_view(self, request, form_url='', extra_context={}):
sup = super(MultipleFilesImportMixin, self)
extra_context['multiplefilesimportmixin_super_template'] = sup.add_form_template or sup.change_form_template or 'admin/change_form.html'
return sup.add_view(request, form_url, extra_context)
def change_view(self, request, object_id, extra_context={}):
sup = super(MultipleFilesImportMixin, self)
extra_context['multiplefilesimportmixin_super_template'] = sup.change_form_template or 'admin/change_form.html'
extra_context['max_file_size'] = self.max_file_size
extra_context['accept_file_types'] = '|'.join(self.accept_file_types)
extra_context['max_number_of_files'] = self.max_number_of_files
extra_context['upload'] = request.GET.get('_upload', None)
return sup.change_view(request, object_id, extra_context)
def received_file(self, obj, file):
return False
def get_urls(self):
from django.conf.urls.defaults import patterns, url
info = self.model._meta.app_label, self.model._meta.module_name
urlpatterns = patterns('',
url(r'^(.+)/fileupload/$',
self.fileupload_view,
name='%s_%s_fileupload' % info),
)
urlpatterns += super(MultipleFilesImportMixin, self).get_urls()
return urlpatterns
def fileupload_view(self, request, object_id):
obj = self.get_object(request, unquote(object_id))
opts = self.model._meta
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
result = []
if request.FILES.has_key('files[]') and request.FILES['files[]']:
file = request.FILES['files[]']
wrapped_file = UploadedFile(file)
filename = wrapped_file.name
file_size = wrapped_file.file.size
if not self.received_file(obj, file):
result.append({"error":'emptyResult', })
else:
result.append({"name":filename,
"size":file_size,
})
        else:
            result.append({"error":6, })
        # serialize in both branches, otherwise response_data is undefined when the
        # request carries no uploaded file
        response_data = simplejson.dumps(result)
        if "application/json" in request.META.get('HTTP_ACCEPT', ''):
mimetype = 'application/json'
else:
mimetype = 'text/plain'
return HttpResponse(response_data, mimetype=mimetype)
def _media(self):
media = super(MultipleFilesImportMixin, self)._media()
js = []
js.append('%sutilities/js/jquery-1.6.4.min.js' % settings.STATIC_URL)
js.append('%sutilities/js/jquery.colorbox-min.js' % settings.STATIC_URL)
media.add_js(js)
css = {'screen': [], 'pring': []}
css['screen'].append('%sutilities/css/colorbox.css' % settings.STATIC_URL)
media.add_css(css)
return media
media = property(_media)
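# Usage sketch (hypothetical gallery models): every uploaded file is posted to
# fileupload_view, which hands it to received_file(); returning True acknowledges the
# upload, returning False reports it back to the browser as failed.
#   class GalleryAdmin(MultipleFilesImportMixin, admin.ModelAdmin):
#       accept_file_types = ['jpg', 'jpeg', 'png']
#       max_file_size = 2000000
#
#       def received_file(self, obj, file):
#           GalleryImage.objects.create(gallery=obj, image=file)   # hypothetical model
#           return True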
class DynamicListDisplayModelMixin(object):
def __init__(self, model, admin_site):
super(DynamicListDisplayModelMixin, self).__init__(model, admin_site)
self.default_list_display = self.list_display
def _change_list_display(self, list_display):
list_display_copy = list(self.list_display)
for field in self.list_display[1:]:
list_display_copy.remove(field)
self.list_display = list_display_copy
for field in list_display:
if (not field in self.list_display):
self.list_display.append(field)
def get_list_display(self, request):
return self.default_list_display
def changelist_view(self, request, extra_context=None):
self._change_list_display(self.get_list_display(request))
return super(DynamicListDisplayModelMixin, self).changelist_view(request, extra_context=extra_context)
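# Usage sketch: get_list_display() is consulted on every changelist request, so the
# visible columns can depend on the request (hypothetical field names); the first column
# of the static list_display is always kept.
#   class OrderAdmin(DynamicListDisplayModelMixin, admin.ModelAdmin):
#       list_display = ('number',)
#
#       def get_list_display(self, request):
#           if request.user.is_superuser:
#               return ('number', 'customer', 'margin')
#           return ('number', 'customer')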
class DynamicFieldsetsModelMixin(object):
def __init__(self, model, admin_site):
super(DynamicFieldsetsModelMixin, self).__init__(model, admin_site)
self.default_fieldsets = self.fieldsets
def change_view(self, request, object_id, extra_context=None):
self.fieldsets = self.get_fieldsets(request)
return super(DynamicFieldsetsModelMixin, self).change_view(request, object_id, extra_context=extra_context)
def add_view(self, request, form_url='', extra_context=None):
self.fieldsets = self.get_fieldsets(request)
return super(DynamicFieldsetsModelMixin, self).add_view(request, form_url=form_url, extra_context=extra_context)
def get_fieldsets(self, request, obj=None):
if self.default_fieldsets:
return self.default_fieldsets
return super(DynamicFieldsetsModelMixin, self).get_fieldsets(request, obj=obj)
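# Usage sketch: fieldsets are re-resolved for every add/change view, so they can vary per
# request in the same way (hypothetical field names).
#   class OrderAdmin(DynamicFieldsetsModelMixin, admin.ModelAdmin):
#       def get_fieldsets(self, request, obj=None):
#           fieldsets = [(None, {'fields': ['number', 'customer']})]
#           if request.user.is_superuser:
#               fieldsets.append(('Internal', {'fields': ['margin']}))
#           return fieldsets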
class CloneModelMixin(object):
change_form_template = 'admin/clone_change_form.html'
def pre_clone_save(self, obj):
pass
def response_change(self, request, obj):
if ('_clone' in request.POST):
opts = self.model._meta
msg = _(u'The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
copied_obj = deep_copy(obj, False)
self.message_user(request, force_unicode(msg) + " " + force_unicode(_(u'Please update another values')))
if "_popup" in request.REQUEST:
return HttpResponseRedirect(request.path + "../%s?_popup=1" % copied_obj.pk)
else:
return HttpResponseRedirect(request.path + "../%s" % copied_obj.pk)
return super(CloneModelMixin, self).response_change(request, obj)
def add_view(self, request, form_url='', extra_context={}):
sup = super(CloneModelMixin, self)
extra_context['clonemodelmixin_super_template'] = sup.add_form_template or sup.change_form_template or 'admin/change_form.html'
return sup.add_view(request, form_url, extra_context)
def change_view(self, request, object_id, extra_context={}):
sup = super(CloneModelMixin, self)
extra_context['clonemodelmixin_super_template'] = sup.change_form_template or 'admin/change_form.html'
return sup.change_view(request, object_id, extra_context)
def _media(self):
media = super(CloneModelMixin, self)._media()
js = ['%sutilities/js/jquery-1.6.4.min.js' % settings.STATIC_URL]
media.add_js(js)
return media
media = property(_media)
class AdminPagingMixin(object):
change_form_template = 'admin/paging_change_form.html'
page_ordering = 'pk'
def add_view(self, request, form_url='', extra_context={}):
sup = super(AdminPagingMixin, self)
extra_context['pagingmixin_super_template'] = sup.add_form_template or sup.change_form_template or 'admin/change_form.html'
return sup.add_view(request, form_url, extra_context)
def change_view(self, request, object_id, extra_context={}):
sup = super(AdminPagingMixin, self)
model = self.model
opts = model._meta
obj = sup.get_object(request, object_id)
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if hasattr(sup, 'parent'):
qs = sup.queryset(request, True)
else:
qs = sup.queryset(request)
qs = qs.order_by(self.page_ordering)
next_qs = qs.filter(**{'%s__gt' % self.page_ordering:getattr(obj, self.page_ordering)}).order_by('%s' % self.page_ordering)
prev_qs = qs.filter(**{'%s__lt' % self.page_ordering:getattr(obj, self.page_ordering)}).order_by('-%s' % self.page_ordering)
if next_qs:
extra_context['next_obj'] = {'app': next_qs[0]._meta.app_label, 'obj':next_qs[0]._meta.object_name.lower(), 'pk':next_qs[0]._get_pk_val(), 'verbose_name': next_qs[0]._meta.verbose_name}
else:
extra_context['next_obj'] = None
if prev_qs:
extra_context['prev_obj'] = {'app': prev_qs[0]._meta.app_label, 'obj':prev_qs[0]._meta.object_name.lower(), 'pk':prev_qs[0]._get_pk_val(), 'verbose_name': prev_qs[0]._meta.verbose_name}
else:
extra_context['prev_obj'] = None
extra_context['pagingmixin_super_template'] = sup.change_form_template or 'admin/change_form.html'
return sup.change_view(request, object_id, extra_context)
def _media(self):
media = super(AdminPagingMixin, self)._media()
css = {'screen': ['%sutilities/admin/css/paging-admin.css' % settings.STATIC_URL]}
media.add_css(css)
return media
media = property(_media)
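# Usage sketch combining the two mixins above (hypothetical model): CloneModelMixin adds a
# "_clone" submit button that deep-copies the current object and redirects to the copy,
# AdminPagingMixin adds previous/next navigation ordered by `page_ordering`.
#   class ArticleAdmin(CloneModelMixin, AdminPagingMixin, admin.ModelAdmin):
#       page_ordering = 'pk'   # or any other orderable field name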
class TreeChangeList(ChangeList):
def tree_sort(self, parent):
result = []
ordering = self.model_admin.ordering
filter_values = {self.model_admin.parent: parent}
qs = self.result_list.filter(**filter_values)
        if ordering:
            qs = qs.order_by(*ordering)
for obj in qs:
result = result + [obj.pk] + self.tree_sort(obj)
return result
def get_depth(self, obj):
depth = 0
parent = getattr(obj, self.model_admin.parent)
        while parent is not None:
parent = getattr(parent, self.model_admin.parent)
depth += 1
return depth
class TreeModelMixin(object):
parent = None
change_list_template = 'admin/change_tree.html'
def queryset(self, request):
qs = super(TreeModelMixin, self).queryset(request)
for obj in qs:
obj.depth = 0
return qs
def get_changelist(self, request, **kwargs):
return TreeChangeList
def changelist_view(self, request, extra_context={}):
sup = super(TreeModelMixin, self)
extra_context['treemodelmixin_super_template'] = sup.change_list_template or 'admin/change_list.html'
return sup.changelist_view(request, extra_context)
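# Usage sketch (hypothetical self-referencing model): `parent` names the foreign key used
# by TreeChangeList to order rows depth-first and compute their indentation depth.
#   class CategoryAdmin(TreeModelMixin, admin.ModelAdmin):
#       parent = 'parent'   # name of the self-referencing FK on Category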
class CSVImportForm(forms.Form):
csv_file = forms.FileField(max_length=50)
class CSVExportMixin(object):
# change_list_template = 'admin/csv_import_change_list.html'
csv_delimiter = ';'
csv_fields = ()
csv_formatters = {}
csv_quotechar = '"'
csv_header = False
csv_DB_values = False
csv_bom = False
csv_encoding = 'utf-8'
actions = ['export_csv', ]
def pre_import_save(self, obj):
pass
def import_csv(self, f):
csv_generator = CsvGenerator(self, self.model, self.csv_fields, header=self.csv_header, delimiter=self.csv_delimiter, quotechar=self.csv_quotechar, DB_values=self.csv_DB_values, csv_formatters=self.csv_formatters, encoding=self.csv_encoding)
obj = csv_generator.import_csv(f, self)
return obj
def export_csv(self, request, queryset):
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=%s.csv' % slugify(queryset.model.__name__)
if self.csv_bom:
response.write("\xEF\xBB\xBF")
csv_generator = CsvGenerator(self, self.model, self.get_csv_fields(request), header=self.csv_header, delimiter=self.csv_delimiter, quotechar=self.csv_quotechar, DB_values=self.csv_DB_values, csv_formatters=self.csv_formatters, encoding=self.csv_encoding)
csv_generator.export_csv(response, queryset)
return response
export_csv.short_description = _(u"Export to CSV")
def get_csv_fields(self, request):
return self.csv_fields
def changelist_view(self, request, extra_context={}):
sup = super(CSVExportMixin, self)
import_form = CSVImportForm()
if ('_csv-import' in request.POST):
import_form = CSVImportForm(request.POST, request.FILES)
if(import_form.is_valid()):
# try:
self.import_csv(request.FILES['csv_file'])
# messages.info(request, _(u'CSV import byl úspěšně dokončen'))
# except:
                # messages.error(request, _(u'Invalid CSV file format'))
else:
messages.error(request, _(u'File must be in CSV format.'))
return HttpResponseRedirect('')
extra_context['csvimportmixin_super_template'] = sup.change_list_template or 'admin/change_list.html'
extra_context['import_form'] = import_form
return sup.changelist_view(request, extra_context=extra_context)
class GeneratedFilesMixin(object):
change_list_template = 'admin/generated_files_change_list.html'
progress_image = '%sutilities/images/icons/progress.gif' % settings.STATIC_URL
error_image = '%sutilities/images/icons/error.png' % settings.STATIC_URL
file_images = {
'csv': '%sutilities/images/icons/CSV.png' % settings.STATIC_URL,
'zip': '%sutilities/images/icons/ZIP.png' % settings.STATIC_URL,
'pdf': '%sutilities/images/icons/PDF.png' % settings.STATIC_URL,
}
timeout = 120
def get_urls(self):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
from django.conf.urls.defaults import patterns, url
info = self.model._meta.app_label, self.model._meta.module_name
urlpatterns = patterns('',
url(r'^generate-files/$', wrap(self.exported_files_view), name='%s_%s_exported_files' % info),
url(r'^generate-files/(.+)/$', wrap(self.exported_file_view), name='%s_%s_exported_file' % info),
) + super(GeneratedFilesMixin, self).get_urls()
return urlpatterns
def changelist_view(self, request, extra_context={}):
sup = super(GeneratedFilesMixin, self)
extra_context['generated_files_super_template'] = sup.change_list_template or 'admin/change_list.html'
return sup.changelist_view(request, extra_context=extra_context)
def exported_files_view(self, request, extra_context={}):
extra_context['exported_files'] = GeneratedFile.objects.filter(content_type=ContentType.objects.get_for_model(self.model)).order_by('-datetime')
extra_context['STATIC_URL'] = settings.STATIC_URL
extra_context['progress_image'] = self.progress_image
extra_context['error_image'] = self.error_image
extra_context['file_images'] = self.file_images
extra_context['timeout'] = self.timeout
return render_to_response('admin/generated_files.html', extra_context)
def exported_file_view(self, request, object_id, extra_context={}):
from django.utils import simplejson
generated_file = GeneratedFile.objects.get(pk=object_id)
if generated_file.file:
json_data = {
'file_image': file_image(generated_file, self.file_images, self.progress_image, self.error_image, self.timeout),
'file_name': filename(generated_file, self.timeout),
'file_url': generated_file.file.url,
'file_size': sizify(generated_file.file.size),
'generated': True
}
else:
json_data = {
'generated': False,
'error': is_error(generated_file, self.timeout),
'file_image': self.error_image
}
json_dump = simplejson.dumps(json_data)
return HttpResponse(json_dump, mimetype='application/json')
def _media(self):
media = super(GeneratedFilesMixin, self)._media()
js = (
'%sutilities/js/jquery-1.6.4.min.js' % settings.STATIC_URL,
'%sutilities/js/jquery.colorbox-min.js' % settings.STATIC_URL
)
media.add_js(js)
css = {'screen': ['%sutilities/admin/css/colorbox.css' % settings.STATIC_URL]}
media.add_css(css)
return media
media = property(_media)
class AsynchronousCSVExportMixin(GeneratedFilesMixin, CSVExportMixin):
def export_csv(self, request, queryset):
from utilities.tasks import generate_csv
gf = GeneratedFile(content_type=ContentType.objects.get_for_model(self.model), count_objects=queryset.count())
gf.save()
messages.info(request, _(u'Objects is exporting to CSV'), extra_tags='generated-files-info')
generate_csv.delay(gf.pk, self.model._meta.app_label, self.model._meta.object_name, queryset.values_list('pk', flat=True), self.csv_fields, translation.get_language())
class DashboardMixin(object):
change_list_template = 'admin/dashboard_change_list.html'
dashboard_table = []
def changelist_view(self, request, extra_context={}):
sup = super(DashboardMixin, self)
extra_context['dashboardmixin_super_template'] = sup.change_list_template or 'admin/change_list.html'
extra_context['show_dashboard'] = self.get_dashboard_table(request)
return sup.changelist_view(request, extra_context=extra_context)
def dashboard_view(self, request, extra_context={}):
dashboard_table = []
cl = self.get_changelist(request)(request, self.model, self.list_display, self.list_display_links, self.list_filter, self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self.list_editable, self)
qs = cl.get_query_set()
media = {'js': [], 'css': {'print': [], 'screen': []}}
row_num = 0
for row in self.get_dashboard_table(request):
dashboard_table_row = []
col_num = 0
for col in row:
media = col.widget_instance.get_media(media)
col.widget_instance.prefix = '%s-%s' % (row_num, col_num)
dashboard_table_row.append({'colspan': col.get_colspan(), 'html':col.render(qs, self)})
col_num += 1
dashboard_table.append(dashboard_table_row)
row_num += 1
extra_context['media'] = media
extra_context['dashboard_table'] = dashboard_table
return render_to_response('admin/dashboard.html', extra_context)
def get_dashboard_table(self, request):
return self.dashboard_table
def get_urls(self):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
from django.conf.urls.defaults import patterns, url
info = self.model._meta.app_label, self.model._meta.module_name
urlpatterns = patterns('', url(r'^dashboard/$', wrap(self.dashboard_view), name='%s_%s_dashboard' % info),) + super(DashboardMixin, self).get_urls()
return urlpatterns
def _media(self):
media = super(DashboardMixin, self)._media()
js = (
'%sutilities/js/jquery-1.6.4.min.js' % settings.STATIC_URL,
'%sutilities/js/jquery.colorbox-min.js' % settings.STATIC_URL
)
media.add_js(js)
css = {'screen': ['%sutilities/admin/css/colorbox.css' % settings.STATIC_URL]}
media.add_css(css)
return media
media = property(_media)
class HighlightedTabularInLine(admin.TabularInline):
template = 'admin/edit_inline/highlighted_tabular.html'
def _media(self):
media = super(HighlightedTabularInLine, self)._media()
js = []
js.append('%sutilities/js/jquery-1.6.4.min.js' % settings.STATIC_URL)
js.append('%sutilities/admin/js/highlighted-tabular.js' % settings.STATIC_URL)
media.add_js(js)
return media
media = property(_media)
class DefaultFilterMixin(object):
default_filters = ()
def get_default_filters(self):
return self.default_filters
def changelist_view(self, request, *args, **kwargs):
from django.http import HttpResponseRedirect
default_filters = self.get_default_filters()
if default_filters:
try:
test = request.META['HTTP_REFERER'].split(request.META['PATH_INFO'])
if test and test[-1] and not test[-1].startswith('?'):
url = reverse('admin:%s_%s_changelist' % (self.opts.app_label, self.opts.module_name))
filters = []
for filter in default_filters:
key = filter.split('=')[0]
if not request.GET.has_key(key):
filters.append(filter)
if filters:
return HttpResponseRedirect("%s?%s" % (url, "&".join(filters)))
except: pass
return super(DefaultFilterMixin, self).changelist_view(request, *args, **kwargs)
try:
from sorl.thumbnail.shortcuts import get_thumbnail
except ImportError:
pass
else:
from .sorl_thumbnail import AdminImageMixin
| gpl-3.0 | -7,080,564,700,646,644,000 | 41.011481 | 277 | 0.625765 | false |
ticklemepierce/osf.io | website/addons/wiki/routes.py | 22 | 4570 | """
"""
import os
from framework.routing import Rule, json_renderer
from website.routes import OsfWebRenderer
from . import views
TEMPLATE_DIR = '../addons/wiki/templates/'
settings_routes = {
'rules': [],
'prefix': '/api/v1',
}
widget_routes = {
'rules': [
Rule([
'/project/<pid>/wiki/widget/',
'/project/<pid>/node/<nid>/wiki/widget/',
], 'get', views.wiki_widget, json_renderer),
],
'prefix': '/api/v1',
}
# NOTE: <wname> refers to a wiki page's key, e.g. 'Home'
page_routes = {
'rules': [
# Home (Base) | GET
Rule([
'/project/<pid>/wiki/',
'/project/<pid>/node/<nid>/wiki/',
], 'get', views.project_wiki_home, OsfWebRenderer(os.path.join(TEMPLATE_DIR, 'edit.mako'))),
# View (Id) | GET
Rule([
'/project/<pid>/wiki/id/<wid>/',
'/project/<pid>/node/<nid>/wiki/id/<wid>/',
], 'get', views.project_wiki_id_page, OsfWebRenderer(os.path.join(TEMPLATE_DIR, 'edit.mako'))),
# Wiki | GET
Rule([
'/project/<pid>/wiki/<wname>/',
'/project/<pid>/node/<nid>/wiki/<wname>/',
], 'get', views.project_wiki_view, OsfWebRenderer(os.path.join(TEMPLATE_DIR, 'edit.mako'))),
# Edit | GET (legacy url, trigger redirect)
Rule([
'/project/<pid>/wiki/<wname>/edit/',
'/project/<pid>/node/<nid>/wiki/<wname>/edit/',
], 'get', views.project_wiki_edit, OsfWebRenderer(os.path.join(TEMPLATE_DIR, 'edit.mako'))),
# Compare | GET (legacy url, trigger redirect)
Rule([
'/project/<pid>/wiki/<wname>/compare/<int:wver>/',
'/project/<pid>/node/<nid>/wiki/<wname>/compare/<int:wver>/',
], 'get', views.project_wiki_compare, OsfWebRenderer(os.path.join(TEMPLATE_DIR, 'edit.mako'))),
# Edit | POST
Rule([
'/project/<pid>/wiki/<wname>/',
'/project/<pid>/node/<nid>/wiki/<wname>/',
], 'post', views.project_wiki_edit_post, OsfWebRenderer(os.path.join(TEMPLATE_DIR, 'edit.mako'))),
]
}
api_routes = {
'rules': [
# Home (Base) : GET
Rule([
'/project/<pid>/wiki/',
'/project/<pid>/node/<nid>/wiki/',
], 'get', views.project_wiki_home, json_renderer),
# Draft : GET
Rule([
'/project/<pid>/wiki/<wname>/draft/',
'/project/<pid>/node/<nid>/wiki/<wname>/draft/',
], 'get', views.wiki_page_draft, json_renderer),
# Content : GET
# <wver> refers to a wiki page's version number
Rule([
'/project/<pid>/wiki/<wname>/content/',
'/project/<pid>/node/<nid>/wiki/<wname>/content/',
'/project/<pid>/wiki/<wname>/content/<wver>/',
'/project/<pid>/node/<nid>/wiki/<wname>/content/<wver>/',
], 'get', views.wiki_page_content, json_renderer),
# Validate | GET
Rule([
'/project/<pid>/wiki/<wname>/validate/',
'/project/<pid>/node/<nid>/wiki/<wname>/validate/',
], 'get', views.project_wiki_validate_name, json_renderer),
# Edit | POST
Rule([
'/project/<pid>/wiki/<wname>/edit/',
'/project/<pid>/node/<nid>/wiki/<wname>/edit/',
], 'post', views.project_wiki_edit_post, json_renderer),
# Rename : PUT
Rule([
'/project/<pid>/wiki/<wname>/rename/',
'/project/<pid>/node/<nid>/wiki/<wname>/rename/',
], 'put', views.project_wiki_rename, json_renderer),
# Delete : DELETE
Rule([
'/project/<pid>/wiki/<wname>/',
'/project/<pid>/node/<nid>/wiki/<wname>/',
], 'delete', views.project_wiki_delete, json_renderer),
# Change Wiki Settings | PUT
Rule([
'/project/<pid>/wiki/settings/',
'/project/<pid>/node/<nid>/wiki/settings/',
], 'put', views.edit_wiki_settings, json_renderer),
#Permissions Info for Settings Page | GET
Rule(
[
'/project/<pid>/wiki/settings/',
'/project/<pid>/node/<nid>/wiki/settings/'
],
'get',
views.get_node_wiki_permissions,
json_renderer,
),
# Wiki Menu : GET
Rule([
'/project/<pid>/wiki/<wname>/grid/',
'/project/<pid>/node/<nid>/wiki/<wname>/grid/'
], 'get', views.project_wiki_grid_data, json_renderer),
],
'prefix': '/api/v1',
}
| apache-2.0 | 4,821,086,195,160,131,000 | 29.466667 | 106 | 0.514004 | false |
jorconnor/senior-design-rpl | testfiles/python/input/dependencies/dependencies1.py | 3 | 6647 | #The MIT License (MIT)
#
#Copyright (c) 2017 Jordan Connor
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import pandas
import numpy as np
from bokeh.io import show
from bokeh.plotting import figure
from project import lattice
from collections import Counter,defaultdict
from multiprocessing import Process, Pipe
from datetime import datetime
from math import *
QUIT = "QUIT"
class sim_helper(object):
def __init__(self,L,fN,u):
self.L = L
self.u = u
# Set our initial state
self.E1 = None
while self.E1 is None or (self.E1 < self.u[0] or self.E1 >= self.u[1]):
self.lat = lattice.isinglattice(L)
self.E1 = self.lat.E()
#Set the initial f paramater
self.f = np.e
#Define our histogram counter
self.H = Counter()
#Define our density of states and initialize to our guess
self.g0 = np.log(1)
#Define our modification paramater
self.fN = fN
self.G = {self.E1 : self.g0}
def sweep(self):
for i in range(self.L**2):
#Do the trial flip and calculate the new energy
E2 = None
x = None
y = None
x,y = np.random.randint(0,self.L,2)
#self.lat.flip(x,y)
#E2 = self.lat.E()
E2 = self.E1 + self.lat.dU(x, y)
if not (E2 < self.u[0] or E2 >= self.u[1]):
#self.lat.flip(x, y)
#else:
#Accept the energy if it meets the wang landau criterion
#or reverse the flip
if E2 not in self.G.keys():
self.G[E2] = self.g0
if(np.random.uniform() <= np.exp(float(self.G[self.E1])-self.G[E2])):
self.E1 = E2
self.lat.flip(x, y)
#else:
#self.lat.flip(x,y)
#update our DOS for the current energy
self.G[self.E1] += np.log(self.f)
#Add our new energy to the histogram
self.H[self.E1] += 1
def clear(self,f):
self.f = f
self.H.clear()
def sim_process(conn):
L,fN,u = conn.recv()
helper = sim_helper(L,fN,u)
while(conn.recv() != "EOF"):
for i in range(10000):
helper.sweep()
conn.send(helper.G)
conn.send(helper.H)
newF = conn.recv()
if(newF != helper.f):
helper.clear(newF)
conn.close()
class wanglandauising(object):
def __init__(self,L,p,fN):
self.L = L
self.p = p
#Define our normalized DOS
self.GN = {}
#Define an nonnormalized DOS
self.G = {}
#Define our modification factors
self.f = np.e
self.fN = fN
self.H = Counter()
self.pCount = 2
self.processes = []
self.conns = []
A = 2*L**2+.06
#self.ranges = [[-A,-A/2.0],[-A/2.0,0],[0,A/2.0],[A/2.0,A]]
#print(self.ranges)
self.ranges = [[-100,0],[0,100]]
#self.ranges=[[-1000,1000]]
def run(self):
for i in range(self.pCount):
parent_conn, child_conn = Pipe()
self.processes.append(Process(target=sim_process, args=(child_conn,)))
self.conns.append(parent_conn)
self.processes[i].start()
self.conns[i].send([self.L,self.fN,self.ranges[i]])
while not self.f < np.exp(10**-8):
for i in range(self.pCount):
self.conns[i].send("GO")
for conn in self.conns:
for e,g in conn.recv().iteritems():
self.G[e] = g
self.H += conn.recv()
self.check_flatness()
for i in range(self.pCount):
self.conns[i].send("EOF")
self.conns[i].close()
self.processes[i].join()
#Normalize our DOS
for e,g in self.G.iteritems():
self.GN[e] = g - self.G[-2] + np.log(2)
#print(self.GN)
def check_flatness(self,a=":)"):
#Determine the average histogram
avgH = 0.0
size = 0.0
for e,count in self.H.iteritems():
avgH += count
size += 1.0
avgH = avgH/size
        #Now finish our average and determine our percentage
avgH = avgH*self.p
#Now verify the wanglandau criterion is satisfied
cSat = True
for e,count in self.H.iteritems():
if count <= avgH:
print(str(count) + " " + str(avgH))
cSat = False
break
#If satisfied we reduce our modification factor
if cSat:
self.f = self.f**(1/float(self.fN))
self.H.clear()
for conn in self.conns:
conn.send(self.f)
print(self.f)
def u(self,T):
num = 0.0
den = 0.0
for e,g in self.G.iteritems():
num += (e*g*np.exp(-float(e)/T))
den += (g*np.exp(-float(e)/T))
return (num/den)/self.L
if __name__ == '__main__':
#Run the simulation
L = 4
sim = wanglandauising(L,.8,2)
t1 = datetime.now()
sim.run()
t2 = datetime.now()
delta = t2-t1
print(delta.microseconds)
#Now use the DOS to generate our energies for various values of t
U = []
G = []
for e,g in sim.GN.iteritems():
U.append(e)
G.append(g)
    s1 = figure(width=500, plot_height=500, title="DOS for " + str(L) + " x " + str(L) + " Ising Model")
s1.circle(U,G,size=5,color="navy",alpha=.5)
s1.xaxis.axis_label = "Energy per Lattice Site"
s1.yaxis.axis_label = "ln(g(e))"
show(s1)
| mit | -5,181,699,294,558,578,000 | 30.060748 | 97 | 0.555288 | false |
xiehe/gjjx-zhushou | login.py | 1 | 2287 | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import webbrowser
import random
import re
from PIL import Image
from io import BytesIO
header = {
'User-Agent' : 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.13 Safari/537.36',
'Referer': 'http://www.gjjx.com.cn'
}
class Login:
def __init__(self, session):
self.session = session
self.captcha_path = 'temp/captcha.png'
    # Fetch the captcha image
def get_captcha(self):
"""
        @return Bool, file stream (raw bytes)
"""
url = 'http://www.gjjx.com.cn/member/captcha/' + str(random.random())
r = self.session.get(url)
if r.status_code == 200:
im = Image.open(BytesIO(r.content))
im.save(self.captcha_path, 'png')
return True, r.content
else:
return False, r.content
    # Display the captcha image
def show_image(self, file_path):
"""
        :param file_path: path to the image file
"""
if sys.version_info >= (3, 3):
from shlex import quote
else:
from pipes import quote
if sys.platform == "darwin":
command = "open -a /Applications/Preview.app %s&" % quote(file_path)
os.system(command)
else:
webbrowser.open(os.path.join(os.getcwd(), file_path))
    # Log in
def login(self, payload):
url = 'http://www.gjjx.com.cn/member/login?hash=' + str(random.random())
r = self.session.post(url, data=payload, headers=header)
json = r.json()
user = {
            'name': '', # real name
}
if json['code'] == 10008:
user['name'] = json['data']['user']['truename']
return True, user
else:
return False, self.login_err_msg(json['code'])
    # Login error messages
def login_err_msg(self, code):
d = {
            20000: u'Invalid request parameters',
            20002: u'Account is not activated or is locked',
            20003: u'Incorrect username or password',
            20004: u'Incorrect captcha',
            20005: u'User does not exist',
            20001: u'User is already logged in',
}
if code == 20004 or code == 20003:
            self.get_captcha()
return d[code] | mit | -5,721,049,146,547,121,000 | 26.21519 | 130 | 0.526291 | false |
bertucho/epic-movie-quotes-quiz | dialogos/build/cffi/testing/cffi0/test_ffi_backend.py | 3 | 11151 | import py, sys, platform
import pytest
from testing.cffi0 import backend_tests, test_function, test_ownlib
from cffi import FFI
import _cffi_backend
class TestFFI(backend_tests.BackendTests,
test_function.TestFunction,
test_ownlib.TestOwnLib):
TypeRepr = "<ctype '%s'>"
@staticmethod
def Backend():
return _cffi_backend
def test_not_supported_bitfield_in_result(self):
ffi = FFI(backend=self.Backend())
ffi.cdef("struct foo_s { int a,b,c,d,e; int x:1; };")
e = py.test.raises(NotImplementedError, ffi.callback,
"struct foo_s foo(void)", lambda: 42)
assert str(e.value) == ("struct foo_s(*)(): "
"callback with unsupported argument or return type or with '...'")
def test_inspecttype(self):
ffi = FFI(backend=self.Backend())
assert ffi.typeof("long").kind == "primitive"
assert ffi.typeof("long(*)(long, long**, ...)").cname == (
"long(*)(long, long * *, ...)")
assert ffi.typeof("long(*)(long, long**, ...)").ellipsis is True
def test_new_handle(self):
ffi = FFI(backend=self.Backend())
o = [2, 3, 4]
p = ffi.new_handle(o)
assert ffi.typeof(p) == ffi.typeof("void *")
assert ffi.from_handle(p) is o
assert ffi.from_handle(ffi.cast("char *", p)) is o
py.test.raises(RuntimeError, ffi.from_handle, ffi.NULL)
class TestBitfield:
def check(self, source, expected_ofs_y, expected_align, expected_size):
# NOTE: 'expected_*' is the numbers expected from GCC.
# The numbers expected from MSVC are not explicitly written
# in this file, and will just be taken from the compiler.
ffi = FFI()
ffi.cdef("struct s1 { %s };" % source)
ctype = ffi.typeof("struct s1")
# verify the information with gcc
ffi1 = FFI()
ffi1.cdef("""
static const int Gofs_y, Galign, Gsize;
struct s1 *try_with_value(int fieldnum, long long value);
""")
fnames = [name for name, cfield in ctype.fields
if name and cfield.bitsize > 0]
setters = ['case %d: s.%s = value; break;' % iname
for iname in enumerate(fnames)]
lib = ffi1.verify("""
struct s1 { %s };
struct sa { char a; struct s1 b; };
#define Gofs_y offsetof(struct s1, y)
#define Galign offsetof(struct sa, b)
#define Gsize sizeof(struct s1)
struct s1 *try_with_value(int fieldnum, long long value)
{
static struct s1 s;
memset(&s, 0, sizeof(s));
switch (fieldnum) { %s }
return &s;
}
""" % (source, ' '.join(setters)))
if sys.platform == 'win32':
expected_ofs_y = lib.Gofs_y
expected_align = lib.Galign
expected_size = lib.Gsize
else:
assert (lib.Gofs_y, lib.Galign, lib.Gsize) == (
expected_ofs_y, expected_align, expected_size)
# the real test follows
assert ffi.offsetof("struct s1", "y") == expected_ofs_y
assert ffi.alignof("struct s1") == expected_align
assert ffi.sizeof("struct s1") == expected_size
# compare the actual storage of the two
for name, cfield in ctype.fields:
if cfield.bitsize < 0 or not name:
continue
if int(ffi.cast(cfield.type, -1)) == -1: # signed
min_value = -(1 << (cfield.bitsize-1))
max_value = (1 << (cfield.bitsize-1)) - 1
else:
min_value = 0
max_value = (1 << cfield.bitsize) - 1
for t in [1, 2, 4, 8, 16, 128, 2813, 89728, 981729,
-1,-2,-4,-8,-16,-128,-2813,-89728,-981729]:
if min_value <= t <= max_value:
self._fieldcheck(ffi, lib, fnames, name, t)
def _fieldcheck(self, ffi, lib, fnames, name, value):
s = ffi.new("struct s1 *")
setattr(s, name, value)
assert getattr(s, name) == value
raw1 = ffi.buffer(s)[:]
t = lib.try_with_value(fnames.index(name), value)
raw2 = ffi.buffer(t, len(raw1))[:]
assert raw1 == raw2
def test_bitfield_basic(self):
self.check("int a; int b:9; int c:20; int y;", 8, 4, 12)
self.check("int a; short b:9; short c:7; int y;", 8, 4, 12)
self.check("int a; short b:9; short c:9; int y;", 8, 4, 12)
def test_bitfield_reuse_if_enough_space(self):
self.check("int a:2; char y;", 1, 4, 4)
self.check("int a:1; char b ; int c:1; char y;", 3, 4, 4)
self.check("int a:1; char b:8; int c:1; char y;", 3, 4, 4)
self.check("char a; int b:9; char y;", 3, 4, 4)
self.check("char a; short b:9; char y;", 4, 2, 6)
self.check("int a:2; char b:6; char y;", 1, 4, 4)
self.check("int a:2; char b:7; char y;", 2, 4, 4)
self.check("int a:2; short b:15; char c:2; char y;", 5, 4, 8)
self.check("int a:2; char b:1; char c:1; char y;", 1, 4, 4)
@pytest.mark.skipif("platform.machine().startswith(('arm', 'aarch64'))")
def test_bitfield_anonymous_no_align(self):
L = FFI().alignof("long long")
self.check("char y; int :1;", 0, 1, 2)
self.check("char x; int z:1; char y;", 2, 4, 4)
self.check("char x; int :1; char y;", 2, 1, 3)
self.check("char x; long long z:48; char y;", 7, L, 8)
self.check("char x; long long :48; char y;", 7, 1, 8)
self.check("char x; long long z:56; char y;", 8, L, 8 + L)
self.check("char x; long long :56; char y;", 8, 1, 9)
self.check("char x; long long z:57; char y;", L + 8, L, L + 8 + L)
self.check("char x; long long :57; char y;", L + 8, 1, L + 9)
@pytest.mark.skipif(
"not platform.machine().startswith(('arm', 'aarch64'))")
def test_bitfield_anonymous_align_arm(self):
L = FFI().alignof("long long")
self.check("char y; int :1;", 0, 4, 4)
self.check("char x; int z:1; char y;", 2, 4, 4)
self.check("char x; int :1; char y;", 2, 4, 4)
self.check("char x; long long z:48; char y;", 7, L, 8)
self.check("char x; long long :48; char y;", 7, 8, 8)
self.check("char x; long long z:56; char y;", 8, L, 8 + L)
self.check("char x; long long :56; char y;", 8, L, 8 + L)
self.check("char x; long long z:57; char y;", L + 8, L, L + 8 + L)
self.check("char x; long long :57; char y;", L + 8, L, L + 8 + L)
@pytest.mark.skipif("platform.machine().startswith(('arm', 'aarch64'))")
def test_bitfield_zero(self):
L = FFI().alignof("long long")
self.check("char y; int :0;", 0, 1, 4)
self.check("char x; int :0; char y;", 4, 1, 5)
self.check("char x; int :0; int :0; char y;", 4, 1, 5)
self.check("char x; long long :0; char y;", L, 1, L + 1)
self.check("short x, y; int :0; int :0;", 2, 2, 4)
self.check("char x; int :0; short b:1; char y;", 5, 2, 6)
self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8)
@pytest.mark.skipif(
"not platform.machine().startswith(('arm', 'aarch64'))")
def test_bitfield_zero_arm(self):
L = FFI().alignof("long long")
self.check("char y; int :0;", 0, 4, 4)
self.check("char x; int :0; char y;", 4, 4, 8)
self.check("char x; int :0; int :0; char y;", 4, 4, 8)
self.check("char x; long long :0; char y;", L, 8, L + 8)
self.check("short x, y; int :0; int :0;", 2, 4, 4)
self.check("char x; int :0; short b:1; char y;", 5, 4, 8)
self.check("int a:1; int :0; int b:1; char y;", 5, 4, 8)
def test_error_cases(self):
ffi = FFI()
py.test.raises(TypeError,
'ffi.cdef("struct s1 { float x:1; };"); ffi.new("struct s1 *")')
py.test.raises(TypeError,
'ffi.cdef("struct s2 { char x:0; };"); ffi.new("struct s2 *")')
py.test.raises(TypeError,
'ffi.cdef("struct s3 { char x:9; };"); ffi.new("struct s3 *")')
def test_struct_with_typedef(self):
ffi = FFI()
ffi.cdef("typedef struct { float x; } foo_t;")
p = ffi.new("foo_t *", [5.2])
assert repr(p).startswith("<cdata 'foo_t *' ")
def test_struct_array_no_length(self):
ffi = FFI()
ffi.cdef("struct foo_s { int x; int a[]; };")
p = ffi.new("struct foo_s *", [100, [200, 300, 400]])
assert p.x == 100
assert ffi.typeof(p.a) is ffi.typeof("int *") # no length available
assert p.a[0] == 200
assert p.a[1] == 300
assert p.a[2] == 400
@pytest.mark.skipif("sys.platform != 'win32'")
def test_getwinerror(self):
ffi = FFI()
code, message = ffi.getwinerror(1155)
assert code == 1155
assert message == ("No application is associated with the "
"specified file for this operation")
ffi.cdef("void SetLastError(int);")
lib = ffi.dlopen("Kernel32.dll")
lib.SetLastError(2)
code, message = ffi.getwinerror()
assert code == 2
assert message == "The system cannot find the file specified"
code, message = ffi.getwinerror(-1)
assert code == 2
assert message == "The system cannot find the file specified"
def test_from_buffer(self):
import array
ffi = FFI()
a = array.array('H', [10000, 20000, 30000])
c = ffi.from_buffer(a)
assert ffi.typeof(c) is ffi.typeof("char[]")
ffi.cast("unsigned short *", c)[1] += 500
assert list(a) == [10000, 20500, 30000]
def test_all_primitives(self):
ffi = FFI()
for name in [
"char",
"short",
"int",
"long",
"long long",
"signed char",
"unsigned char",
"unsigned short",
"unsigned int",
"unsigned long",
"unsigned long long",
"float",
"double",
"long double",
"wchar_t",
"_Bool",
"int8_t",
"uint8_t",
"int16_t",
"uint16_t",
"int32_t",
"uint32_t",
"int64_t",
"uint64_t",
"int_least8_t",
"uint_least8_t",
"int_least16_t",
"uint_least16_t",
"int_least32_t",
"uint_least32_t",
"int_least64_t",
"uint_least64_t",
"int_fast8_t",
"uint_fast8_t",
"int_fast16_t",
"uint_fast16_t",
"int_fast32_t",
"uint_fast32_t",
"int_fast64_t",
"uint_fast64_t",
"intptr_t",
"uintptr_t",
"intmax_t",
"uintmax_t",
"ptrdiff_t",
"size_t",
"ssize_t",
]:
x = ffi.sizeof(name)
assert 1 <= x <= 16
| mit | 2,344,467,808,344,002,600 | 39.111511 | 78 | 0.506143 | false |
bcornwellmott/frappe | frappe/patches/v7_1/refactor_integration_broker.py | 23 | 1670 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
def execute():
for doctype_name in ["Razorpay Log", "Razorpay Payment", "Razorpay Settings"]:
delete_doc("DocType", doctype_name)
reload_doctypes()
setup_services()
def delete_doc(doctype, doctype_name):
frappe.delete_doc(doctype, doctype_name)
def reload_doctypes():
for doctype in ("razorpay_settings", "paypal_settings", "dropbox_settings", "ldap_settings"):
frappe.reload_doc("integrations", "doctype", doctype)
def setup_services():
for service in [{"old_name": "Razorpay", "new_name": "Razorpay"},
{"old_name": "PayPal", "new_name": "PayPal"},
{"old_name": "Dropbox Integration", "new_name": "Dropbox"},
{"old_name": "LDAP Auth", "new_name": "LDAP"}]:
try:
service_doc = frappe.get_doc("Integration Service", service["old_name"])
settings = json.loads(service_doc.custom_settings_json)
service_settings = frappe.new_doc("{0} Settings".format(service["new_name"]))
service_settings.update(settings)
service_settings.flags.ignore_mandatory = True
service_settings.save(ignore_permissions=True)
if service["old_name"] in ["Dropbox Integration", "LDAP Auth"]:
delete_doc("Integration Service", service["old_name"])
new_service_doc = frappe.get_doc({
"doctype": "Integration Service",
"service": service["new_name"],
"enabled": 1
})
new_service_doc.flags.ignore_mandatory = True
new_service_doc.save(ignore_permissions=True)
except Exception:
pass
| mit | 1,341,075,913,696,431,400 | 31.115385 | 94 | 0.686228 | false |
Maaphoo/Retr3d | multiprocessing/process.py | 52 | 9370 | #
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = ['Process', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
#
#
#
try:
ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
'''
Return process object representing the current process
'''
return _current_process
def active_children():
'''
Return list of process objects corresponding to live child processes
'''
_cleanup()
return list(_current_process._children)
#
#
#
def _cleanup():
# check for processes which have finished
for p in list(_current_process._children):
if p._popen.poll() is not None:
_current_process._children.discard(p)
#
# The `Process` class
#
class Process(object):
'''
Process objects represent activity that is run in a separate process
    The class is analogous to `threading.Thread`
'''
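    # Usage sketch (mirrors threading.Thread; `some_callable` is a placeholder,
    # not part of this module):
    #     p = Process(target=some_callable, args=(1, 2), kwargs={'x': 3})
    #     p.start()        # spawn the child, which calls p.run()
    #     p.is_alive()     # True until the child exits
    #     p.join()         # wait for the child to finish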
_Popen = None
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
assert group is None, 'group argument must be None for now'
count = _current_process._counter.next()
self._identity = _current_process._identity + (count,)
self._authkey = _current_process._authkey
self._daemonic = _current_process._daemonic
self._tempdir = _current_process._tempdir
self._parent_pid = os.getpid()
self._popen = None
self._target = target
self._args = tuple(args)
self._kwargs = dict(kwargs)
self._name = name or type(self).__name__ + '-' + \
':'.join(str(i) for i in self._identity)
def run(self):
'''
Method to be run in sub-process; can be overridden in sub-class
'''
if self._target:
self._target(*self._args, **self._kwargs)
def start(self):
'''
Start child process
'''
assert self._popen is None, 'cannot start a process twice'
assert self._parent_pid == os.getpid(), \
'can only start a process object created by current process'
assert not _current_process._daemonic, \
'daemonic processes are not allowed to have children'
_cleanup()
if self._Popen is not None:
Popen = self._Popen
else:
from .forking import Popen
self._popen = Popen(self)
_current_process._children.add(self)
def terminate(self):
'''
Terminate process; sends SIGTERM signal or uses TerminateProcess()
'''
self._popen.terminate()
def join(self, timeout=None):
'''
Wait until child process terminates
'''
assert self._parent_pid == os.getpid(), 'can only join a child process'
assert self._popen is not None, 'can only join a started process'
res = self._popen.wait(timeout)
if res is not None:
_current_process._children.discard(self)
def is_alive(self):
'''
Return whether process is alive
'''
if self is _current_process:
return True
assert self._parent_pid == os.getpid(), 'can only test a child process'
if self._popen is None:
return False
self._popen.poll()
return self._popen.returncode is None
@property
def name(self):
return self._name
@name.setter
def name(self, name):
assert isinstance(name, basestring), 'name must be a string'
self._name = name
@property
def daemon(self):
'''
Return whether process is a daemon
'''
return self._daemonic
@daemon.setter
def daemon(self, daemonic):
'''
Set whether process is a daemon
'''
assert self._popen is None, 'process has already started'
self._daemonic = daemonic
@property
def authkey(self):
return self._authkey
@authkey.setter
def authkey(self, authkey):
'''
Set authorization key of process
'''
self._authkey = AuthenticationString(authkey)
@property
def exitcode(self):
'''
Return exit code of process or `None` if it has yet to stop
'''
if self._popen is None:
return self._popen
return self._popen.poll()
@property
def ident(self):
'''
Return identifier (PID) of process or `None` if it has yet to start
'''
if self is _current_process:
return os.getpid()
else:
return self._popen and self._popen.pid
pid = ident
def __repr__(self):
if self is _current_process:
status = 'started'
elif self._parent_pid != os.getpid():
status = 'unknown'
elif self._popen is None:
status = 'initial'
else:
if self._popen.poll() is not None:
status = self.exitcode
else:
status = 'started'
if type(status) is int:
if status == 0:
status = 'stopped'
else:
status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
status, self._daemonic and ' daemon' or '')
##
def _bootstrap(self):
from . import util
global _current_process
try:
self._children = set()
self._counter = itertools.count(1)
try:
sys.stdin.close()
sys.stdin = open(os.devnull)
except (OSError, ValueError):
pass
_current_process = self
util._finalizer_registry.clear()
util._run_after_forkers()
util.info('child process calling self.run()')
try:
self.run()
exitcode = 0
finally:
util._exit_function()
except SystemExit, e:
if not e.args:
exitcode = 1
elif isinstance(e.args[0], int):
exitcode = e.args[0]
else:
sys.stderr.write(str(e.args[0]) + '\n')
sys.stderr.flush()
exitcode = 1
except:
exitcode = 1
import traceback
sys.stderr.write('Process %s:\n' % self.name)
sys.stderr.flush()
traceback.print_exc()
util.info('process exiting with exitcode %d' % exitcode)
return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
def __reduce__(self):
from .forking import Popen
if not Popen.thread_is_spawning():
raise TypeError(
'Pickling an AuthenticationString object is '
'disallowed for security reasons'
)
return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(Process):
def __init__(self):
self._identity = ()
self._daemonic = False
self._name = 'MainProcess'
self._parent_pid = None
self._popen = None
self._counter = itertools.count(1)
self._children = set()
self._authkey = AuthenticationString(os.urandom(32))
self._tempdir = None
_current_process = _MainProcess()
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in signal.__dict__.items():
if name[:3]=='SIG' and '_' not in name:
_exitcode_to_name[-signum] = name
| gpl-3.0 | -7,880,929,767,971,504,000 | 28.009288 | 79 | 0.589861 | false |
tensorflow/tensorflow | tensorflow/python/keras/layers/preprocessing/benchmarks/bucketized_column_dense_benchmark.py | 7 | 2966 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of bucketized columns with dense inputs."""
import numpy as np
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.eager.def_function import function as tf_function
from tensorflow.python.feature_column import feature_column_v2 as fcv2
from tensorflow.python.framework import dtypes as dt
from tensorflow.python.keras.layers.preprocessing import discretization
from tensorflow.python.keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
from tensorflow.python.platform import test as tf_test
# This is required as of 3/2021 because otherwise we drop into graph mode.
v2_compat.enable_v2_behavior()
NUM_REPEATS = 10 # The number of times to run each benchmark.
BATCH_SIZES = [32, 256]
### KPL AND FC IMPLEMENTATION BENCHMARKS ###
def embedding_varlen(batch_size, max_length):
"""Benchmark a variable-length embedding."""
# Data and constants.
max_value = 25.0
bins = np.arange(1.0, max_value)
data = fc_bm.create_data(
max_length, batch_size * NUM_REPEATS, 100000, dtype=float)
# Keras implementation
model = keras.Sequential()
model.add(keras.Input(shape=(max_length,), name="data", dtype=dt.float32))
model.add(discretization.Discretization(bins))
# FC implementation
fc = fcv2.bucketized_column(
fcv2.numeric_column("data"), boundaries=list(bins))
# Wrap the FC implementation in a tf.function for a fair comparison
@tf_function()
def fc_fn(tensors):
fc.transform_feature(fcv2.FeatureTransformationCache(tensors), None)
# Benchmark runs
keras_data = {"data": data.to_tensor(default_value=0.0)}
k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
fc_data = {"data": data.to_tensor(default_value=0.0)}
fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
"""Benchmark the layer forward pass."""
def benchmark_layer(self):
for batch in BATCH_SIZES:
name = "bucketized|dense|batch_%s" % batch
k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
self.report(name, k_time, f_time, NUM_REPEATS)
if __name__ == "__main__":
tf_test.main()
| apache-2.0 | -3,177,815,211,221,658,000 | 36.544304 | 101 | 0.719488 | false |
andresailer/DIRAC | MonitoringSystem/private/TypeLoader.py | 3 | 1396 | """
It is used to load the monitoring types.
"""
import re
from DIRAC.Core.Utilities import DIRACSingleton
from DIRAC.Core.Utilities.Plotting.ObjectLoader import loadObjects
from DIRAC.MonitoringSystem.Client.Types.BaseType import BaseType
__RCSID__ = "$Id$"
########################################################################
class TypeLoader( object ):
"""
  .. class:: TypeLoader
:param ~DIRACSingleton.DIRACSingleton metaclass: this is a singleton
:param dict loaded: it stores the loaded classes
:param str path: The location of the classes
:param ~DIRAC.MonitoringSystem.Client.Types.BaseType.BaseType parentCls: it is the parent class
:param regexp: regular expression...
"""
__metaclass__ = DIRACSingleton.DIRACSingleton
__loaded = {}
__path = ""
__parentCls = None
__reFilter = None
########################################################################
def __init__( self ):
"""c'tor
"""
self.__loaded = {}
self.__path = "MonitoringSystem/Client/Types"
self.__parentCls = BaseType
self.__reFilter = re.compile( r".*[a-z1-9]\.py$" )
########################################################################
def getTypes( self ):
"""
It returns all monitoring classes
"""
if not self.__loaded:
self.__loaded = loadObjects( self.__path, self.__reFilter, self.__parentCls )
return self.__loaded
| gpl-3.0 | 5,862,922,863,693,751,000 | 28.083333 | 97 | 0.565903 | false |
edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/distutils/msvccompiler.py | 1 | 24144 | """distutils.msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
__revision__ = "$Id$"
import sys, os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils import log
_can_read_reg = False
try:
import winreg
_can_read_reg = True
hkey_mod = winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegError = winreg.error
except ImportError:
try:
import win32api
import win32con
_can_read_reg = True
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegError = win32api.error
except ImportError:
log.info("Warning: Can't read registry to find the "
"necessary compiler setting\n"
"Make sure that Python modules winreg, "
"win32api or win32con are installed.")
pass
if _can_read_reg:
HKEYS = (hkey_mod.HKEY_USERS,
hkey_mod.HKEY_CURRENT_USER,
hkey_mod.HKEY_LOCAL_MACHINE,
hkey_mod.HKEY_CLASSES_ROOT)
def read_keys(base, key):
"""Return list of registry keys."""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
L = []
i = 0
while True:
try:
k = RegEnumKey(handle, i)
except RegError:
break
L.append(k)
i += 1
return L
def read_values(base, key):
"""Return dict of registry keys and values.
All names are converted to lowercase.
"""
try:
handle = RegOpenKeyEx(base, key)
except RegError:
return None
d = {}
i = 0
while True:
try:
name, value, type = RegEnumValue(handle, i)
except RegError:
break
name = name.lower()
d[convert_mbcs(name)] = convert_mbcs(value)
i += 1
return d
def convert_mbcs(s):
dec = getattr(s, "decode", None)
if dec is not None:
try:
s = dec("mbcs")
except UnicodeError:
pass
return s
class MacroExpander:
def __init__(self, version):
self.macros = {}
self.load_macros(version)
def set_macro(self, macro, path, key):
for base in HKEYS:
d = read_values(base, path)
if d:
self.macros["$(%s)" % macro] = d[key]
break
def load_macros(self, version):
vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
net = r"Software\Microsoft\.NETFramework"
self.set_macro("FrameworkDir", net, "installroot")
try:
if version > 7.0:
self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
else:
self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
except KeyError as exc: #
raise DistutilsPlatformError(
"""Python was built with Visual Studio 2003;
extensions must be built with a compiler that can generate compatible binaries.
Visual Studio 2003 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
p = r"Software\Microsoft\NET Framework Setup\Product"
for base in HKEYS:
try:
h = RegOpenKeyEx(base, p)
except RegError:
continue
key = RegEnumKey(h, 0)
d = read_values(base, r"%s\%s" % (p, key))
self.macros["$(FrameworkVersion)"] = d["version"]
def sub(self, s):
for k, v in self.macros.items():
s = s.replace(k, v)
return s
def get_build_version():
"""Return the version of MSVC that was used to build Python.
For Python 2.3 and up, the version number is included in
sys.version. For earlier versions, assume the compiler is MSVC 6.
"""
prefix = "MSC v."
i = sys.version.find(prefix)
if i == -1:
return 6
i = i + len(prefix)
s, rest = sys.version[i:].split(" ", 1)
majorVersion = int(s[:-2]) - 6
minorVersion = int(s[2:3]) / 10.0
# I don't think paths are affected by minor version in version 6
if majorVersion == 6:
minorVersion = 0
if majorVersion >= 6:
return majorVersion + minorVersion
# else we don't know what version of the compiler this is
return None
def get_build_architecture():
"""Return the processor architecture.
Possible results are "Intel", "Itanium", or "AMD64".
"""
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return "Intel"
j = sys.version.find(")", i)
return sys.version[i+len(prefix):j]
def normalize_and_reduce_paths(paths):
"""Return a list of normalized paths with duplicates removed.
The current order of paths is maintained.
"""
# Paths are normalized so things like: /a and /a/ aren't both preserved.
reduced_paths = []
for p in paths:
np = os.path.normpath(p)
# XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
if np not in reduced_paths:
reduced_paths.append(np)
return reduced_paths
class MSVCCompiler(CCompiler) :
"""Concrete class that implements an interface to Microsoft Visual C++,
as defined by the CCompiler abstract class."""
compiler_type = 'msvc'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.rc']
_mc_extensions = ['.mc']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions + _mc_extensions)
res_extension = '.res'
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
self.__version = get_build_version()
self.__arch = get_build_architecture()
if self.__arch == "Intel":
# x86
if self.__version >= 7:
self.__root = r"Software\Microsoft\VisualStudio"
self.__macros = MacroExpander(self.__version)
else:
self.__root = r"Software\Microsoft\Devstudio"
self.__product = "Visual Studio version %s" % self.__version
else:
# Win64. Assume this was built with the platform SDK
self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)
self.initialized = False
def initialize(self):
self.__paths = []
if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
# Assume that the SDK set up everything alright; don't try to be
# smarter
self.cc = "cl.exe"
self.linker = "link.exe"
self.lib = "lib.exe"
self.rc = "rc.exe"
self.mc = "mc.exe"
else:
self.__paths = self.get_msvc_paths("path")
if len(self.__paths) == 0:
raise DistutilsPlatformError("Python was built with %s, "
"and extensions need to be built with the same "
"version of the compiler, but it isn't installed."
% self.__product)
self.cc = self.find_exe("cl.exe")
self.linker = self.find_exe("link.exe")
self.lib = self.find_exe("lib.exe")
self.rc = self.find_exe("rc.exe") # resource compiler
self.mc = self.find_exe("mc.exe") # message compiler
self.set_path_env_var('lib')
self.set_path_env_var('include')
# extend the MSVC path with the current path
try:
for p in os.environ['path'].split(';'):
self.__paths.append(p)
except KeyError:
pass
self.__paths = normalize_and_reduce_paths(self.__paths)
os.environ['path'] = ";".join(self.__paths)
self.preprocess_options = None
if self.__arch == "Intel":
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
'/Z7', '/D_DEBUG']
else:
# Win64
self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
'/DNDEBUG']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
'/Z7', '/D_DEBUG']
self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
if self.__version >= 7:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
]
else:
self.ldflags_shared_debug = [
'/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
]
self.ldflags_static = [ '/nologo']
self.initialized = True
# -- Worker methods ------------------------------------------------
def object_filenames(self,
source_filenames,
strip_dir=0,
output_dir=''):
# Copied from ccompiler.py, extended to return .res as 'object'-file
# for .rc input file
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
(base, ext) = os.path.splitext (src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
# Better to raise an exception instead of silently continuing
# and later complain about sources and targets having
# different lengths
raise CompileError ("Don't know how to compile %s" % src_name)
if strip_dir:
base = os.path.basename (base)
if ext in self._rc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
elif ext in self._mc_extensions:
obj_names.append (os.path.join (output_dir,
base + self.res_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
if not self.initialized:
self.initialize()
compile_info = self._setup_compile(output_dir, macros, include_dirs,
sources, depends, extra_postargs)
macros, objects, extra_postargs, pp_opts, build = compile_info
compile_opts = extra_preargs or []
compile_opts.append ('/c')
if debug:
compile_opts.extend(self.compile_options_debug)
else:
compile_opts.extend(self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
if debug:
# pass the full pathname to MSVC in debug mode,
# this allows the debugger to find the source file
# without asking the user to browse for it
src = os.path.abspath(src)
if ext in self._c_extensions:
input_opt = "/Tc" + src
elif ext in self._cpp_extensions:
input_opt = "/Tp" + src
elif ext in self._rc_extensions:
# compile .RC to .RES file
input_opt = src
output_opt = "/fo" + obj
try:
self.spawn([self.rc] + pp_opts +
[output_opt] + [input_opt])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
elif ext in self._mc_extensions:
# Compile .MC to .RC file to .RES file.
# * '-h dir' specifies the directory for the
# generated include file
# * '-r dir' specifies the target directory of the
# generated RC file and the binary message resource
# it includes
#
# For now (since there are no options to change this),
# we use the source-directory for the include file and
# the build directory for the RC file and message
# resources. This works at least for win32all.
h_dir = os.path.dirname(src)
rc_dir = os.path.dirname(obj)
try:
# first compile .MC to .RC and .H file
self.spawn([self.mc] +
['-h', h_dir, '-r', rc_dir] + [src])
base, _ = os.path.splitext (os.path.basename (src))
rc_file = os.path.join (rc_dir, base + '.rc')
# then compile .RC to .RES file
self.spawn([self.rc] +
["/fo" + obj] + [rc_file])
except DistutilsExecError as msg:
raise CompileError(msg)
continue
else:
# how to handle this file?
raise CompileError("Don't know how to compile %s to %s"
% (src, obj))
output_opt = "/Fo" + obj
try:
self.spawn([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
return objects
def create_static_lib(self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
output_filename = self.library_filename(output_libname,
output_dir=output_dir)
if self._need_link(objects, output_filename):
lib_args = objects + ['/OUT:' + output_filename]
if debug:
pass # XXX what goes here?
try:
self.spawn([self.lib] + lib_args)
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
if not self.initialized:
self.initialize()
(objects, output_dir) = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
(libraries, library_dirs, runtime_library_dirs) = fixed_args
if runtime_library_dirs:
self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ str (runtime_library_dirs))
lib_opts = gen_lib_options(self,
library_dirs, runtime_library_dirs,
libraries)
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
if target_desc == CCompiler.EXECUTABLE:
if debug:
ldflags = self.ldflags_shared_debug[1:]
else:
ldflags = self.ldflags_shared[1:]
else:
if debug:
ldflags = self.ldflags_shared_debug
else:
ldflags = self.ldflags_shared
export_opts = []
for sym in (export_symbols or []):
export_opts.append("/EXPORT:" + sym)
ld_args = (ldflags + lib_opts + export_opts +
objects + ['/OUT:' + output_filename])
# The MSVC linker generates .lib and .exp files, which cannot be
# suppressed by any linker switches. The .lib files may even be
# needed! Make sure they are generated in the temporary build
# directory. Since they have different names for debug and release
# builds, they can go into the same directory.
if export_symbols is not None:
(dll_name, dll_ext) = os.path.splitext(
os.path.basename(output_filename))
implib_file = os.path.join(
os.path.dirname(objects[0]),
self.library_filename(dll_name))
ld_args.append ('/IMPLIB:' + implib_file)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
self.spawn([self.linker] + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "/LIBPATH:" + dir
def runtime_library_dir_option(self, dir):
raise DistutilsPlatformError(
"don't know how to set runtime library search path for MSVC++")
def library_option(self, lib):
return self.library_filename(lib)
def find_library_file(self, dirs, lib, debug=0):
# Prefer a debugging library if found (and requested), but deal
# with it if we don't have one.
if debug:
try_names = [lib + "_d", lib]
else:
try_names = [lib]
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename (name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# Helper methods for using the MSVC registry settings
def find_exe(self, exe):
"""Return path to an MSVC executable program.
Tries to find the program in several places: first, one of the
MSVC program search paths from the registry; next, the directories
in the PATH environment variable. If any of those work, return an
absolute path that is known to exist. If none of them work, just
return the original program name, 'exe'.
"""
for p in self.__paths:
fn = os.path.join(os.path.abspath(p), exe)
if os.path.isfile(fn):
return fn
# didn't find it; try existing path
for p in os.environ['Path'].split(';'):
fn = os.path.join(os.path.abspath(p),exe)
if os.path.isfile(fn):
return fn
return exe
def get_msvc_paths(self, path, platform='x86'):
"""Get a list of devstudio directories (include, lib or path).
        Return a list of strings. The list will be empty if unable to
        access the registry or if the appropriate registry keys are not found.
"""
if not _can_read_reg:
return []
path = path + " dirs"
if self.__version >= 7:
key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
% (self.__root, self.__version))
else:
key = (r"%s\6.0\Build System\Components\Platforms"
r"\Win32 (%s)\Directories" % (self.__root, platform))
for base in HKEYS:
d = read_values(base, key)
if d:
if self.__version >= 7:
return self.__macros.sub(d[path]).split(";")
else:
return d[path].split(";")
# MSVC 6 seems to create the registry entries we need only when
# the GUI is run.
if self.__version == 6:
for base in HKEYS:
if read_values(base, r"%s\6.0" % self.__root) is not None:
self.warn("It seems you have Visual Studio 6 installed, "
"but the expected registry settings are not present.\n"
"You must at least run the Visual Studio GUI once "
"so that these entries are created.")
break
return []
def set_path_env_var(self, name):
"""Set environment variable 'name' to an MSVC path type value.
This is equivalent to a SET command prior to execution of spawned
commands.
"""
if name == "lib":
p = self.get_msvc_paths("library")
else:
p = self.get_msvc_paths(name)
if p:
os.environ[name] = ';'.join(p)
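    # Example usage (illustrative sketch, not part of the upstream comments): compiler
    # setup typically pairs the two registry helpers, e.g.
    #   include_dirs = self.get_msvc_paths("include")  # read the directories from the registry
    #   self.set_path_env_var("lib")                   # export the 'lib' dirs for spawned tools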
if get_build_version() >= 8.0:
log.debug("Importing new compiler from distutils.msvc9compiler")
OldMSVCCompiler = MSVCCompiler
from distutils.msvc9compiler import MSVCCompiler
# get_build_architecture not really relevant now we support cross-compile
from distutils.msvc9compiler import MacroExpander
| gpl-2.0 | -6,819,336,377,355,405,000 | 35.607477 | 99 | 0.516029 | false |
shiftregister/shiftregister | mapping/all_lines.py | 1 | 2627 | # 1|LINESTRING(8.9871812 50.0591294,8.4325862 49.1043102)
# generate lines from every point to every other point as "longX latY" pairs (the CSV stores lat/long, so the order has to be swapped)
import csv, sys
#epsilon=sys.float_info.epsilon
epsilon=0.01 # precision: collinearity tolerance used by isBetween() below
#data = sys.argv[1]
#title = sys.argv[2]
def read_csv_file(filename):
data = []
for row in csv.reader(open(filename)):
data.append(row)
return data
# read the CSV into two parallel lists of x (longitude) and y (latitude)
data=read_csv_file('MegP_Brandenburg,_Berlin.csv')
ylatitude = []
xlongitude = []
#print data
skip=0 # row counter, used to skip the CSV header line
for row in data:
    if skip>0:
        ylatitude.append(float(row[6]))
        xlongitude.append(float(row[5]))# column order matches the export from: http://www.megalithic.co.uk/topics.php?countries=1&kmldown=1
    skip+=1
# print ylatitude
cnt=0
# connect point 0 to points 1+, point 1 to points 2+, etc. and write an "ID|LINESTRING(...)" row for each pair
for x in range(len(xlongitude)):
    for points in range(len(xlongitude)-1-x):
        print str(cnt)+"|LINESTRING("+str(xlongitude[x])+" "+str(ylatitude[x])+","+str(xlongitude[points+x+1])+" "+str(ylatitude[points+x+1])+")"
        cnt+=1
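# isBetween(a, b, c): True if point c lies (approximately) on the segment from a to b -
# the cross product checks collinearity within epsilon, and the dot product checks that
# c falls between the two endpoints.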
def isBetween(ax, ay, bx, by, cx, cy):
crossproduct = (cy - ay) * (bx - ax) - (cx - ax) * (by - ay)
if abs(crossproduct) > epsilon : return False # (or != 0 if using integers)
dotproduct = (cx - ax) * (bx - ax) + (cy - ay)*(by - ay)
if dotproduct < 0 : return False
squaredlengthba = (bx - ax)*(bx - ax) + (by - ay)*(by - ay)
if dotproduct > squaredlengthba: return False
return True
# this works - next step: find which straight lines run through several points,
# or start with the (furthest) points from, say, point 0 and check what lies on the way, and so on...
# problem is that sites close to each other skew the results - these were removed
# cnt=0
# for x in range(len(xlongitude)):
# # connect point 0 to 1+, 1 to 2+ etc and write ID and LINESTRING
# for points in range(len(xlongitude)-1):
# for between in range(len(xlongitude)-2):
# if isBetween(xlongitude[x], ylatitude[x], xlongitude[points+1], ylatitude[points+1],xlongitude[between+2], ylatitude[between+2]) and x!=(points+1) and x!=(between+2) and (points+1)!=between+2:
# # and we don't ahve already in dict
# # print "FOUND!"
# # print x, points+1, between+2
# print str(cnt)+"|LINESTRING("+str(xlongitude[x])+" "+str(ylatitude[x])+","+str(xlongitude[points+1])+" "+str(ylatitude[points+1])+","+str(xlongitude[between+2])+" "+str(ylatitude[between+2])+")"
# cnt+=1
# how to take a line (e.g. Fukushima, Chernobyl) and extend it in both directions? (one possible sketch below)
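# One possible answer (illustrative sketch, not part of the original script): extend the
# segment p1->p2 outwards past both endpoints; the function name and the default factor
# below are arbitrary choices for illustration.
def extend_line(x1, y1, x2, y2, factor=2.0):
    dx = x2 - x1 # direction vector of the segment
    dy = y2 - y1
    start = (x1 - dx*factor, y1 - dy*factor) # pushed out before the first endpoint
    end = (x2 + dx*factor, y2 + dy*factor)   # pushed out beyond the second endpoint
    return start, end
# e.g. extend_line(xlongitude[0], ylatitude[0], xlongitude[1], ylatitude[1])
# note: treating lat/long as planar coordinates is only a rough approximation over long distances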
| gpl-3.0 | -1,033,840,676,938,384,400 | 36.528571 | 212 | 0.632661 | false |