repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
shinfan/api-client-staging | generated/python/gapic-google-cloud-pubsub-v1/google/cloud/gapic/pubsub/v1/subscriber_client.py | 7 | 49157 |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/pubsub/v1/pubsub.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.pubsub.v1 Subscriber API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
from google.gax.utils import oneof
import google.gax
from google.cloud.proto.pubsub.v1 import pubsub_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import duration_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
_PageDesc = google.gax.PageDescriptor
class SubscriberClient(object):
"""
The service that an application uses to manipulate subscriptions and to
consume messages from a subscription via the ``Pull`` method.
"""
SERVICE_ADDRESS = 'pubsub.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
_PAGE_DESCRIPTORS = {
'list_subscriptions':
_PageDesc('page_token', 'next_page_token', 'subscriptions'),
'list_snapshots':
_PageDesc('page_token', 'next_page_token', 'snapshots')
}
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub', )
_PROJECT_PATH_TEMPLATE = path_template.PathTemplate('projects/{project}')
_SNAPSHOT_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/snapshots/{snapshot}')
_SUBSCRIPTION_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/subscriptions/{subscription}')
_TOPIC_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/topics/{topic}')
@classmethod
def project_path(cls, project):
"""Returns a fully-qualified project resource name string."""
return cls._PROJECT_PATH_TEMPLATE.render({
'project': project,
})
@classmethod
def snapshot_path(cls, project, snapshot):
"""Returns a fully-qualified snapshot resource name string."""
return cls._SNAPSHOT_PATH_TEMPLATE.render({
'project': project,
'snapshot': snapshot,
})
@classmethod
def subscription_path(cls, project, subscription):
"""Returns a fully-qualified subscription resource name string."""
return cls._SUBSCRIPTION_PATH_TEMPLATE.render({
'project':
project,
'subscription':
subscription,
})
@classmethod
def topic_path(cls, project, topic):
"""Returns a fully-qualified topic resource name string."""
return cls._TOPIC_PATH_TEMPLATE.render({
'project': project,
'topic': topic,
})
@classmethod
def match_project_from_project_name(cls, project_name):
"""Parses the project from a project resource.
Args:
project_name (string): A fully-qualified path representing a project
resource.
Returns:
A string representing the project.
"""
return cls._PROJECT_PATH_TEMPLATE.match(project_name).get('project')
@classmethod
def match_project_from_snapshot_name(cls, snapshot_name):
"""Parses the project from a snapshot resource.
Args:
snapshot_name (string): A fully-qualified path representing a snapshot
resource.
Returns:
A string representing the project.
"""
return cls._SNAPSHOT_PATH_TEMPLATE.match(snapshot_name).get('project')
@classmethod
def match_snapshot_from_snapshot_name(cls, snapshot_name):
"""Parses the snapshot from a snapshot resource.
Args:
snapshot_name (string): A fully-qualified path representing a snapshot
resource.
Returns:
A string representing the snapshot.
"""
return cls._SNAPSHOT_PATH_TEMPLATE.match(snapshot_name).get('snapshot')
@classmethod
def match_project_from_subscription_name(cls, subscription_name):
"""Parses the project from a subscription resource.
Args:
subscription_name (string): A fully-qualified path representing a subscription
resource.
Returns:
A string representing the project.
"""
return cls._SUBSCRIPTION_PATH_TEMPLATE.match(subscription_name).get(
'project')
@classmethod
def match_subscription_from_subscription_name(cls, subscription_name):
"""Parses the subscription from a subscription resource.
Args:
subscription_name (string): A fully-qualified path representing a subscription
resource.
Returns:
A string representing the subscription.
"""
return cls._SUBSCRIPTION_PATH_TEMPLATE.match(subscription_name).get(
'subscription')
@classmethod
def match_project_from_topic_name(cls, topic_name):
"""Parses the project from a topic resource.
Args:
topic_name (string): A fully-qualified path representing a topic
resource.
Returns:
A string representing the project.
"""
return cls._TOPIC_PATH_TEMPLATE.match(topic_name).get('project')
@classmethod
def match_topic_from_topic_name(cls, topic_name):
"""Parses the topic from a topic resource.
Args:
topic_name (string): A fully-qualified path representing a topic
resource.
Returns:
A string representing the topic.
"""
return cls._TOPIC_PATH_TEMPLATE.match(topic_name).get('topic')
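# A minimal round-trip sketch (illustrative only): the *_path helpers and the
# match_* parsers are inverses of one another. 'my-project' and 'my-sub' below
# are hypothetical placeholders, not real resources.
#
#   >>> name = SubscriberClient.subscription_path('my-project', 'my-sub')
#   >>> name
#   'projects/my-project/subscriptions/my-sub'
#   >>> SubscriberClient.match_project_from_subscription_name(name)
#   'my-project'
#   >>> SubscriberClient.match_subscription_from_subscription_name(name)
#   'my-sub'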
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A SubscriberClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'gapic-google-cloud-pubsub-v1', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'subscriber_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.pubsub.v1.Subscriber',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers,
page_descriptors=self._PAGE_DESCRIPTORS, )
self.iam_policy_stub = config.create_stub(
iam_policy_pb2.IAMPolicyStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self.subscriber_stub = config.create_stub(
pubsub_pb2.SubscriberStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._create_subscription = api_callable.create_api_call(
self.subscriber_stub.CreateSubscription,
settings=defaults['create_subscription'])
self._get_subscription = api_callable.create_api_call(
self.subscriber_stub.GetSubscription,
settings=defaults['get_subscription'])
self._update_subscription = api_callable.create_api_call(
self.subscriber_stub.UpdateSubscription,
settings=defaults['update_subscription'])
self._list_subscriptions = api_callable.create_api_call(
self.subscriber_stub.ListSubscriptions,
settings=defaults['list_subscriptions'])
self._delete_subscription = api_callable.create_api_call(
self.subscriber_stub.DeleteSubscription,
settings=defaults['delete_subscription'])
self._modify_ack_deadline = api_callable.create_api_call(
self.subscriber_stub.ModifyAckDeadline,
settings=defaults['modify_ack_deadline'])
self._acknowledge = api_callable.create_api_call(
self.subscriber_stub.Acknowledge, settings=defaults['acknowledge'])
self._pull = api_callable.create_api_call(
self.subscriber_stub.Pull, settings=defaults['pull'])
self._streaming_pull = api_callable.create_api_call(
self.subscriber_stub.StreamingPull,
settings=defaults['streaming_pull'])
self._modify_push_config = api_callable.create_api_call(
self.subscriber_stub.ModifyPushConfig,
settings=defaults['modify_push_config'])
self._list_snapshots = api_callable.create_api_call(
self.subscriber_stub.ListSnapshots,
settings=defaults['list_snapshots'])
self._create_snapshot = api_callable.create_api_call(
self.subscriber_stub.CreateSnapshot,
settings=defaults['create_snapshot'])
self._delete_snapshot = api_callable.create_api_call(
self.subscriber_stub.DeleteSnapshot,
settings=defaults['delete_snapshot'])
self._seek = api_callable.create_api_call(
self.subscriber_stub.Seek, settings=defaults['seek'])
self._set_iam_policy = api_callable.create_api_call(
self.iam_policy_stub.SetIamPolicy,
settings=defaults['set_iam_policy'])
self._get_iam_policy = api_callable.create_api_call(
self.iam_policy_stub.GetIamPolicy,
settings=defaults['get_iam_policy'])
self._test_iam_permissions = api_callable.create_api_call(
self.iam_policy_stub.TestIamPermissions,
settings=defaults['test_iam_permissions'])
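# A minimal construction sketch, assuming application default credentials are
# available in the environment; every keyword shown is optional and the values
# are placeholders rather than recommended settings.
#
#   client = SubscriberClient(
#       scopes=['https://www.googleapis.com/auth/pubsub'],
#       lib_name='my-wrapper',
#       lib_version='0.1.0')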
# Service calls
def create_subscription(self,
name,
topic,
push_config=None,
ack_deadline_seconds=None,
retain_acked_messages=None,
message_retention_duration=None,
options=None):
"""
Creates a subscription to a given topic.
If the subscription already exists, returns ``ALREADY_EXISTS``.
If the corresponding topic doesn't exist, returns ``NOT_FOUND``.
If the name is not provided in the request, the server will assign a random
name for this subscription on the same project as the topic, conforming
to the
`resource name format <https://cloud.google.com/pubsub/docs/overview#names>`_.
The generated name is populated in the returned Subscription object.
Note that for REST API requests, you must specify a name in the request.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> name = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> topic = client.topic_path('[PROJECT]', '[TOPIC]')
>>> response = client.create_subscription(name, topic)
Args:
name (string): The name of the subscription. It must have the format
``\"projects/{project}/subscriptions/{subscription}\"``. ``{subscription}`` must
start with a letter, and contain only letters (``[A-Za-z]``), numbers
(``[0-9]``), dashes (``-``), underscores (``_``), periods (``.``), tildes (``~``),
plus (``+``) or percent signs (``%``). It must be between 3 and 255 characters
in length, and it must not start with ``\"goog\"``.
topic (string): The name of the topic from which this subscription is receiving messages.
Format is ``projects/{project}/topics/{topic}``.
The value of this field will be ``_deleted-topic_`` if the topic has been
deleted.
push_config (:class:`google.cloud.proto.pubsub.v1.pubsub_pb2.PushConfig`): If push delivery is used with this subscription, this field is
used to configure it. An empty ``pushConfig`` signifies that the subscriber
will pull and ack messages using API methods.
ack_deadline_seconds (int): This value is the maximum time after a subscriber receives a message
before the subscriber should acknowledge the message. After message
delivery but before the ack deadline expires and before the message is
acknowledged, it is an outstanding message and will not be delivered
again during that time (on a best-effort basis).
For pull subscriptions, this value is used as the initial value for the ack
deadline. To override this value for a given message, call
``ModifyAckDeadline`` with the corresponding ``ack_id`` if using
pull.
The minimum custom deadline you can specify is 10 seconds.
The maximum custom deadline you can specify is 600 seconds (10 minutes).
If this parameter is 0, a default value of 10 seconds is used.
For push delivery, this value is also used to set the request timeout for
the call to the push endpoint.
If the subscriber never acknowledges the message, the Pub/Sub
system will eventually redeliver the message.
retain_acked_messages (bool): Indicates whether to retain acknowledged messages. If true, then
messages are not expunged from the subscription's backlog, even if they are
acknowledged, until they fall out of the ``message_retention_duration``
window.
message_retention_duration (:class:`google.protobuf.duration_pb2.Duration`): How long to retain unacknowledged messages in the subscription's backlog,
from the moment a message is published.
If ``retain_acked_messages`` is true, then this also configures the retention
of acknowledged messages, and thus configures how far back in time a ``Seek``
can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10
minutes.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.cloud.proto.pubsub.v1.pubsub_pb2.Subscription` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.Subscription(
name=name,
topic=topic,
push_config=push_config,
ack_deadline_seconds=ack_deadline_seconds,
retain_acked_messages=retain_acked_messages,
message_retention_duration=message_retention_duration)
return self._create_subscription(request, options)
def get_subscription(self, subscription, options=None):
"""
Gets the configuration details of a subscription.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> response = client.get_subscription(subscription)
Args:
subscription (string): The name of the subscription to get.
Format is ``projects/{project}/subscriptions/{sub}``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.cloud.proto.pubsub.v1.pubsub_pb2.Subscription` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.GetSubscriptionRequest(subscription=subscription)
return self._get_subscription(request, options)
def update_subscription(self, subscription, update_mask, options=None):
"""
Updates an existing subscription. Note that certain properties of a
subscription, such as its topic, are not modifiable.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> from google.cloud.proto.pubsub.v1 import pubsub_pb2
>>> from google.protobuf import field_mask_pb2
>>> client = subscriber_client.SubscriberClient()
>>> subscription = pubsub_pb2.Subscription()
>>> update_mask = field_mask_pb2.FieldMask()
>>> response = client.update_subscription(subscription, update_mask)
Args:
subscription (:class:`google.cloud.proto.pubsub.v1.pubsub_pb2.Subscription`): The updated subscription object.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Indicates which fields in the provided subscription to update.
Must be specified and non-empty.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.cloud.proto.pubsub.v1.pubsub_pb2.Subscription` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.UpdateSubscriptionRequest(
subscription=subscription, update_mask=update_mask)
return self._update_subscription(request, options)
def list_subscriptions(self, project, page_size=None, options=None):
"""
Lists matching subscriptions.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> client = subscriber_client.SubscriberClient()
>>> project = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_subscriptions(project):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in client.list_subscriptions(project, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
project (string): The name of the cloud project that subscriptions belong to.
Format is ``projects/{project}``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.cloud.proto.pubsub.v1.pubsub_pb2.Subscription` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.ListSubscriptionsRequest(
project=project, page_size=page_size)
return self._list_subscriptions(request, options)
def delete_subscription(self, subscription, options=None):
"""
Deletes an existing subscription. All messages retained in the subscription
are immediately dropped. Calls to ``Pull`` after deletion will return
``NOT_FOUND``. After a subscription is deleted, a new one may be created with
the same name, but the new one has no association with the old
subscription or its topic unless the same topic is specified.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> client.delete_subscription(subscription)
Args:
subscription (string): The subscription to delete.
Format is ``projects/{project}/subscriptions/{sub}``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.DeleteSubscriptionRequest(
subscription=subscription)
self._delete_subscription(request, options)
def modify_ack_deadline(self,
subscription,
ack_ids,
ack_deadline_seconds,
options=None):
"""
Modifies the ack deadline for a specific message. This method is useful
to indicate that more time is needed to process a message by the
subscriber, or to make the message available for redelivery if the
processing was interrupted. Note that this does not modify the
subscription-level ``ackDeadlineSeconds`` used for subsequent messages.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> ack_ids = []
>>> ack_deadline_seconds = 0
>>> client.modify_ack_deadline(subscription, ack_ids, ack_deadline_seconds)
Args:
subscription (string): The name of the subscription.
Format is ``projects/{project}/subscriptions/{sub}``.
ack_ids (list[string]): List of acknowledgment IDs.
ack_deadline_seconds (int): The new ack deadline with respect to the time this request was sent to
the Pub/Sub system. For example, if the value is 10, the new
ack deadline will expire 10 seconds after the ``ModifyAckDeadline`` call
was made. Specifying zero may immediately make the message available for
another pull request.
The minimum deadline you can specify is 0 seconds.
The maximum deadline you can specify is 600 seconds (10 minutes).
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.ModifyAckDeadlineRequest(
subscription=subscription,
ack_ids=ack_ids,
ack_deadline_seconds=ack_deadline_seconds)
self._modify_ack_deadline(request, options)
def acknowledge(self, subscription, ack_ids, options=None):
"""
Acknowledges the messages associated with the ``ack_ids`` in the
``AcknowledgeRequest``. The Pub/Sub system can remove the relevant messages
from the subscription.
Acknowledging a message whose ack deadline has expired may succeed,
but such a message may be redelivered later. Acknowledging a message more
than once will not result in an error.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> ack_ids = []
>>> client.acknowledge(subscription, ack_ids)
Args:
subscription (string): The subscription whose message is being acknowledged.
Format is ``projects/{project}/subscriptions/{sub}``.
ack_ids (list[string]): The acknowledgment ID for the messages being acknowledged that was returned
by the Pub/Sub system in the ``Pull`` response. Must not be empty.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.AcknowledgeRequest(
subscription=subscription, ack_ids=ack_ids)
self._acknowledge(request, options)
def pull(self,
subscription,
max_messages,
return_immediately=None,
options=None):
"""
Pulls messages from the server. Returns an empty list if there are no
messages available in the backlog. The server may return ``UNAVAILABLE`` if
there are too many concurrent pull requests pending for the given
subscription.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> max_messages = 0
>>> response = client.pull(subscription, max_messages)
Args:
subscription (string): The subscription from which messages should be pulled.
Format is ``projects/{project}/subscriptions/{sub}``.
max_messages (int): The maximum number of messages returned for this request. The Pub/Sub
system may return fewer than the number specified.
return_immediately (bool): If this field is set to true, the system will respond immediately even if
there are no messages available to return in the ``Pull`` response.
Otherwise, the system may wait (for a bounded amount of time) until at
least one message is available, rather than returning no messages. The
client may cancel the request if it does not wish to wait any longer for
the response.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.cloud.proto.pubsub.v1.pubsub_pb2.PullResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.PullRequest(
subscription=subscription,
max_messages=max_messages,
return_immediately=return_immediately)
return self._pull(request, options)
def streaming_pull(self, requests, options=None):
"""
(EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will
respond with UNIMPLEMENTED errors unless you have been invited to test
this feature. Contact [email protected] with any questions.
Establishes a stream with the server, which sends messages down to the
client. The client streams acknowledgements and ack deadline modifications
back to the server. The server will close the stream and return the status
on any error. The server may close the stream with status ``OK`` to reassign
server-side resources, in which case, the client should re-establish the
stream. ``UNAVAILABLE`` may also be returned in the case of a transient error
(e.g., a server restart). These should also be retried by the client. Flow
control can be achieved by configuring the underlying RPC channel.
EXPERIMENTAL: This method interface might change in the future.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> from google.cloud.proto.pubsub.v1 import pubsub_pb2
>>> client = subscriber_client.SubscriberClient()
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> stream_ack_deadline_seconds = 0
>>> request = pubsub_pb2.StreamingPullRequest(subscription=subscription, stream_ack_deadline_seconds=stream_ack_deadline_seconds)
>>> requests = [request]
>>> for element in client.streaming_pull(requests):
>>> # process element
>>> pass
Args:
requests (iterator[:class:`google.cloud.proto.pubsub.v1.pubsub_pb2.StreamingPullRequest`]): The input objects.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
iterator[:class:`google.cloud.proto.pubsub.v1.pubsub_pb2.StreamingPullResponse`].
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
return self._streaming_pull(requests, options)
def modify_push_config(self, subscription, push_config, options=None):
"""
Modifies the ``PushConfig`` for a specified subscription.
This may be used to change a push subscription to a pull one (signified by
an empty ``PushConfig``) or vice versa, or change the endpoint URL and other
attributes of a push subscription. Messages will accumulate for delivery
continuously through the call regardless of changes to the ``PushConfig``.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> from google.cloud.proto.pubsub.v1 import pubsub_pb2
>>> client = subscriber_client.SubscriberClient()
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> push_config = pubsub_pb2.PushConfig()
>>> client.modify_push_config(subscription, push_config)
Args:
subscription (string): The name of the subscription.
Format is ``projects/{project}/subscriptions/{sub}``.
push_config (:class:`google.cloud.proto.pubsub.v1.pubsub_pb2.PushConfig`): The push configuration for future deliveries.
An empty ``pushConfig`` indicates that the Pub/Sub system should
stop pushing messages from the given subscription and allow
messages to be pulled and acknowledged - effectively pausing
the subscription if ``Pull`` is not called.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.ModifyPushConfigRequest(
subscription=subscription, push_config=push_config)
self._modify_push_config(request, options)
def list_snapshots(self, project, page_size=None, options=None):
"""
Lists the existing snapshots.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> from google.gax import CallOptions, INITIAL_PAGE
>>> client = subscriber_client.SubscriberClient()
>>> project = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_snapshots(project):
>>> # process element
>>> pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in client.list_snapshots(project, options=CallOptions(page_token=INITIAL_PAGE)):
>>> for element in page:
>>> # process element
>>> pass
Args:
project (string): The name of the cloud project that snapshots belong to.
Format is ``projects/{project}``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.gax.PageIterator` instance. By default, this
is an iterable of :class:`google.cloud.proto.pubsub.v1.pubsub_pb2.Snapshot` instances.
This object can also be configured to iterate over the pages
of the response through the `CallOptions` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.ListSnapshotsRequest(
project=project, page_size=page_size)
return self._list_snapshots(request, options)
def create_snapshot(self, name, subscription, options=None):
"""
Creates a snapshot from the requested subscription.
If the snapshot already exists, returns ``ALREADY_EXISTS``.
If the requested subscription doesn't exist, returns ``NOT_FOUND``.
If the name is not provided in the request, the server will assign a random
name for this snapshot on the same project as the subscription, conforming
to the
`resource name format <https://cloud.google.com/pubsub/docs/overview#names>`_.
The generated name is populated in the returned Snapshot object.
Note that for REST API requests, you must specify a name in the request.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> name = client.snapshot_path('[PROJECT]', '[SNAPSHOT]')
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> response = client.create_snapshot(name, subscription)
Args:
name (string): Optional user-provided name for this snapshot.
If the name is not provided in the request, the server will assign a random
name for this snapshot on the same project as the subscription.
Note that for REST API requests, you must specify a name.
Format is ``projects/{project}/snapshots/{snap}``.
subscription (string): The subscription whose backlog the snapshot retains.
Specifically, the created snapshot is guaranteed to retain:
(a) The existing backlog on the subscription. More precisely, this is
defined as the messages in the subscription's backlog that are
unacknowledged upon the successful completion of the
``CreateSnapshot`` request; as well as
(b) Any messages published to the subscription's topic following the
successful completion of the ``CreateSnapshot`` request.
Format is ``projects/{project}/subscriptions/{sub}``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.cloud.proto.pubsub.v1.pubsub_pb2.Snapshot` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.CreateSnapshotRequest(
name=name, subscription=subscription)
return self._create_snapshot(request, options)
def delete_snapshot(self, snapshot, options=None):
"""
Removes an existing snapshot. All messages retained in the snapshot
are immediately dropped. After a snapshot is deleted, a new one may be
created with the same name, but the new one has no association with the old
snapshot or its subscription, unless the same subscription is specified.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> snapshot = client.snapshot_path('[PROJECT]', '[SNAPSHOT]')
>>> client.delete_snapshot(snapshot)
Args:
snapshot (string): The name of the snapshot to delete.
Format is ``projects/{project}/snapshots/{snap}``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = pubsub_pb2.DeleteSnapshotRequest(snapshot=snapshot)
self._delete_snapshot(request, options)
def seek(self, subscription, time=None, snapshot=None, options=None):
"""
Seeks an existing subscription to a point in time or to a given snapshot,
whichever is provided in the request.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> response = client.seek(subscription)
Args:
subscription (string): The subscription to affect.
time (:class:`google.protobuf.timestamp_pb2.Timestamp`): The time to seek to.
Messages retained in the subscription that were published before this
time are marked as acknowledged, and messages retained in the
subscription that were published after this time are marked as
unacknowledged. Note that this operation affects only those messages
retained in the subscription (configured by the combination of
``message_retention_duration`` and ``retain_acked_messages``). For example,
if ``time`` corresponds to a point before the message retention
window (or to a point before the system's notion of the subscription
creation time), only retained messages will be marked as unacknowledged,
and already-expunged messages will not be restored.
snapshot (string): The snapshot to seek to. The snapshot's topic must be the same as that of
the provided subscription.
Format is ``projects/{project}/snapshots/{snap}``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.cloud.proto.pubsub.v1.pubsub_pb2.SeekResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
oneof.check_oneof(
time=time,
snapshot=snapshot, )
# Create the request object.
request = pubsub_pb2.SeekRequest(
subscription=subscription, time=time, snapshot=snapshot)
return self._seek(request, options)
def set_iam_policy(self, resource, policy, options=None):
"""
Sets the access control policy on the specified resource. Replaces any
existing policy.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> from google.iam.v1 import policy_pb2
>>> client = subscriber_client.SubscriberClient()
>>> resource = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> policy = policy_pb2.Policy()
>>> response = client.set_iam_policy(resource, policy)
Args:
resource (string): REQUIRED: The resource for which the policy is being specified.
``resource`` is usually specified as a path. For example, a Project
resource is specified as ``projects/{project}``.
policy (:class:`google.iam.v1.policy_pb2.Policy`): REQUIRED: The complete policy to be applied to the ``resource``. The size of
the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.iam.v1.policy_pb2.Policy` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = iam_policy_pb2.SetIamPolicyRequest(
resource=resource, policy=policy)
return self._set_iam_policy(request, options)
def get_iam_policy(self, resource, options=None):
"""
Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does not have a policy
set.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> resource = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> response = client.get_iam_policy(resource)
Args:
resource (string): REQUIRED: The resource for which the policy is being requested.
``resource`` is usually specified as a path. For example, a Project
resource is specified as ``projects/{project}``.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.iam.v1.policy_pb2.Policy` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
return self._get_iam_policy(request, options)
def test_iam_permissions(self, resource, permissions, options=None):
"""
Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a NOT_FOUND error.
Example:
>>> from google.cloud.gapic.pubsub.v1 import subscriber_client
>>> client = subscriber_client.SubscriberClient()
>>> resource = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>> permissions = []
>>> response = client.test_iam_permissions(resource, permissions)
Args:
resource (string): REQUIRED: The resource for which the policy detail is being requested.
``resource`` is usually specified as a path. For example, a Project
resource is specified as ``projects/{project}``.
permissions (list[string]): The set of permissions to check for the ``resource``. Permissions with
wildcards (such as '*' or 'storage.*') are not allowed. For more
information see
`IAM Overview <https://cloud.google.com/iam/docs/overview#permissions>`_.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g., timeout, retries, etc.
Returns:
A :class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
# Create the request object.
request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions)
return self._test_iam_permissions(request, options)
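if __name__ == '__main__':
    # A minimal end-to-end sketch, assuming application default credentials and
    # an existing subscription; '[PROJECT]' and '[SUBSCRIPTION]' are placeholders.
    client = SubscriberClient()
    subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
    # Pull a small batch without blocking, print the payloads, then ack them.
    response = client.pull(subscription, max_messages=10, return_immediately=True)
    for received in response.received_messages:
        print(received.message.data)
    if response.received_messages:
        client.acknowledge(
            subscription, [m.ack_id for m in response.received_messages])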
| bsd-3-clause |
mortii/reddit_instagram_bot | instagram_scraper.py | 1 | 3201 |
import json
import urllib2 as urllib
import re as regex
from bs4 import BeautifulSoup
import streamable
import imgur
class Instagram:
def __init__(self, url):
url_reader = urllib.urlopen(url)
soup = BeautifulSoup(url_reader, 'html.parser')
self.url = url
self.user = self._get_user(soup)
self.title, self.time = self._get_title_and_time(soup)
self.caption = self._get_caption(soup)
self.pic_url = self._get_pic_url(soup)
self.vid_url = self._get_vid_url(soup)
if self.vid_url is not None:
self.media = 'Video'
self.mirror_url = streamable.upload_video(self.vid_url)
else:
self.media = 'Image'
self.mirror_url = imgur.upload_picture(self.pic_url)
def _get_user(self, soup):
tag = soup.findAll("meta", {"content": regex.compile(r'@')})
for tag_attributes in tag:
content = tag_attributes.get('content')
user = self._slice_user(content)
return user
def _slice_user(self, content):
begin = content.find("@")
content = content[begin:]
end = content.find(" ")
user = content[:end]
return user
def _get_title_and_time(self, soup):
tag = soup.findAll("meta", {"content": regex.compile(r'.UTC')})
for tag_attributes in tag:
content = tag_attributes.get("content")
content = self._remove_default_name(content)
title, time = self._slice_title_and_time(content)
return title, time
def _remove_default_name(self, content):
# done in order to avoid having '@username' twice in the mirror header
has_default_name = content.find('by @')
if has_default_name != -1:
content = content.replace("by @", "by ")
return content
def _slice_title_and_time(self, text):
DATETIME_PATTERN = r'[a-zA-Z]+\s[\d]+,\s20[\d]{2}\sat\s[\d:pmam]+\sUTC'
reg = regex.search(DATETIME_PATTERN, text)
time_start = reg.start()
title = text[:time_start - 2]
time = text[time_start:]
return title, time
def _get_caption(self, soup):
all_text = str(soup)
window_data = self._get_window_data(all_text)
rough_caption = self._get_json_caption(window_data)
caption = self._clean_caption(rough_caption)
return caption
def _get_window_data(self, all_text):
WINDOW_START_PATTERN = r'<script type="text/javascript">window._sharedData'
WINDOW_END_PATTERN = r'};</script>'
reg_start = regex.search(WINDOW_START_PATTERN, all_text)
reg_end = regex.search(WINDOW_END_PATTERN, all_text)
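# Assuming the page embeds the usual '<script type="text/javascript">window._sharedData = '
# prefix, the +52 offset below skips exactly that prefix so 'begin' lands on
# the opening '{' of the embedded JSON blob.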
begin = reg_start.start() + 52
end = reg_end.start() + 1
window_data = all_text[begin:end]
return window_data
def _get_json_caption(self, window_data):
window_data = json.loads(window_data)
for post in window_data['entry_data']['PostPage']:
try:
return post['media']['caption']
except KeyError:
return ""
def _clean_caption(self, rough_caption):
caption = regex.sub(r'#', r'\#', rough_caption)
caption = regex.sub(r'\n', r'\n\n>', caption)
caption = regex.sub(r'\"', r'"', caption)
return caption
def _get_pic_url(self, soup):
tag = soup.findAll("meta", {"content": regex.compile(r'.jpg\?')})
for pic_url in tag:
return pic_url.get("content")
def _get_vid_url(self, soup):
tag = soup.findAll("meta", {"content": regex.compile(r'.mp4')})
for vid_url in tag:
return vid_url.get("content")
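if __name__ == '__main__':
    # A minimal usage sketch; the URL is a hypothetical placeholder, and the
    # mirror upload (streamable/imgur) requires working API credentials.
    post = Instagram('https://www.instagram.com/p/EXAMPLE/')
    print('%s post by %s at %s' % (post.media, post.user, post.time))
    print(post.title)
    print(post.mirror_url)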
| gpl-3.0 |
Michal-Fularz/decision_tree | decision_trees/own_parfit/plot.py | 2 | 6724 |
import matplotlib.pyplot as plt
import matplotlib.colorbar as cb
import numpy as np
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
__all__ = ["plot1DGrid", "plot2DGrid", "plot3DGrid", "plotScores"]
def plot1DGrid(scores, paramsToPlot, scoreLabel, vrange):
"""
Makes a line plot of scores, over the parameter to plot
:param scores: A list of scores, estimated using scoreModels
:param paramsToPlot: The parameter to plot, chosen automatically by plotScores
:param scoreLabel: The specified score label (dependent on scoring metric used)
:param vrange: The yrange of the plot
"""
key = list(paramsToPlot.keys())
plt.figure(figsize=(int(round(len(paramsToPlot[key[0]]) / 1.33)), 6))
plt.plot(np.linspace(0, max(paramsToPlot[key[0]]), len(
paramsToPlot[key[0]])), scores, '-or')
plt.xlabel(key[0])
plt.xticks(np.linspace(0, max(paramsToPlot[key[0]]), len(
paramsToPlot[key[0]])), paramsToPlot[key[0]])
if scoreLabel is not None:
plt.ylabel(scoreLabel)
else:
plt.ylabel('Score')
if vrange is not None:
plt.ylim(vrange[0], vrange[1])
plt.box(on=False)
plt.show()
def plot2DGrid(scores, paramsToPlot, keysToPlot, scoreLabel, vrange):
"""
Plots a heatmap of scores, over the paramsToPlot
:param scores: A list of scores, estimated using parallelizeScore
:param paramsToPlot: The parameters to plot, chosen automatically by plotScores
:param scoreLabel: The specified score label (dependent on scoring metric used)
:param vrange: The visible range of the heatmap (range you wish the heatmap to be specified over)
"""
scoreGrid = np.reshape(
scores, (len(paramsToPlot[keysToPlot[0]]), len(paramsToPlot[keysToPlot[1]])))
plt.figure(figsize=(int(round(len(paramsToPlot[keysToPlot[1]]) / 1.33)), int(
round(len(paramsToPlot[keysToPlot[0]]) / 1.33))))
if vrange is not None:
plt.imshow(scoreGrid, cmap='jet', vmin=vrange[0], vmax=vrange[1])
else:
plt.imshow(scoreGrid, cmap='jet')
plt.xlabel(keysToPlot[1])
plt.xticks(
np.arange(len(paramsToPlot[keysToPlot[1]])), paramsToPlot[keysToPlot[1]])
plt.ylabel(keysToPlot[0])
plt.yticks(
np.arange(len(paramsToPlot[keysToPlot[0]])), paramsToPlot[keysToPlot[0]])
if scoreLabel is not None:
plt.title(scoreLabel)
else:
plt.title('Score')
plt.colorbar()
plt.box(on=False)
plt.show()
def plot3DGrid(scores, paramsToPlot, keysToPlot, scoreLabel, vrange):
"""
Plots a grid of heatmaps of scores, over the paramsToPlot
:param scores: A list of scores, estimated using parallelizeScore
:param paramsToPlot: The parameters to plot, chosen automatically by plotScores
:param scoreLabel: The specified score label (dependent on scoring metric used)
:param vrange: The visible range of the heatmap (range you wish the heatmap to be specified over)
"""
vmin = np.min(scores)
vmax = np.max(scores)
scoreGrid = np.reshape(scores, (len(paramsToPlot[keysToPlot[0]]), len(
paramsToPlot[keysToPlot[1]]), len(paramsToPlot[keysToPlot[2]])))
smallest_dim = np.argmin(scoreGrid.shape)
if smallest_dim != 2:
scoreGrid = np.swapaxes(scoreGrid, smallest_dim, 2)
keysToPlot[smallest_dim], keysToPlot[2] = keysToPlot[2], keysToPlot[smallest_dim]
nelements = scoreGrid.shape[2]
nrows = np.floor(nelements ** 0.5).astype(int)
ncols = np.ceil(1. * nelements / nrows).astype(int)
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex='all', sharey='all', figsize=(int(round(len(
paramsToPlot[keysToPlot[1]]) * ncols * 1.33)), int(round(len(paramsToPlot[keysToPlot[0]]) * nrows * 1.33))))
i = 0
for ax in axes.flat:
if vrange is not None:
im = ax.imshow(scoreGrid[:, :, i], cmap='jet',
vmin=vrange[0], vmax=vrange[1])
else:
im = ax.imshow(scoreGrid[:, :, i],
cmap='jet', vmin=vmin, vmax=vmax)
ax.set_xlabel(keysToPlot[1])
ax.set_xticks(np.arange(len(paramsToPlot[keysToPlot[1]])))
ax.set_xticklabels(paramsToPlot[keysToPlot[1]])
ax.set_ylabel(keysToPlot[0])
ax.set_yticks(np.arange(len(paramsToPlot[keysToPlot[0]])))
ax.set_yticklabels(paramsToPlot[keysToPlot[0]])
ax.set_title(keysToPlot[2] + ' = ' +
str(paramsToPlot[keysToPlot[2]][i]))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
i += 1
if i == nelements:
break
if scoreLabel is not None:
fig.suptitle(scoreLabel, fontsize=18)
else:
fig.suptitle('Score', fontsize=18)
fig.subplots_adjust(right=0.8)
cbar = cb.make_axes(ax, location='right', fraction=0.03)
fig.colorbar(im, cax=cbar[0])
plt.show()
def plotScores(scores, paramGrid, scoreLabel=None, vrange=None):
"""
Makes a plot representing how the scores vary over the parameter grid
Automatically decides whether to use a simple line plot (varying over one parameter)
or a heatmap (varying over two parameters)
:param scores: A list of scores, estimated using scoreModels
:param paramGrid: The parameter grid specified when fitting the models using fitModels
:param scoreLabel: The specified label (dependent on scoring metric used), e.g. 'AUC'
:param vrange: The visible range over which to display the scores
:return:
"""
keys = sorted(list(paramGrid)[0].keys())
uniqParams = dict()
order = dict()
for k in keys:
order[k] = np.unique([params[k] if params[k] is not None else 'None'
for params in list(paramGrid)], return_index=True)[1]
uniqParams[k] = [params[k]
for params in np.asarray(list(paramGrid))[sorted(order[k])]]
keysToPlot = list()
for k in keys:
if len(uniqParams[k]) > 1:
keysToPlot.append(k)
for k in keys:
if k not in keysToPlot:
uniqParams.pop(k, None)
numDim = len(keysToPlot)
if numDim > 3:
print('Too many dimensions to plot.')
elif numDim == 3:
plot3DGrid(scores, uniqParams, keysToPlot, scoreLabel, vrange)
elif numDim == 2:
plot2DGrid(scores, uniqParams, keysToPlot, scoreLabel, vrange)
elif numDim == 1:
plot1DGrid(scores, uniqParams, scoreLabel, vrange)
else:
print('No parameters that vary in the grid')
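if __name__ == '__main__':
    # A small self-contained sketch with made-up scores over a two-parameter
    # grid; 'C' and 'gamma' are placeholder hyper-parameter names, so this
    # renders a 3x2 heatmap via plot2DGrid.
    paramGrid = [{'C': c, 'gamma': g} for c in (0.1, 1.0, 10.0) for g in (0.01, 0.1)]
    scores = [0.71, 0.74, 0.78, 0.81, 0.77, 0.69]
    plotScores(scores, paramGrid, scoreLabel='AUC')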
| mit |
thanhacun/odoo | addons/crm_profiling/wizard/__init__.py | 438 | 1080 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import open_questionnaire
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wkeyword/pip | pip/pep425tags.py | 249 | 4427 |
"""Generate and work with PEP 425 Compatibility Tags."""
from __future__ import absolute_import
import re
import sys
import warnings
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
return ''.join(map(str, sys.version_info[:2]))
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None, noarch=False):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
major = sys.version_info[0]
# Support all previous minor Python versions.
for minor in range(sys.version_info[1], -1, -1):
versions.append(''.join(map(str, (major, minor))))
impl = get_abbr_impl()
abis = []
try:
soabi = sysconfig.get_config_var('SOABI')
except IOError as e: # Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
soabi = None
if soabi and soabi.startswith('cpython-'):
abis[0:0] = ['cp' + soabi.split('-')[1]]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
if not noarch:
arch = get_platform()
if sys.platform == 'darwin':
# support macosx-10.6-intel on macosx-10.9-x86_64
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
actual_arches = [actual_arch]
if actual_arch in ('i386', 'ppc'):
actual_arches.append('fat')
if actual_arch in ('i386', 'x86_64'):
actual_arches.append('intel')
if actual_arch in ('i386', 'ppc', 'x86_64'):
actual_arches.append('fat3')
if actual_arch in ('ppc64', 'x86_64'):
actual_arches.append('fat64')
if actual_arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
actual_arches.append('universal')
tpl = '{0}_{1}_%i_%s'.format(name, major)
arches = []
for m in range(int(minor) + 1):
for a in actual_arches:
arches.append(tpl % (m, a))
else:
# arch pattern didn't match (?!)
arches = [arch]
else:
arches = [arch]
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in arches:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# Has binaries, does not use the Python API:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
supported_tags = get_supported()
supported_tags_noarch = get_supported(noarch=True)
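if __name__ == '__main__':
    # Quick illustration of the module-level results computed above. On a
    # hypothetical CPython 2.7 Linux build the first entry might look like
    # ('cp27', 'cp27mu', 'linux_x86_64'); the exact tags depend on the
    # interpreter and platform.
    for tag in supported_tags[:5]:
        print(tag)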
| mit |
Hasimir/brython | www/src/Lib/importlib/abc.py | 743 | 14595 |
"""Abstract base classes related to import."""
from . import _bootstrap
from . import machinery
try:
import _frozen_importlib
except ImportError as exc:
if exc.name != '_frozen_importlib':
raise
_frozen_importlib = None
import abc
import imp
import marshal
import sys
import tokenize
import warnings
def _register(abstract_cls, *classes):
for cls in classes:
abstract_cls.register(cls)
if _frozen_importlib is not None:
frozen_cls = getattr(_frozen_importlib, cls.__name__)
abstract_cls.register(frozen_cls)
class Finder(metaclass=abc.ABCMeta):
"""Legacy abstract base class for import finders.
It may be subclassed for compatibility with legacy third party
reimplementations of the import system. Otherwise, finder
implementations should derive from the more specific MetaPathFinder
or PathEntryFinder ABCs.
"""
@abc.abstractmethod
def find_module(self, fullname, path=None):
"""An abstract method that should find a module.
The fullname is a str and the optional path is a str or None.
Returns a Loader object.
"""
raise NotImplementedError
class MetaPathFinder(Finder):
"""Abstract base class for import finders on sys.meta_path."""
@abc.abstractmethod
def find_module(self, fullname, path):
"""Abstract method which, when implemented, should find a module.
The fullname is a str and the path is a str or None.
Returns a Loader object.
"""
raise NotImplementedError
def invalidate_caches(self):
"""An optional method for clearing the finder's cache, if any.
This method is used by importlib.invalidate_caches().
"""
return NotImplemented
_register(MetaPathFinder, machinery.BuiltinImporter, machinery.FrozenImporter,
machinery.PathFinder, machinery.WindowsRegistryFinder)
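# A minimal illustrative sketch (hypothetical, not part of the module): a
# concrete MetaPathFinder only needs to implement find_module. This one never
# claims any module, so putting an instance on sys.meta_path is a no-op.
class _ExampleNullFinder(MetaPathFinder):
    def find_module(self, fullname, path):
        # Returning None tells the import machinery to keep searching.
        return None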
class PathEntryFinder(Finder):
"""Abstract base class for path entry finders used by PathFinder."""
@abc.abstractmethod
def find_loader(self, fullname):
"""Abstract method which, when implemented, returns a module loader.
The fullname is a str. Returns a 2-tuple of (Loader, portion) where
portion is a sequence of file system locations contributing to part of
a namespace package. The sequence may be empty and the loader may be
None.
"""
raise NotImplementedError
find_module = _bootstrap._find_module_shim
def invalidate_caches(self):
"""An optional method for clearing the finder's cache, if any.
This method is used by PathFinder.invalidate_caches().
"""
return NotImplemented
_register(PathEntryFinder, machinery.FileFinder)
class Loader(metaclass=abc.ABCMeta):
"""Abstract base class for import loaders."""
@abc.abstractmethod
def load_module(self, fullname):
"""Abstract method which when implemented should load a module.
The fullname is a str."""
raise NotImplementedError
@abc.abstractmethod
def module_repr(self, module):
"""Abstract method which when implemented calculates and returns the
given module's repr."""
raise NotImplementedError
class ResourceLoader(Loader):
"""Abstract base class for loaders which can return data from their
back-end storage.
This ABC represents one of the optional protocols specified by PEP 302.
"""
@abc.abstractmethod
def get_data(self, path):
"""Abstract method which when implemented should return the bytes for
the specified path. The path must be a str."""
raise NotImplementedError
class InspectLoader(Loader):
"""Abstract base class for loaders which support inspection about the
modules they can load.
This ABC represents one of the optional protocols specified by PEP 302.
"""
@abc.abstractmethod
def is_package(self, fullname):
"""Abstract method which when implemented should return whether the
module is a package. The fullname is a str. Returns a bool."""
raise NotImplementedError
@abc.abstractmethod
def get_code(self, fullname):
"""Abstract method which when implemented should return the code object
for the module. The fullname is a str. Returns a types.CodeType."""
raise NotImplementedError
@abc.abstractmethod
def get_source(self, fullname):
"""Abstract method which should return the source code for the
module. The fullname is a str. Returns a str."""
raise NotImplementedError
_register(InspectLoader, machinery.BuiltinImporter, machinery.FrozenImporter,
machinery.ExtensionFileLoader)
class ExecutionLoader(InspectLoader):
"""Abstract base class for loaders that wish to support the execution of
modules as scripts.
This ABC represents one of the optional protocols specified in PEP 302.
"""
@abc.abstractmethod
def get_filename(self, fullname):
"""Abstract method which should return the value that __file__ is to be
set to."""
raise NotImplementedError
class FileLoader(_bootstrap.FileLoader, ResourceLoader, ExecutionLoader):
"""Abstract base class partially implementing the ResourceLoader and
ExecutionLoader ABCs."""
_register(FileLoader, machinery.SourceFileLoader,
machinery.SourcelessFileLoader)
class SourceLoader(_bootstrap.SourceLoader, ResourceLoader, ExecutionLoader):
"""Abstract base class for loading source code (and optionally any
corresponding bytecode).
To support loading from source code, the abstractmethods inherited from
ResourceLoader and ExecutionLoader need to be implemented. To also support
loading from bytecode, the optional methods specified directly by this ABC
    are required.
Inherited abstractmethods not implemented in this ABC:
* ResourceLoader.get_data
* ExecutionLoader.get_filename
"""
def path_mtime(self, path):
"""Return the (int) modification time for the path (str)."""
if self.path_stats.__func__ is SourceLoader.path_stats:
raise NotImplementedError
return int(self.path_stats(path)['mtime'])
def path_stats(self, path):
"""Return a metadata dict for the source pointed to by the path (str).
Possible keys:
- 'mtime' (mandatory) is the numeric timestamp of last source
code modification;
- 'size' (optional) is the size in bytes of the source code.
"""
if self.path_mtime.__func__ is SourceLoader.path_mtime:
raise NotImplementedError
return {'mtime': self.path_mtime(path)}
def set_data(self, path, data):
"""Write the bytes to the path (if possible).
Accepts a str path and data as bytes.
Any needed intermediary directories are to be created. If for some
reason the file cannot be written because of permissions, fail
silently.
"""
raise NotImplementedError
_register(SourceLoader, machinery.SourceFileLoader)
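# Illustrative sketch (hypothetical class, not part of this module): a
# SourceLoader backed by an in-memory mapping; only the two abstract methods
# inherited from ResourceLoader and ExecutionLoader are supplied, so bytecode
# handling falls back to the defaults provided above.
class _InMemorySourceLoader(SourceLoader):
    def __init__(self, sources):
        self._sources = sources  # maps a synthetic path to source bytes
    def get_filename(self, fullname):
        return '<memory>/' + fullname
    def get_data(self, path):
        return self._sources[path]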
class PyLoader(SourceLoader):
"""Implement the deprecated PyLoader ABC in terms of SourceLoader.
This class has been deprecated! It is slated for removal in Python 3.4.
If compatibility with Python 3.1 is not needed then implement the
SourceLoader ABC instead of this class. If Python 3.1 compatibility is
needed, then use the following idiom to have a single class that is
compatible with Python 3.1 onwards::
try:
from importlib.abc import SourceLoader
except ImportError:
from importlib.abc import PyLoader as SourceLoader
class CustomLoader(SourceLoader):
def get_filename(self, fullname):
# Implement ...
def source_path(self, fullname):
'''Implement source_path in terms of get_filename.'''
try:
return self.get_filename(fullname)
except ImportError:
return None
def is_package(self, fullname):
filename = os.path.basename(self.get_filename(fullname))
return os.path.splitext(filename)[0] == '__init__'
"""
@abc.abstractmethod
def is_package(self, fullname):
raise NotImplementedError
@abc.abstractmethod
def source_path(self, fullname):
"""Abstract method. Accepts a str module name and returns the path to
the source code for the module."""
raise NotImplementedError
def get_filename(self, fullname):
"""Implement get_filename in terms of source_path.
As get_filename should only return a source file path there is no
chance of the path not existing but loading still being possible, so
ImportError should propagate instead of being turned into returning
None.
"""
warnings.warn("importlib.abc.PyLoader is deprecated and is "
"slated for removal in Python 3.4; "
"use SourceLoader instead. "
"See the importlib documentation on how to be "
"compatible with Python 3.1 onwards.",
DeprecationWarning)
path = self.source_path(fullname)
if path is None:
raise ImportError(name=fullname)
else:
return path
class PyPycLoader(PyLoader):
"""Abstract base class to assist in loading source and bytecode by
requiring only back-end storage methods to be implemented.
This class has been deprecated! Removal is slated for Python 3.4. Implement
the SourceLoader ABC instead. If Python 3.1 compatibility is needed, see
PyLoader.
The methods get_code, get_source, and load_module are implemented for the
user.
"""
def get_filename(self, fullname):
"""Return the source or bytecode file path."""
path = self.source_path(fullname)
if path is not None:
return path
path = self.bytecode_path(fullname)
if path is not None:
return path
raise ImportError("no source or bytecode path available for "
"{0!r}".format(fullname), name=fullname)
def get_code(self, fullname):
"""Get a code object from source or bytecode."""
warnings.warn("importlib.abc.PyPycLoader is deprecated and slated for "
"removal in Python 3.4; use SourceLoader instead. "
"If Python 3.1 compatibility is required, see the "
"latest documentation for PyLoader.",
DeprecationWarning)
source_timestamp = self.source_mtime(fullname)
# Try to use bytecode if it is available.
bytecode_path = self.bytecode_path(fullname)
if bytecode_path:
data = self.get_data(bytecode_path)
try:
magic = data[:4]
if len(magic) < 4:
raise ImportError(
"bad magic number in {}".format(fullname),
name=fullname, path=bytecode_path)
raw_timestamp = data[4:8]
if len(raw_timestamp) < 4:
raise EOFError("bad timestamp in {}".format(fullname))
pyc_timestamp = _bootstrap._r_long(raw_timestamp)
raw_source_size = data[8:12]
if len(raw_source_size) != 4:
raise EOFError("bad file size in {}".format(fullname))
# Source size is unused as the ABC does not provide a way to
# get the size of the source ahead of reading it.
bytecode = data[12:]
# Verify that the magic number is valid.
if imp.get_magic() != magic:
raise ImportError(
"bad magic number in {}".format(fullname),
name=fullname, path=bytecode_path)
# Verify that the bytecode is not stale (only matters when
                # there is source to fall back on).
if source_timestamp:
if pyc_timestamp < source_timestamp:
raise ImportError("bytecode is stale", name=fullname,
path=bytecode_path)
except (ImportError, EOFError):
# If source is available give it a shot.
if source_timestamp is not None:
pass
else:
raise
else:
# Bytecode seems fine, so try to use it.
return marshal.loads(bytecode)
elif source_timestamp is None:
raise ImportError("no source or bytecode available to create code "
"object for {0!r}".format(fullname),
name=fullname)
# Use the source.
source_path = self.source_path(fullname)
if source_path is None:
message = "a source path must exist to load {0}".format(fullname)
raise ImportError(message, name=fullname)
source = self.get_data(source_path)
code_object = compile(source, source_path, 'exec', dont_inherit=True)
# Generate bytecode and write it out.
if not sys.dont_write_bytecode:
data = bytearray(imp.get_magic())
data.extend(_bootstrap._w_long(source_timestamp))
data.extend(_bootstrap._w_long(len(source) & 0xFFFFFFFF))
data.extend(marshal.dumps(code_object))
self.write_bytecode(fullname, data)
return code_object
@abc.abstractmethod
def source_mtime(self, fullname):
"""Abstract method. Accepts a str filename and returns an int
modification time for the source of the module."""
raise NotImplementedError
@abc.abstractmethod
def bytecode_path(self, fullname):
"""Abstract method. Accepts a str filename and returns the str pathname
to the bytecode for the module."""
raise NotImplementedError
@abc.abstractmethod
def write_bytecode(self, fullname, bytecode):
"""Abstract method. Accepts a str filename and bytes object
representing the bytecode for the module. Returns a boolean
representing whether the bytecode was written or not."""
raise NotImplementedError
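# Illustrative note on the bytecode layout parsed by PyPycLoader.get_code:
# bytes 0-3 hold imp.get_magic(), bytes 4-7 the source modification time,
# bytes 8-11 the source size, and everything from byte 12 onward is the
# marshalled code object.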
|
bsd-3-clause
|
jmesteve/medical
|
openerp/tests/addons/test_impex/models.py
|
95
|
5855
|
# -*- coding: utf-8 -*-
from openerp.osv import orm, fields
def selection_fn(obj, cr, uid, context=None):
return list(enumerate(["Corge", "Grault", "Wheee", "Moog"]))
def function_fn(model, cr, uid, ids, field_name, arg, context):
return dict((id, 3) for id in ids)
def function_fn_write(model, cr, uid, id, field_name, field_value, fnct_inv_arg, context):
""" just so CreatorCase.export can be used
"""
pass
models = [
('boolean', fields.boolean()),
('integer', fields.integer()),
('float', fields.float()),
('decimal', fields.float(digits=(16, 3))),
('string.bounded', fields.char('unknown', size=16)),
('string.required', fields.char('unknown', size=None, required=True)),
('string', fields.char('unknown', size=None)),
('date', fields.date()),
('datetime', fields.datetime()),
('text', fields.text()),
('selection', fields.selection([(1, "Foo"), (2, "Bar"), (3, "Qux"), (4, '')])),
('selection.function', fields.selection(selection_fn)),
# just relate to an integer
('many2one', fields.many2one('export.integer')),
('one2many', fields.one2many('export.one2many.child', 'parent_id')),
('many2many', fields.many2many('export.many2many.other')),
('function', fields.function(function_fn, fnct_inv=function_fn_write, type="integer")),
# related: specialization of fields.function, should work the same way
# TODO: reference
]
for name, field in models:
attrs = {
'_name': 'export.%s' % name,
'_columns': {
'const': fields.integer(),
'value': field
},
'_defaults': {'const': 4},
'name_get': (lambda self, cr, uid, ids, context=None:
[(record.id, "%s:%s" % (self._name, record.value))
for record in self.browse(cr, uid, ids, context=context)]),
'name_search': (lambda self, cr, uid, name, operator, context=None:
self.name_get(cr, uid,
self.search(cr, uid, [['value', operator, int(name.split(':')[1])]])
, context=context)
if isinstance(name, basestring) and name.split(':')[0] == self._name
else [])
}
NewModel = type(
'Export%s' % ''.join(section.capitalize() for section in name.split('.')),
(orm.Model,),
attrs)
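# Illustrative note: the loop above builds one ORM model per entry, e.g.
# 'export.boolean' or 'export.many2one', each with an integer 'const' column
# defaulting to 4 and a 'value' column of the listed field type, plus
# name_get/name_search helpers that render records as "<model>:<value>".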
class One2ManyChild(orm.Model):
_name = 'export.one2many.child'
# FIXME: orm.py:1161, fix to name_get on m2o field
_rec_name = 'value'
_columns = {
'parent_id': fields.many2one('export.one2many'),
'str': fields.char('unknown', size=None),
'value': fields.integer()
}
def name_get(self, cr, uid, ids, context=None):
return [(record.id, "%s:%s" % (self._name, record.value))
for record in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
return (self.name_get(cr, user,
self.search(cr, user, [['value', operator, int(name.split(':')[1])]])
, context=context)
if isinstance(name, basestring) and name.split(':')[0] == self._name
else [])
class One2ManyMultiple(orm.Model):
_name = 'export.one2many.multiple'
_columns = {
'parent_id': fields.many2one('export.one2many.recursive'),
'const': fields.integer(),
'child1': fields.one2many('export.one2many.child.1', 'parent_id'),
'child2': fields.one2many('export.one2many.child.2', 'parent_id'),
}
_defaults = { 'const': 36 }
class One2ManyChildMultiple(orm.Model):
_name = 'export.one2many.multiple.child'
# FIXME: orm.py:1161, fix to name_get on m2o field
_rec_name = 'value'
_columns = {
'parent_id': fields.many2one('export.one2many.multiple'),
'str': fields.char('unknown', size=None),
'value': fields.integer()
}
def name_get(self, cr, uid, ids, context=None):
return [(record.id, "%s:%s" % (self._name, record.value))
for record in self.browse(cr, uid, ids, context=context)]
class One2ManyChild1(orm.Model):
_name = 'export.one2many.child.1'
_inherit = 'export.one2many.multiple.child'
class One2ManyChild2(orm.Model):
_name = 'export.one2many.child.2'
_inherit = 'export.one2many.multiple.child'
class Many2ManyChild(orm.Model):
_name = 'export.many2many.other'
# FIXME: orm.py:1161, fix to name_get on m2o field
_rec_name = 'value'
_columns = {
'str': fields.char('unknown', size=None),
'value': fields.integer()
}
def name_get(self, cr, uid, ids, context=None):
return [(record.id, "%s:%s" % (self._name, record.value))
for record in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
return (self.name_get(cr, user,
self.search(cr, user, [['value', operator, int(name.split(':')[1])]])
, context=context)
if isinstance(name, basestring) and name.split(':')[0] == self._name
else [])
class SelectionWithDefault(orm.Model):
_name = 'export.selection.withdefault'
_columns = {
'const': fields.integer(),
'value': fields.selection([(1, "Foo"), (2, "Bar")]),
}
_defaults = {
'const': 4,
'value': 2,
}
class RecO2M(orm.Model):
_name = 'export.one2many.recursive'
_columns = {
'value': fields.integer(),
'child': fields.one2many('export.one2many.multiple', 'parent_id')
}
class OnlyOne(orm.Model):
_name = 'export.unique'
_columns = {
'value': fields.integer(),
}
_sql_constraints = [
('value_unique', 'unique (value)', "The value must be unique"),
]
|
agpl-3.0
|
yhpeng-git/mxnet
|
example/ssd/demo.py
|
9
|
4697
|
import argparse
import tools.find_mxnet
import mxnet as mx
import os
import importlib
import sys
from detect.detector import Detector
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx,
nms_thresh=0.5, force_nms=True):
"""
    wrapper to initialize a detector
Parameters:
----------
net : str
test network name
prefix : str
load model prefix
epoch : int
load model epoch
data_shape : int
resize image shape
mean_pixels : tuple (float, float, float)
mean pixel values (R, G, B)
ctx : mx.ctx
running context, mx.cpu() or mx.gpu(?)
force_nms : bool
        force non-maximum suppression across different categories
"""
sys.path.append(os.path.join(os.getcwd(), 'symbol'))
if net is not None:
net = importlib.import_module("symbol_" + net) \
.get_symbol(len(CLASSES), nms_thresh, force_nms)
detector = Detector(net, prefix + "_" + str(data_shape), epoch, \
data_shape, mean_pixels, ctx=ctx)
return detector
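# Hedged usage sketch (the model prefix and epoch are hypothetical; a trained
# checkpoint must exist on disk for this call to succeed):
# detector = get_detector('vgg16_ssd_300', './model/ssd', 0, 300,
#                         (123, 117, 104), mx.cpu(), nms_thresh=0.5)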
def parse_args():
parser = argparse.ArgumentParser(description='Single-shot detection network demo')
parser.add_argument('--network', dest='network', type=str, default='vgg16_ssd_300',
choices=['vgg16_ssd_300', 'vgg16_ssd_512'], help='which network to use')
parser.add_argument('--images', dest='images', type=str, default='./data/demo/dog.jpg',
                        help='run demo with images, use comma (without space) to separate multiple images')
parser.add_argument('--dir', dest='dir', nargs='?',
help='demo image directory, optional', type=str)
parser.add_argument('--ext', dest='extension', help='image extension, optional',
type=str, nargs='?')
parser.add_argument('--epoch', dest='epoch', help='epoch of trained model',
default=0, type=int)
parser.add_argument('--prefix', dest='prefix', help='trained model prefix',
default=os.path.join(os.getcwd(), 'model', 'ssd'), type=str)
parser.add_argument('--cpu', dest='cpu', help='(override GPU) use CPU to detect',
action='store_true', default=False)
parser.add_argument('--gpu', dest='gpu_id', type=int, default=0,
help='GPU device id to detect with')
parser.add_argument('--data-shape', dest='data_shape', type=int, default=300,
help='set image shape')
parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
help='red mean value')
parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
help='green mean value')
parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
help='blue mean value')
parser.add_argument('--thresh', dest='thresh', type=float, default=0.5,
                        help='object visualization score threshold, default 0.5')
parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.5,
help='non-maximum suppression threshold, default 0.5')
parser.add_argument('--force', dest='force_nms', type=bool, default=True,
help='force non-maximum suppression on different class')
parser.add_argument('--timer', dest='show_timer', type=bool, default=True,
help='show detection time')
parser.add_argument('--deploy', dest='deploy_net', action='store_true', default=False,
help='Load network from json file, rather than from symbol')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if args.cpu:
ctx = mx.cpu()
else:
ctx = mx.gpu(args.gpu_id)
# parse image list
image_list = [i.strip() for i in args.images.split(',')]
assert len(image_list) > 0, "No valid image specified to detect"
network = None if args.deploy_net else args.network
detector = get_detector(network, args.prefix, args.epoch,
args.data_shape,
(args.mean_r, args.mean_g, args.mean_b),
ctx, args.nms_thresh, args.force_nms)
# run detection
detector.detect_and_visualize(image_list, args.dir, args.extension,
CLASSES, args.thresh, args.show_timer)
|
apache-2.0
|
klausman/scion
|
python/lib/sciond_api/host_info.py
|
3
|
2021
|
# Copyright 2017 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`host_info` --- Host info objects
======================================
"""
# Stdlib
import logging
# External
import capnp # noqa
# SCION
import proto.sciond_capnp as P
from lib.packet.host_addr import HostAddrIPv4, HostAddrIPv6
from lib.packet.packet_base import Cerealizable
from lib.types import AddrType
class HostInfo(Cerealizable): # pragma: no cover
NAME = "HostInfo"
P_CLS = P.HostInfo
@classmethod
def from_values(cls, addrs, port):
"""
Returns a HostInfo object with the specified entries.
:param addrs: The list of HostAddr objects.
:param port: The first hop port.
"""
p = cls.P_CLS.new_message()
if port:
p.port = port
for addr in addrs:
if addr.TYPE == AddrType.IPV4:
p.addrs.ipv4 = addr.pack()
elif addr.TYPE == AddrType.IPV6:
p.addrs.ipv6 = addr.pack()
else:
logging.warning("Unsupported address type: %s" % addr.TYPE)
return cls(p)
def ipv4(self):
if self.p.addrs.ipv4:
return HostAddrIPv4(self.p.addrs.ipv4)
return None
def ipv6(self):
if self.p.addrs.ipv6:
return HostAddrIPv6(self.p.addrs.ipv6)
return None
def short_desc(self):
return ("IPv4: %s IPv6: %s Port: %d" %
(self.ipv4() or "unset", self.ipv6() or "unset", self.p.port))
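# Hedged usage sketch (the HostAddrIPv4 constructor argument format is an
# assumption, not taken from this module):
# info = HostInfo.from_values([HostAddrIPv4("127.0.0.1")], 30041)
# info.short_desc()  # e.g. "IPv4: 127.0.0.1 IPv6: unset Port: 30041"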
|
apache-2.0
|
red-hood/calendarserver
|
txweb2/dav/method/report_principal_search_property_set.py
|
1
|
2879
|
# -*- test-case-name: txweb2.dav.test.test_report_expand -*-
##
# Copyright (c) 2006-2015 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, [email protected]
##
"""
WebDAV principal-search-property-set report
"""
__all__ = ["report_DAV__principal_search_property_set"]
from twisted.internet.defer import deferredGenerator
from twext.python.log import Logger
from txweb2 import responsecode
from txdav.xml import element as davxml
from txweb2.http import HTTPError, Response, StatusResponse
from txweb2.stream import MemoryStream
log = Logger()
def report_DAV__principal_search_property_set(self, request, principal_search_property_set):
"""
Generate a principal-search-property-set REPORT. (RFC 3744, section 9.5)
"""
# Verify root element
if not isinstance(principal_search_property_set, davxml.PrincipalSearchPropertySet):
raise ValueError("%s expected as root element, not %s."
% (davxml.PrincipalSearchPropertySet.sname(), principal_search_property_set.sname()))
# Only handle Depth: 0
depth = request.headers.getHeader("depth", "0")
if depth != "0":
log.error("Error in principal-search-property-set REPORT, Depth set to %s" % (depth,))
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Depth %s not allowed" % (depth,)))
# Get details from the resource
result = self.principalSearchPropertySet()
if result is None:
log.error("Error in principal-search-property-set REPORT not supported on: %s" % (self,))
raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Not allowed on this resource"))
yield Response(code=responsecode.OK, stream=MemoryStream(result.toxml()))
report_DAV__principal_search_property_set = deferredGenerator(report_DAV__principal_search_property_set)
|
apache-2.0
|
mmardini/django
|
tests/generic_views/test_edit.py
|
22
|
17034
|
from __future__ import unicode_literals
from unittest import expectedFailure
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django import forms
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.views.generic.base import View
from django.views.generic.edit import FormMixin, ModelFormMixin, CreateView
from . import views
from .models import Artist, Author
class FormMixinTests(TestCase):
def test_initial_data(self):
""" Test instance independence of initial data dict (see #16138) """
initial_1 = FormMixin().get_initial()
initial_1['foo'] = 'bar'
initial_2 = FormMixin().get_initial()
self.assertNotEqual(initial_1, initial_2)
def test_get_prefix(self):
""" Test prefix can be set (see #18872) """
test_string = 'test'
rf = RequestFactory()
get_request = rf.get('/')
class TestFormMixin(FormMixin):
request = get_request
default_kwargs = TestFormMixin().get_form_kwargs()
self.assertEqual(None, default_kwargs.get('prefix'))
set_mixin = TestFormMixin()
set_mixin.prefix = test_string
set_kwargs = set_mixin.get_form_kwargs()
self.assertEqual(test_string, set_kwargs.get('prefix'))
@override_settings(ROOT_URLCONF='generic_views.urls')
class BasicFormTests(TestCase):
def test_post_data(self):
res = self.client.post('/contact/', {'name': "Me", 'message': "Hello"})
self.assertRedirects(res, 'http://testserver/list/authors/')
class ModelFormMixinTests(TestCase):
def test_get_form(self):
form_class = views.AuthorGetQuerySetFormView().get_form_class()
self.assertEqual(form_class._meta.model, Author)
def test_get_form_checks_for_object(self):
mixin = ModelFormMixin()
mixin.request = RequestFactory().get('/')
self.assertEqual({'initial': {}, 'prefix': None},
mixin.get_form_kwargs())
@override_settings(ROOT_URLCONF='generic_views.urls')
class CreateViewTests(TestCase):
def test_create(self):
res = self.client.get('/edit/authors/create/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertFalse('object' in res.context)
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.post('/edit/authors/create/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_invalid(self):
res = self.client.post('/edit/authors/create/',
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertEqual(Author.objects.count(), 0)
def test_create_with_object_url(self):
res = self.client.post('/edit/artists/create/',
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
artist = Artist.objects.get(name='Rene Magritte')
self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % artist.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_create_with_redirect(self):
res = self.client.post('/edit/authors/create/redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_with_interpolated_redirect(self):
res = self.client.post('/edit/authors/create/interpolate_redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.all()[0].pk
self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
def test_create_with_special_properties(self):
res = self.client.get('/edit/authors/create/special/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertFalse('object' in res.context)
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/authors/create/special/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
obj = Author.objects.get(slug='randall-munroe')
self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk}))
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_without_redirect(self):
try:
self.client.post('/edit/authors/create/naive/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
except ImproperlyConfigured:
pass
def test_create_restricted(self):
res = self.client.post('/edit/authors/create/restricted/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/accounts/login/?next=/edit/authors/create/restricted/')
def test_create_view_with_restricted_fields(self):
class MyCreateView(CreateView):
model = Author
fields = ['name']
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name'])
def test_create_view_all_fields(self):
class MyCreateView(CreateView):
model = Author
fields = '__all__'
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name', 'slug'])
def test_create_view_without_explicit_fields(self):
class MyCreateView(CreateView):
model = Author
message = (
"Using ModelFormMixin (base class of MyCreateView) without the "
"'fields' attribute is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
@override_settings(ROOT_URLCONF='generic_views.urls')
class UpdateViewTests(TestCase):
def test_update_post(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@expectedFailure
def test_update_put(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.put('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
# Here is the expected failure. PUT data are not processed in any special
        # way by django. So the request will be equivalent to a POST without data, hence
# the form will be invalid and redisplayed with errors (status code 200).
# See also #12635
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_invalid(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_update_with_object_url(self):
a = Artist.objects.create(name='Rene Magritte')
res = self.client.post('/edit/artists/%d/update/' % a.pk,
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/detail/artist/%d/' % a.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_update_with_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_with_interpolated_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/interpolate_redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.all()[0].pk
self.assertRedirects(res, 'http://testserver/edit/author/%d/update/' % pk)
def test_update_with_special_properties(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/author/%d/update/special/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/detail/author/%d/' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/update/naive/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
def test_update_get_object(self):
a = Author.objects.create(
pk=1,
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/update/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/update/',
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@override_settings(ROOT_URLCONF='generic_views.urls')
class DeleteViewTests(TestCase):
def test_delete_by_post(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html')
# Deletion with POST
res = self.client.post('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_by_delete(self):
# Deletion with browser compatible DELETE method
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.delete('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_interpolated_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/interpolate_redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/edit/authors/create/?deleted=%d' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_special_properties(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertFalse('author' in res.context)
self.assertTemplateUsed(res, 'generic_views/confirm_delete.html')
res = self.client.post('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, 'http://testserver/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/delete/naive/' % a.pk)
|
bsd-3-clause
|
skygenomics/CSBB-v1.0
|
WINDOWS_VERSION/Modules/PCA_Samples.py
|
2
|
2048
|
print '\n''Importing Libraries''\n'
import pip
def install(package):
pip.main(['install', package])
def upgrade(package):
    pip.main(['install', '--upgrade', package])
def uninstall(package):
    pip.main(['uninstall', '-y', package])
uninstall('numpy')
uninstall('scipy')
install('numpy')
install('scipy')
upgrade('numpy')
upgrade('scipy')
install('pandas')
upgrade('pandas')
install('scikit-learn')
upgrade('scikit-learn')
import os
import sys
import pylab
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
print '\n'"Reading File"'\n'
file1 = sys.argv[1]
data = pd.read_csv(file1,index_col = 0, sep = '\t')
print '\n'"Creating Indexes" '\n'
data.index.names = ['gene']
data.columns.names = ['samples']
data = pd.DataFrame.transpose(data)
print '\n'"Performing Principal Component Analysis on Samples"'\n'
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
pca.fit(data)
data1 = pca.transform(data)
data2 = pd.DataFrame(data1)
ratio = pca.explained_variance_ratio_
outfile1 = os.path.splitext(file1)[0]+'_PC_variance'+'.txt'
print '\n' "Writing variance of each Principal Component" + ' ' + 'in' + ' '+ outfile1 + '\n'
with open(os.path.splitext(file1)[0]+'_PC_variance'+'.txt', 'w') as OUT:
for f in range(0,3):
a = ratio[f]*100
string = "PC"+str(f+1)
b = str(string) + ' '+"variance is"+' '+str(a)
OUT.write("%s\n" % str(b))
data2.index = data.index
data2.columns = ['PC1', 'PC2', 'PC3']
data2.head()
outfile2 = os.path.splitext(file1)[0]+'_PCA_Loading_values'+'.txt'
print '\n' "Writing Loading values of Samples in each Principal Component" + ' '+ 'in' + ' '+outfile2 + '\n'
with open(os.path.splitext(file1)[0]+'_PCA_Loading_values'+'.txt', 'w') as OUT1:
OUT1.write('{0}' .format(data2))
print '\n' "Generating PC1 vs PC2 scatter plot" '\n'
ax = data2.plot(kind='scatter', x='PC1', y='PC2', c='c', s=100)
for i, Gene in enumerate(data.index):
ax.annotate(Gene, (data2.iloc[i].PC1, data2.iloc[i].PC2))
a = os.path.splitext(file1)
pylab.savefig(os.path.splitext(file1)[0] + '_PCA_on_Samples' +'.pdf')
|
mit
|
peeyush-tm/check_mk
|
web/plugins/icons/wato.py
|
3
|
2730
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import wato
def wato_link(folder, site, hostname, where):
if not config.wato_enabled:
return ""
if 'X' in html.display_options:
url = "wato.py?folder=%s&host=%s" % \
(html.urlencode(folder), html.urlencode(hostname))
if where == "inventory":
url += "&mode=inventory"
help = _("Edit services")
icon = "services"
else:
url += "&mode=edithost"
help = _("Edit this host")
icon = "wato"
return '<a href="%s">%s</a>' % (url, html.render_icon(icon, help))
else:
return ""
def paint_wato(what, row, tags, custom_vars):
if not wato.may_see_hosts() or html.mobile:
return
filename = row["host_filename"]
if filename.startswith("/wato/") and filename.endswith("hosts.mk"):
wato_folder = filename[6:-8].rstrip("/")
if what == "host":
return wato_link(wato_folder, row["site"], row["host_name"], "edithost")
elif row["service_description"] in [ "Check_MK inventory", "Check_MK Discovery" ]:
return wato_link(wato_folder, row["site"], row["host_name"], "inventory")
multisite_icons.append({
'host_columns': [ "filename" ],
'paint': paint_wato,
})
|
gpl-2.0
|
takeshineshiro/neutron
|
neutron/db/migration/alembic_migrations/ryu_init_ops.py
|
47
|
1206
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial operations for the Ryu plugin
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'tunnelkeylasts',
sa.Column('last_key', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('last_key'))
op.create_table(
'tunnelkeys',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('tunnel_key', sa.Integer(), autoincrement=False,
nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ),
sa.PrimaryKeyConstraint('tunnel_key'))
|
apache-2.0
|
vaygr/ansible
|
lib/ansible/modules/packaging/os/slackpkg.py
|
14
|
6161
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Kim Nørgaard
# Written by Kim Nørgaard <[email protected]>
# Based on pkgng module written by bleader <[email protected]>
# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: slackpkg
short_description: Package manager for Slackware >= 12.2
description:
- Manage binary packages for Slackware using 'slackpkg' which
is available in versions after 12.2.
version_added: "2.0"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package, you can use "installed" as an alias for C(present) and removed as one for C(absent).
choices: [ 'present', 'absent', 'latest' ]
required: false
default: present
update_cache:
description:
- update the package database first
required: false
default: false
choices: [ true, false ]
author: Kim Nørgaard (@KimNorgaard)
requirements: [ "Slackware >= 12.2" ]
'''
EXAMPLES = '''
# Install package foo
- slackpkg:
name: foo
state: present
# Remove packages foo and bar
- slackpkg:
name: foo,bar
state: absent
# Make sure that it is the most updated package
- slackpkg:
name: foo
state: latest
'''
from ansible.module_utils.basic import AnsibleModule
def query_package(module, slackpkg_path, name):
import glob
import platform
machine = platform.machine()
packages = glob.glob("/var/log/packages/%s-*-[%s|noarch]*" % (name,
machine))
if len(packages) > 0:
return True
return False
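# Worked example of the check above (machine name is illustrative): querying
# "foo" on an x86_64 host globs /var/log/packages/foo-*-[x86_64|noarch]*, so
# any matching package log file makes query_package return True.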
def remove_packages(module, slackpkg_path, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, slackpkg_path, package):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
remove %s" % (slackpkg_path,
package))
if not module.check_mode and query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, slackpkg_path, packages):
install_c = 0
for package in packages:
if query_package(module, slackpkg_path, package):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
install %s" % (slackpkg_path,
package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to install %s: %s" % (package, out),
stderr=err)
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)"
% (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def upgrade_packages(module, slackpkg_path, packages):
install_c = 0
for package in packages:
if not module.check_mode:
rc, out, err = module.run_command("%s -default_answer=y -batch=on \
upgrade %s" % (slackpkg_path,
package))
if not module.check_mode and not query_package(module, slackpkg_path,
package):
module.fail_json(msg="failed to install %s: %s" % (package, out),
stderr=err)
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="present %s package(s)"
% (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def update_cache(module, slackpkg_path):
rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
if rc != 0:
module.fail_json(msg="Could not update package cache")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default="installed", choices=['installed', 'removed', 'absent', 'present', 'latest']),
name=dict(aliases=["pkg"], required=True, type='list'),
update_cache=dict(default=False, aliases=["update-cache"],
type='bool'),
),
supports_check_mode=True)
slackpkg_path = module.get_bin_path('slackpkg', True)
p = module.params
pkgs = p['name']
if p["update_cache"]:
update_cache(module, slackpkg_path)
if p['state'] == 'latest':
upgrade_packages(module, slackpkg_path, pkgs)
elif p['state'] in ['present', 'installed']:
install_packages(module, slackpkg_path, pkgs)
elif p["state"] in ['removed', 'absent']:
remove_packages(module, slackpkg_path, pkgs)
if __name__ == '__main__':
main()
|
gpl-3.0
|
khkaminska/bokeh
|
bokeh/compat/mplexporter/renderers/fake_renderer.py
|
64
|
2561
|
from .base import Renderer
class FakeRenderer(Renderer):
"""
Fake Renderer
This is a fake renderer which simply outputs a text tree representing the
elements found in the plot(s). This is used in the unit tests for the
package.
Below are the methods your renderer must implement. You are free to do
anything you wish within the renderer (i.e. build an XML or JSON
representation, call an external API, etc.) Here the renderer just
builds a simple string representation for testing purposes.
"""
def __init__(self):
self.output = ""
def open_figure(self, fig, props):
self.output += "opening figure\n"
def close_figure(self, fig):
self.output += "closing figure\n"
def open_axes(self, ax, props):
self.output += " opening axes\n"
def close_axes(self, ax):
self.output += " closing axes\n"
def open_legend(self, legend, props):
self.output += " opening legend\n"
def close_legend(self, legend):
self.output += " closing legend\n"
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
self.output += " draw text '{0}' {1}\n".format(text, text_type)
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
self.output += " draw path with {0} vertices\n".format(data.shape[0])
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
self.output += " draw image of size {0}\n".format(len(imdata))
class FullFakeRenderer(FakeRenderer):
"""
Renderer with the full complement of methods.
When the following are left undefined, they will be implemented via
other methods in the class. They can be defined explicitly for
more efficient or specialized use within the renderer implementation.
"""
def draw_line(self, data, coordinates, style, label, mplobj=None):
self.output += " draw line with {0} points\n".format(data.shape[0])
def draw_markers(self, data, coordinates, style, label, mplobj=None):
self.output += " draw {0} markers\n".format(data.shape[0])
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
self.output += (" draw path collection "
"with {0} offsets\n".format(offsets.shape[0]))
|
bsd-3-clause
|
hephaex/kernel
|
tools/perf/scripts/python/net_dropmonitor.py
|
2669
|
1738
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
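# Worked example (hypothetical addresses): with kallsyms == [(0x100, "a"), (0x200, "b")],
# get_sym(0x180) narrows start to 0 and returns ("a", 0x80), while get_sym(0x50)
# leaves start at -1 and returns (None, 0).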
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
|
gpl-2.0
|
golismero/golismero-devel
|
thirdparty_libs/nltk/test/doctest_nose_plugin.py
|
12
|
5686
|
# -*- coding: utf-8 -*-
"""
Patched version of nose doctest plugin.
See https://github.com/nose-devs/nose/issues/7
"""
from nose.plugins.doctests import *
class _DoctestFix(Doctest):
def options(self, parser, env):
super(_DoctestFix, self).options(parser, env)
parser.add_option('--doctest-options', action="append",
dest="doctestOptions",
metavar="OPTIONS",
help="Specify options to pass to doctest. " +
"Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'")
def configure(self, options, config):
super(_DoctestFix, self).configure(options, config)
self.optionflags = 0
if options.doctestOptions:
flags = ",".join(options.doctestOptions).split(',')
for flag in flags:
try:
if flag.startswith('+'):
self.optionflags |= getattr(doctest, flag[1:])
elif flag.startswith('-'):
self.optionflags &= ~getattr(doctest, flag[1:])
else:
raise ValueError(
"Must specify doctest options with starting " +
"'+' or '-'. Got %s" % (flag,))
except AttributeError:
raise ValueError("Unknown doctest option %s" %
(flag[1:],))
def loadTestsFromModule(self, module):
"""Load doctests from the module.
"""
log.debug("loading from %s", module)
if not self.matches(module.__name__):
log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
log.exception("Attribute error loading from %s", module)
# nose allows module.__test__ = False; doctest does not and throws
# AttributeError
return
if not tests:
log.debug("No tests found in %s", module)
return
tests.sort()
module_file = src(module.__file__)
# FIXME this breaks the id plugin somehow (tests probably don't
# get wrapped in result proxy or something)
cases = []
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
cases.append(DocTestCase(test,
optionflags=self.optionflags,
result_var=self.doctest_result_var))
if cases:
yield self.suiteClass(cases, context=module, can_split=False)
def loadTestsFromFile(self, filename):
"""Load doctests from the file.
Tests are loaded only if filename's extension matches
configured doctest extension.
"""
if self.extension and anyp(filename.endswith, self.extension):
name = os.path.basename(filename)
dh = open(filename)
try:
doc = dh.read()
finally:
dh.close()
fixture_context = None
globs = {'__file__': filename}
if self.fixtures:
base, ext = os.path.splitext(name)
dirname = os.path.dirname(filename)
sys.path.append(dirname)
fixt_mod = base + self.fixtures
try:
fixture_context = __import__(
fixt_mod, globals(), locals(), ["nop"])
except ImportError, e:
log.debug(
"Could not import %s: %s (%s)", fixt_mod, e, sys.path)
log.debug("Fixture module %s resolved to %s",
fixt_mod, fixture_context)
if hasattr(fixture_context, 'globs'):
globs = fixture_context.globs(globs)
parser = doctest.DocTestParser()
test = parser.get_doctest(
doc, globs=globs, name=name,
filename=filename, lineno=0)
if test.examples:
case = DocFileCase(
test,
optionflags=self.optionflags,
setUp=getattr(fixture_context, 'setup_test', None),
tearDown=getattr(fixture_context, 'teardown_test', None),
result_var=self.doctest_result_var)
if fixture_context:
yield ContextList((case,), context=fixture_context)
else:
yield case
else:
yield False # no tests to load
def makeTest(self, obj, parent):
"""Look for doctests in the given object, which will be a
function, method or class.
"""
        name = getattr(obj, '__name__', 'Unnamed %s' % type(obj))
doctests = self.finder.find(obj, module=getmodule(parent), name=name)
if doctests:
for test in doctests:
if len(test.examples) == 0:
continue
yield DocTestCase(test, obj=obj, optionflags=self.optionflags,
result_var=self.doctest_result_var)
def _plugin_supports_doctest_options(plugin_cls):
import optparse
plugin = plugin_cls()
parser = optparse.OptionParser()
plugin.options(parser, {})
return parser.has_option('--doctest-options')
if _plugin_supports_doctest_options(Doctest):
class DoctestFix(Doctest):
pass
else:
class DoctestFix(_DoctestFix):
pass
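# Hedged usage sketch: registering the resulting plugin class with a nose
# test program (assumes nose's standard addplugins hook):
# import nose
# nose.main(addplugins=[DoctestFix()])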
|
gpl-2.0
|
MyAOSP/external_chromium_org
|
third_party/closure_linter/closure_linter/errorrules.py
|
156
|
1379
|
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error rules class for Closure Linter."""
__author__ = '[email protected] (Robert Walker)'
import gflags as flags
from closure_linter import errors
FLAGS = flags.FLAGS
flags.DEFINE_boolean('jsdoc', True,
'Whether to report errors for missing JsDoc.')
def ShouldReportError(error):
"""Whether the given error should be reported.
Returns:
True for all errors except missing documentation errors. For these,
it returns the value of the jsdoc flag.
"""
return FLAGS.jsdoc or error not in (
errors.MISSING_PARAMETER_DOCUMENTATION,
errors.MISSING_RETURN_DOCUMENTATION,
errors.MISSING_MEMBER_DOCUMENTATION,
errors.MISSING_PRIVATE,
errors.MISSING_JSDOC_TAG_THIS)
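# --- Editor's note: hedged usage sketch, not part of the original file. ---
# After flag parsing, ShouldReportError() suppresses only the documentation
# error codes listed above when --nojsdoc is passed; every other error code is
# still reported. For example (flag handling elided):
#
#   FLAGS.jsdoc = False
#   ShouldReportError(errors.MISSING_PARAMETER_DOCUMENTATION)  # -> False
#   FLAGS.jsdoc = True
#   ShouldReportError(errors.MISSING_PARAMETER_DOCUMENTATION)  # -> True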
|
bsd-3-clause
|
apasricha/KVMTrace-kernel-mod
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
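# --- Editor's note: hedged usage sketch, not part of the original script. ---
# Typical invocation (per the perf-script documentation; paths may differ):
#
#   perf record -a -e sched:sched_switch -e sched:sched_wakeup \
#       -e sched:sched_migrate_task -- sleep 10
#   perf script -s tools/perf/scripts/python/sched-migration.py
#
# perf calls trace_begin(), then one handler per trace event (for instance
# sched__sched_switch above), and finally trace_end(), which opens the wxPython
# RootFrame provided by SchedGui to draw the per-cpu runqueue timeline.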
|
gpl-2.0
|
jandrest2018/TWJ-2017-A
|
04 Angular/C-Web/node_modules/node-gyp/gyp/pylib/gyp/MSVSProject.py
|
2736
|
6387
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
      platforms: Array of strings, the supported platforms. If None, defaults to ['Win32'].
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
    Returns:
      The specification, as a list suitable for passing to easy_xml.
    """
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")
|
mit
|
luiscberrocal/homeworkpal
|
homeworkpal_project/employee/tests/factories.py
|
1
|
2598
|
import string
from django.conf import settings
from django.contrib.auth.models import User
from factory import LazyAttribute, lazy_attribute, SubFactory, Iterator
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyText
from faker import Factory as FakerFactory
from employee.models import TENURE_TYPES, Employee, Position, CompanyGroup, CompanyGroupEmployeeAssignment, \
PositionAssignment
__author__ = 'luiscberrocal'
faker = FakerFactory.create()
class UserFactory(DjangoModelFactory):
class Meta:
model = User
first_name = LazyAttribute(lambda x: faker.first_name())
last_name = LazyAttribute(lambda x: faker.last_name())
password = 'user1'
@lazy_attribute
def username(self):
return '%s.%s' % (self.first_name.lower(), self.last_name.lower())
def email(self):
return '%[email protected]' % (self.username())
@classmethod
def _prepare(cls, create, **kwargs):
password = kwargs.pop('password', None)
user = super(UserFactory, cls)._prepare(create, **kwargs)
if password:
user.set_password(password)
if create:
user.save()
return user
class EmployeeFactory(DjangoModelFactory):
class Meta:
model = Employee
user = SubFactory(UserFactory)
middle_name = LazyAttribute(lambda x: faker.first_name())
company_id = FuzzyText(length=7, chars=string.digits)
tenure = Iterator(TENURE_TYPES, getter=lambda c: c[0])
class PositionFactory(DjangoModelFactory):
class Meta:
model = Position
number = FuzzyText(length=6, chars=string.digits)
grade = Iterator(['NM-07', 'NM-09', 'NM-11', 'NM-12', 'NM-13'])
type = Iterator(TENURE_TYPES, getter=lambda c: c[0])
owner = None
class CompanyGroupFactory(DjangoModelFactory):
class Meta:
model = CompanyGroup
name = FuzzyText(length=10, chars=string.ascii_uppercase)
description = 'Company group'
class CompanyGroupEmployeeAssignmentFactory(DjangoModelFactory):
class Meta:
model = CompanyGroupEmployeeAssignment
group = SubFactory(CompanyGroupFactory)
employee = SubFactory(EmployeeFactory)
start_date = LazyAttribute(lambda x: faker.date_time_between(start_date="-30y", end_date="-1y"))
class PositionAssignmentFactory(DjangoModelFactory):
class Meta:
model = PositionAssignment
position = SubFactory(PositionFactory)
employee = SubFactory(EmployeeFactory)
start_date = LazyAttribute(lambda x: faker.date_time_between(start_date="-30y", end_date="-1y"))
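# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Typical use from a Django TestCase (requires a migrated test database):
#
#   employee = EmployeeFactory(user__first_name='Ada')
#   assignment = PositionAssignmentFactory(employee=employee)
#   assert assignment.position.grade in ['NM-07', 'NM-09', 'NM-11', 'NM-12', 'NM-13']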
|
mit
|
chadoe/xbmc
|
tools/EventClients/lib/python/bt/hid.py
|
181
|
2733
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from bluetooth import *
import fcntl
import bluetooth._bluetooth as _bt
import array
class HID:
def __init__(self, bdaddress=None):
self.cport = 0x11 # HID's control PSM
        self.iport = 0x13 # HID's interrupt PSM
self.backlog = 1
self.address = ""
if bdaddress:
self.address = bdaddress
# create the HID control socket
self.csock = BluetoothSocket( L2CAP )
self.csock.bind((self.address, self.cport))
set_l2cap_mtu(self.csock, 64)
self.csock.settimeout(2)
self.csock.listen(self.backlog)
# create the HID interrupt socket
self.isock = BluetoothSocket( L2CAP )
self.isock.bind((self.address, self.iport))
set_l2cap_mtu(self.isock, 64)
self.isock.settimeout(2)
self.isock.listen(self.backlog)
self.connected = False
def listen(self):
try:
(self.client_csock, self.caddress) = self.csock.accept()
print "Accepted Control connection from %s" % self.caddress[0]
(self.client_isock, self.iaddress) = self.isock.accept()
print "Accepted Interrupt connection from %s" % self.iaddress[0]
self.connected = True
return True
except Exception, e:
self.connected = False
return False
def get_local_address(self):
hci = BluetoothSocket( HCI )
fd = hci.fileno()
buf = array.array('B', [0] * 96)
fcntl.ioctl(fd, _bt.HCIGETDEVINFO, buf, 1)
data = struct.unpack_from("H8s6B", buf.tostring())
return data[2:8][::-1]
def get_control_socket(self):
if self.connected:
return (self.client_csock, self.caddress)
else:
return None
def get_interrupt_socket(self):
if self.connected:
return (self.client_isock, self.iaddress)
else:
return None
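# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Accept loop as used by the XBMC event-client scripts; binding the HID control
# and interrupt PSMs (0x11/0x13) requires a Bluetooth adapter and root rights.
#
#   hid = HID()
#   while not hid.listen():
#       pass                                  # retry until a device connects
#   csock, caddr = hid.get_control_socket()
#   isock, iaddr = hid.get_interrupt_socket()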
|
gpl-2.0
|
frankvdp/django
|
django/contrib/gis/gdal/prototypes/raster.py
|
59
|
5690
|
"""
This module houses the ctypes function prototypes for GDAL DataSource (raster)
related data structures.
"""
from ctypes import POINTER, c_bool, c_char_p, c_double, c_int, c_void_p
from functools import partial
from django.contrib.gis.gdal.libgdal import GDAL_VERSION, std_call
from django.contrib.gis.gdal.prototypes.generation import (
chararray_output, const_string_output, double_output, int_output,
void_output, voidptr_output,
)
# For more detail about c function names and definitions see
# https://gdal.org/gdal_8h.html
# https://gdal.org/gdalwarper_8h.html
# https://www.gdal.org/gdal__utils_8h.html
# Prepare partial functions that use cpl error codes
void_output = partial(void_output, cpl=True)
const_string_output = partial(const_string_output, cpl=True)
double_output = partial(double_output, cpl=True)
# Raster Driver Routines
register_all = void_output(std_call('GDALAllRegister'), [], errcheck=False)
get_driver = voidptr_output(std_call('GDALGetDriver'), [c_int])
get_driver_by_name = voidptr_output(std_call('GDALGetDriverByName'), [c_char_p], errcheck=False)
get_driver_count = int_output(std_call('GDALGetDriverCount'), [])
get_driver_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
# Raster Data Source Routines
create_ds = voidptr_output(std_call('GDALCreate'), [c_void_p, c_char_p, c_int, c_int, c_int, c_int, c_void_p])
open_ds = voidptr_output(std_call('GDALOpen'), [c_char_p, c_int])
close_ds = void_output(std_call('GDALClose'), [c_void_p], errcheck=False)
flush_ds = int_output(std_call('GDALFlushCache'), [c_void_p])
copy_ds = voidptr_output(
std_call('GDALCreateCopy'),
[c_void_p, c_char_p, c_void_p, c_int, POINTER(c_char_p), c_void_p, c_void_p]
)
add_band_ds = void_output(std_call('GDALAddBand'), [c_void_p, c_int])
get_ds_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
get_ds_driver = voidptr_output(std_call('GDALGetDatasetDriver'), [c_void_p])
get_ds_xsize = int_output(std_call('GDALGetRasterXSize'), [c_void_p])
get_ds_ysize = int_output(std_call('GDALGetRasterYSize'), [c_void_p])
get_ds_raster_count = int_output(std_call('GDALGetRasterCount'), [c_void_p])
get_ds_raster_band = voidptr_output(std_call('GDALGetRasterBand'), [c_void_p, c_int])
get_ds_projection_ref = const_string_output(std_call('GDALGetProjectionRef'), [c_void_p])
set_ds_projection_ref = void_output(std_call('GDALSetProjection'), [c_void_p, c_char_p])
get_ds_geotransform = void_output(std_call('GDALGetGeoTransform'), [c_void_p, POINTER(c_double * 6)], errcheck=False)
set_ds_geotransform = void_output(std_call('GDALSetGeoTransform'), [c_void_p, POINTER(c_double * 6)])
get_ds_metadata = chararray_output(std_call('GDALGetMetadata'), [c_void_p, c_char_p], errcheck=False)
set_ds_metadata = void_output(std_call('GDALSetMetadata'), [c_void_p, POINTER(c_char_p), c_char_p])
get_ds_metadata_domain_list = chararray_output(std_call('GDALGetMetadataDomainList'), [c_void_p], errcheck=False)
get_ds_metadata_item = const_string_output(std_call('GDALGetMetadataItem'), [c_void_p, c_char_p, c_char_p])
set_ds_metadata_item = const_string_output(std_call('GDALSetMetadataItem'), [c_void_p, c_char_p, c_char_p, c_char_p])
free_dsl = void_output(std_call('CSLDestroy'), [POINTER(c_char_p)], errcheck=False)
if GDAL_VERSION >= (2, 1):
get_ds_info = const_string_output(std_call('GDALInfo'), [c_void_p, c_void_p])
else:
get_ds_info = None
# Raster Band Routines
band_io = void_output(
std_call('GDALRasterIO'),
[c_void_p, c_int, c_int, c_int, c_int, c_int, c_void_p, c_int, c_int, c_int, c_int, c_int]
)
get_band_xsize = int_output(std_call('GDALGetRasterBandXSize'), [c_void_p])
get_band_ysize = int_output(std_call('GDALGetRasterBandYSize'), [c_void_p])
get_band_index = int_output(std_call('GDALGetBandNumber'), [c_void_p])
get_band_description = const_string_output(std_call('GDALGetDescription'), [c_void_p])
get_band_ds = voidptr_output(std_call('GDALGetBandDataset'), [c_void_p])
get_band_datatype = int_output(std_call('GDALGetRasterDataType'), [c_void_p])
get_band_color_interp = int_output(std_call('GDALGetRasterColorInterpretation'), [c_void_p])
get_band_nodata_value = double_output(std_call('GDALGetRasterNoDataValue'), [c_void_p, POINTER(c_int)])
set_band_nodata_value = void_output(std_call('GDALSetRasterNoDataValue'), [c_void_p, c_double])
if GDAL_VERSION >= (2, 1):
delete_band_nodata_value = void_output(std_call('GDALDeleteRasterNoDataValue'), [c_void_p])
else:
delete_band_nodata_value = None
get_band_statistics = void_output(
std_call('GDALGetRasterStatistics'),
[
c_void_p, c_int, c_int, POINTER(c_double), POINTER(c_double),
POINTER(c_double), POINTER(c_double), c_void_p, c_void_p,
],
)
compute_band_statistics = void_output(
std_call('GDALComputeRasterStatistics'),
[c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double), POINTER(c_double), c_void_p, c_void_p],
)
# Reprojection routine
reproject_image = void_output(
std_call('GDALReprojectImage'),
[c_void_p, c_char_p, c_void_p, c_char_p, c_int, c_double, c_double, c_void_p, c_void_p, c_void_p]
)
auto_create_warped_vrt = voidptr_output(
std_call('GDALAutoCreateWarpedVRT'),
[c_void_p, c_char_p, c_char_p, c_int, c_double, c_void_p]
)
# Create VSI gdal raster files from in-memory buffers.
# https://gdal.org/cpl__vsi_8h.html
create_vsi_file_from_mem_buffer = voidptr_output(std_call('VSIFileFromMemBuffer'), [c_char_p, c_void_p, c_int, c_int])
get_mem_buffer_from_vsi_file = voidptr_output(std_call('VSIGetMemFileBuffer'), [c_char_p, POINTER(c_int), c_bool])
unlink_vsi_file = int_output(std_call('VSIUnlink'), [c_char_p])
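# --- Editor's note: hedged usage sketch, not part of the original module. ---
# These prototypes back django.contrib.gis.gdal.raster; a direct call looks
# like the following (the file path is hypothetical, 0 == GA_ReadOnly):
#
#   register_all()
#   ds = open_ds(b'/tmp/example.tif', 0)
#   band_count = get_ds_raster_count(ds)
#   close_ds(ds)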
|
bsd-3-clause
|
OpenAcademy-OpenStack/nova-scheduler
|
nova/scheduler/filters/metrics_filter.py
|
12
|
1979
|
# Copyright (c) 2014 Intel, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova.scheduler import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('weight_setting',
'nova.scheduler.weights.metrics',
group='metrics')
class MetricsFilter(filters.BaseHostFilter):
"""Metrics Filter
    This filter excludes hosts that do not expose the configured metrics, so
    that the metrics weigher will not fail on those hosts.
"""
def __init__(self):
super(MetricsFilter, self).__init__()
opts = utils.parse_options(CONF.metrics.weight_setting,
sep='=',
converter=float,
name="metrics.weight_setting")
self.keys = [x[0] for x in opts]
def host_passes(self, host_state, filter_properties):
unavail = [i for i in self.keys if i not in host_state.metrics]
if unavail:
LOG.debug(_("%(host_state)s does not have the following "
"metrics: %(metrics)s"),
{'host_state': host_state,
'metrics': ', '.join(unavail)})
return len(unavail) == 0
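# --- Editor's note: hedged usage sketch, not part of the original module. ---
# With weight_setting = "cpu.percent=1.0, ram.usage=0.5" (metric names are
# illustrative) the filter keeps only hosts whose host_state.metrics contains
# both keys:
#
#   metrics_filter = MetricsFilter()   # self.keys == ['cpu.percent', 'ram.usage']
#   metrics_filter.host_passes(host_state, filter_properties)  # False if any key missing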
|
apache-2.0
|
mensler/ansible
|
examples/scripts/uptime.py
|
58
|
3218
|
#!/usr/bin/env python
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.plugins.callback import CallbackBase
# Create a callback object so we can capture the output
class ResultsCollector(CallbackBase):
def __init__(self, *args, **kwargs):
super(ResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = {}
self.host_unreachable = {}
self.host_failed = {}
def v2_runner_on_unreachable(self, result):
self.host_unreachable[result._host.get_name()] = result
def v2_runner_on_ok(self, result, *args, **kwargs):
self.host_ok[result._host.get_name()] = result
def v2_runner_on_failed(self, result, *args, **kwargs):
self.host_failed[result._host.get_name()] = result
def main():
host_list = ['localhost', 'www.example.com', 'www.google.com']
Options = namedtuple('Options', ['connection','module_path', 'forks', 'remote_user',
'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args',
'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check'])
# initialize needed objects
variable_manager = VariableManager()
loader = DataLoader()
options = Options(connection='smart', module_path='/usr/share/ansible', forks=100,
remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None,
sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None,
become_user=None, verbosity=None, check=False)
passwords = dict()
# create inventory and pass to var manager
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=host_list)
variable_manager.set_inventory(inventory)
# create play with tasks
play_source = dict(
name = "Ansible Play",
hosts = host_list,
gather_facts = 'no',
tasks = [ dict(action=dict(module='command', args=dict(cmd='/usr/bin/uptime'))) ]
)
play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
# actually run it
tqm = None
callback = ResultsCollector()
try:
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
)
tqm._stdout_callback = callback
result = tqm.run(play)
finally:
if tqm is not None:
tqm.cleanup()
print("UP ***********")
for host, result in callback.host_ok.items():
print('{} >>> {}'.format(host, result._result['stdout']))
print("FAILED *******")
for host, result in callback.host_failed.items():
print('{} >>> {}'.format(host, result._result['msg']))
print("DOWN *********")
for host, result in callback.host_unreachable.items():
print('{} >>> {}'.format(host, result._result['msg']))
if __name__ == '__main__':
main()
|
gpl-3.0
|
guorendong/iridium-browser-ubuntu
|
third_party/WebKit/Source/bindings/scripts/v8_dictionary.py
|
6
|
8246
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate template contexts of dictionaries for both v8 bindings and
implementation classes that are used by blink's core/modules.
"""
import operator
from idl_types import IdlType
from v8_globals import includes
import v8_types
import v8_utilities
from v8_utilities import has_extended_attribute_value
DICTIONARY_H_INCLUDES = frozenset([
'bindings/core/v8/ToV8.h',
'bindings/core/v8/V8Binding.h',
'platform/heap/Handle.h',
])
DICTIONARY_CPP_INCLUDES = frozenset([
'bindings/core/v8/ExceptionState.h',
])
def setter_name_for_dictionary_member(member):
name = v8_utilities.cpp_name(member)
return 'set%s' % v8_utilities.capitalize(name)
def null_setter_name_for_dictionary_member(member):
if member.idl_type.is_nullable:
name = v8_utilities.cpp_name(member)
return 'set%sToNull' % v8_utilities.capitalize(name)
return None
def has_method_name_for_dictionary_member(member):
name = v8_utilities.cpp_name(member)
return 'has%s' % v8_utilities.capitalize(name)
def unwrap_nullable_if_needed(idl_type):
if idl_type.is_nullable:
return idl_type.inner_type
return idl_type
# Context for V8 bindings
def dictionary_context(dictionary, interfaces_info):
includes.clear()
includes.update(DICTIONARY_CPP_INCLUDES)
cpp_class = v8_utilities.cpp_name(dictionary)
context = {
'cpp_class': cpp_class,
'header_includes': set(DICTIONARY_H_INCLUDES),
'members': [member_context(dictionary, member)
for member in sorted(dictionary.members,
key=operator.attrgetter('name'))],
'use_permissive_dictionary_conversion': 'PermissiveDictionaryConversion' in dictionary.extended_attributes,
'v8_class': v8_types.v8_type(cpp_class),
'v8_original_class': v8_types.v8_type(dictionary.name),
}
if dictionary.parent:
IdlType(dictionary.parent).add_includes_for_type()
parent_cpp_class = v8_utilities.cpp_name_from_interfaces_info(
dictionary.parent, interfaces_info)
context.update({
'parent_cpp_class': parent_cpp_class,
'parent_v8_class': v8_types.v8_type(parent_cpp_class),
})
return context
def member_context(dictionary, member):
extended_attributes = member.extended_attributes
idl_type = member.idl_type
idl_type.add_includes_for_type(extended_attributes)
unwrapped_idl_type = unwrap_nullable_if_needed(idl_type)
if member.is_required and member.default_value:
raise Exception(
'Required member %s must not have a default value.' % member.name)
def default_values():
if not member.default_value:
return None, None
if member.default_value.is_null:
return None, 'v8::Null(isolate)'
cpp_default_value = unwrapped_idl_type.literal_cpp_value(
member.default_value)
v8_default_value = unwrapped_idl_type.cpp_value_to_v8_value(
cpp_value=cpp_default_value, isolate='isolate',
creation_context='creationContext')
return cpp_default_value, v8_default_value
cpp_default_value, v8_default_value = default_values()
cpp_name = v8_utilities.cpp_name(member)
return {
'cpp_default_value': cpp_default_value,
'cpp_name': cpp_name,
'cpp_type': unwrapped_idl_type.cpp_type,
'cpp_value_to_v8_value': unwrapped_idl_type.cpp_value_to_v8_value(
cpp_value='impl.%s()' % cpp_name, isolate='isolate',
creation_context='creationContext',
extended_attributes=extended_attributes),
'deprecate_as': v8_utilities.deprecate_as(member),
'enum_type': idl_type.enum_type,
'enum_values': unwrapped_idl_type.enum_values,
'has_method_name': has_method_name_for_dictionary_member(member),
'idl_type': idl_type.base_type,
'is_interface_type': idl_type.is_interface_type and not idl_type.is_dictionary,
'is_nullable': idl_type.is_nullable,
'is_object': unwrapped_idl_type.name == 'Object',
'is_required': member.is_required,
'name': member.name,
'setter_name': setter_name_for_dictionary_member(member),
'null_setter_name': null_setter_name_for_dictionary_member(member),
'v8_default_value': v8_default_value,
'v8_value_to_local_cpp_value': unwrapped_idl_type.v8_value_to_local_cpp_value(
extended_attributes, member.name + 'Value',
member.name, isolate='isolate', use_exception_state=True),
}
# Context for implementation classes
def dictionary_impl_context(dictionary, interfaces_info):
def remove_duplicate_members(members):
# When [ImplementedAs] is used, cpp_name can conflict. For example,
# dictionary D { long foo; [ImplementedAs=foo, DeprecateAs=Foo] long oldFoo; };
# This function removes such duplications, checking they have the same type.
members_dict = {}
for member in members:
cpp_name = member['cpp_name']
duplicated_member = members_dict.get(cpp_name)
if duplicated_member and duplicated_member != member:
raise Exception('Member name conflict: %s' % cpp_name)
members_dict[cpp_name] = member
return sorted(members_dict.values(), key=lambda member: member['cpp_name'])
includes.clear()
header_includes = set(['platform/heap/Handle.h'])
members = [member_impl_context(member, interfaces_info, header_includes)
for member in dictionary.members]
members = remove_duplicate_members(members)
context = {
'header_includes': header_includes,
'cpp_class': v8_utilities.cpp_name(dictionary),
'members': members,
}
if dictionary.parent:
context['parent_cpp_class'] = v8_utilities.cpp_name_from_interfaces_info(
dictionary.parent, interfaces_info)
parent_interface_info = interfaces_info.get(dictionary.parent)
if parent_interface_info:
context['header_includes'].add(
parent_interface_info['include_path'])
return context
def member_impl_context(member, interfaces_info, header_includes):
idl_type = unwrap_nullable_if_needed(member.idl_type)
is_object = idl_type.name == 'Object'
cpp_name = v8_utilities.cpp_name(member)
def getter_expression():
if idl_type.impl_should_use_nullable_container:
return 'm_%s.get()' % cpp_name
return 'm_%s' % cpp_name
def has_method_expression():
if idl_type.impl_should_use_nullable_container or idl_type.is_enum or idl_type.is_string_type or idl_type.is_union_type:
return '!m_%s.isNull()' % cpp_name
elif is_object:
return '!(m_{0}.isEmpty() || m_{0}.isNull() || m_{0}.isUndefined())'.format(cpp_name)
else:
return 'm_%s' % cpp_name
def member_cpp_type():
member_cpp_type = idl_type.cpp_type_args(used_in_cpp_sequence=True)
if idl_type.impl_should_use_nullable_container:
return v8_types.cpp_template_type('Nullable', member_cpp_type)
return member_cpp_type
cpp_default_value = None
if member.default_value and not member.default_value.is_null:
cpp_default_value = idl_type.literal_cpp_value(member.default_value)
header_includes.update(idl_type.impl_includes_for_type(interfaces_info))
return {
'cpp_default_value': cpp_default_value,
'cpp_name': cpp_name,
'getter_expression': getter_expression(),
'has_method_expression': has_method_expression(),
'has_method_name': has_method_name_for_dictionary_member(member),
'is_object': is_object,
'is_traceable': idl_type.is_traceable,
'member_cpp_type': member_cpp_type(),
'null_setter_name': null_setter_name_for_dictionary_member(member),
'rvalue_cpp_type': idl_type.cpp_type_args(used_as_rvalue_type=True),
'setter_name': setter_name_for_dictionary_member(member),
}
|
bsd-3-clause
|
lz1988/django-web2015
|
tests/regressiontests/http_utils/tests.py
|
52
|
1551
|
from __future__ import unicode_literals
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.http.utils import conditional_content_removal
from django.test import TestCase
class HttpUtilTests(TestCase):
def test_conditional_content_removal(self):
"""
Tests that content is removed from regular and streaming responses with
a status_code of 100-199, 204, 304 or a method of "HEAD".
"""
req = HttpRequest()
# Do nothing for 200 responses.
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'abc')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'abc')
# Strip content for some status codes.
for status_code in (100, 150, 199, 204, 304):
res = HttpResponse('abc', status=status_code)
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'], status=status_code)
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
# Strip content for HEAD requests.
req.method = 'HEAD'
res = HttpResponse('abc')
conditional_content_removal(req, res)
self.assertEqual(res.content, b'')
res = StreamingHttpResponse(['abc'])
conditional_content_removal(req, res)
self.assertEqual(b''.join(res), b'')
|
bsd-2-clause
|
Sonicbids/django
|
django/contrib/gis/geos/prototypes/topology.py
|
48
|
2756
|
"""
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
__all__ = ['geos_boundary', 'geos_buffer', 'geos_cascaded_union',
'geos_centroid', 'geos_convexhull', 'geos_difference',
'geos_envelope', 'geos_intersection', 'geos_linemerge',
'geos_pointonsurface', 'geos_preservesimplify', 'geos_simplify',
'geos_symdifference', 'geos_union', 'geos_relate',
'geos_project', 'geos_interpolate', 'geos_project_normalized',
'geos_interpolate_normalized']
from ctypes import c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_minus_one, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
def topology(func, *args, **kwargs):
"For GEOS unary topology functions."
argtypes = [GEOM_PTR]
if args:
argtypes += args
func.argtypes = argtypes
func.restype = kwargs.get('restype', GEOM_PTR)
func.errcheck = kwargs.get('errcheck', check_geom)
return func
### Topology Routines ###
geos_boundary = topology(GEOSFunc('GEOSBoundary'))
geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int)
geos_centroid = topology(GEOSFunc('GEOSGetCentroid'))
geos_convexhull = topology(GEOSFunc('GEOSConvexHull'))
geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR)
geos_envelope = topology(GEOSFunc('GEOSEnvelope'))
geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR)
geos_linemerge = topology(GEOSFunc('GEOSLineMerge'))
geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface'))
geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double)
geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double)
geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR)
geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR)
geos_cascaded_union = GEOSFunc('GEOSUnionCascaded')
geos_cascaded_union.argtypes = [GEOM_PTR]
geos_cascaded_union.restype = GEOM_PTR
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFunc('GEOSRelate')
geos_relate.argtypes = [GEOM_PTR, GEOM_PTR]
geos_relate.restype = geos_char_p
geos_relate.errcheck = check_string
# Linear referencing routines
geos_project = topology(GEOSFunc('GEOSProject'), GEOM_PTR,
restype=c_double, errcheck=check_minus_one)
geos_interpolate = topology(GEOSFunc('GEOSInterpolate'), c_double)
geos_project_normalized = topology(GEOSFunc('GEOSProjectNormalized'),
GEOM_PTR, restype=c_double, errcheck=check_minus_one)
geos_interpolate_normalized = topology(GEOSFunc('GEOSInterpolateNormalized'), c_double)
|
bsd-3-clause
|
kuanyui/django-haystack
|
haystack/manager.py
|
27
|
3657
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from haystack.query import EmptySearchQuerySet, SearchQuerySet
class SearchIndexManager(object):
def __init__(self, using=None):
super(SearchIndexManager, self).__init__()
self.using = using
def get_search_queryset(self):
"""Returns a new SearchQuerySet object. Subclasses can override this method
to easily customize the behavior of the Manager.
"""
return SearchQuerySet(using=self.using)
def get_empty_query_set(self):
return EmptySearchQuerySet(using=self.using)
def all(self):
return self.get_search_queryset()
def none(self):
return self.get_empty_query_set()
def filter(self, *args, **kwargs):
return self.get_search_queryset().filter(*args, **kwargs)
def exclude(self, *args, **kwargs):
return self.get_search_queryset().exclude(*args, **kwargs)
def filter_and(self, *args, **kwargs):
return self.get_search_queryset().filter_and(*args, **kwargs)
def filter_or(self, *args, **kwargs):
return self.get_search_queryset().filter_or(*args, **kwargs)
def order_by(self, *args):
return self.get_search_queryset().order_by(*args)
def highlight(self):
return self.get_search_queryset().highlight()
def boost(self, term, boost):
return self.get_search_queryset().boost(term, boost)
def facet(self, field):
return self.get_search_queryset().facet(field)
def within(self, field, point_1, point_2):
return self.get_search_queryset().within(field, point_1, point_2)
def dwithin(self, field, point, distance):
return self.get_search_queryset().dwithin(field, point, distance)
def distance(self, field, point):
return self.get_search_queryset().distance(field, point)
def date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
        return self.get_search_queryset().date_facet(field, start_date, end_date, gap_by, gap_amount=gap_amount)
def query_facet(self, field, query):
return self.get_search_queryset().query_facet(field, query)
def narrow(self, query):
return self.get_search_queryset().narrow(query)
def raw_search(self, query_string, **kwargs):
return self.get_search_queryset().raw_search(query_string, **kwargs)
def load_all(self):
return self.get_search_queryset().load_all()
def auto_query(self, query_string, fieldname='content'):
return self.get_search_queryset().auto_query(query_string, fieldname=fieldname)
def autocomplete(self, **kwargs):
return self.get_search_queryset().autocomplete(**kwargs)
def using(self, connection_name):
return self.get_search_queryset().using(connection_name)
def count(self):
return self.get_search_queryset().count()
def best_match(self):
return self.get_search_queryset().best_match()
def latest(self, date_field):
return self.get_search_queryset().latest(date_field)
def more_like_this(self, model_instance):
return self.get_search_queryset().more_like_this(model_instance)
def facet_counts(self):
return self.get_search_queryset().facet_counts()
def spelling_suggestion(self, preferred_query=None):
        return self.get_search_queryset().spelling_suggestion(preferred_query=preferred_query)
def values(self, *fields):
return self.get_search_queryset().values(*fields)
def values_list(self, *fields, **kwargs):
return self.get_search_queryset().values_list(*fields, **kwargs)
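# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Every method above simply proxies to a fresh SearchQuerySet, so chained calls
# behave exactly as they would on SearchQuerySet itself (a configured haystack
# connection is assumed; field names are illustrative):
#
#   manager = SearchIndexManager(using='default')
#   results = manager.filter(content='django').order_by('-pub_date')[:10]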
|
bsd-3-clause
|
windinthew/audacity
|
lib-src/lv2/lv2/waflib/Tools/xlc.py
|
330
|
1175
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_xlc(conf):
cc=conf.find_program(['xlc_r','xlc'],var='CC')
cc=conf.cmd_to_list(cc)
conf.get_xlc_version(cc)
conf.env.CC_NAME='xlc'
conf.env.CC=cc
@conf
def xlc_common_flags(conf):
v=conf.env
v['CC_SRC_F']=[]
v['CC_TGT_F']=['-c','-o']
if not v['LINK_CC']:v['LINK_CC']=v['CC']
v['CCLNK_SRC_F']=[]
v['CCLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['RPATH_ST']='-Wl,-rpath,%s'
v['SONAME_ST']=[]
v['SHLIB_MARKER']=[]
v['STLIB_MARKER']=[]
v['LINKFLAGS_cprogram']=['-Wl,-brtl']
v['cprogram_PATTERN']='%s'
v['CFLAGS_cshlib']=['-fPIC']
v['LINKFLAGS_cshlib']=['-G','-Wl,-brtl,-bexpfull']
v['cshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cstlib']=[]
v['cstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_xlc()
conf.find_ar()
conf.xlc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
|
gpl-2.0
|
kslundberg/pants
|
tests/python/pants_test/backend/jvm/tasks/jvm_compile/java/jvm_platform_integration_mixin.py
|
5
|
10415
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from subprocess import PIPE, Popen
from textwrap import dedent
from pants.fs.archive import ZIP
from pants.util.contextutil import temporary_dir
from pants_test.testutils.compile_strategy_utils import provide_compile_strategies
class JvmPlatformIntegrationMixin(object):
"""Mixin providing lots of JvmPlatform-related integration tests to java compilers (eg, jmake)."""
def get_pants_compile_args(self):
"""List of arguments to pants that determine what compiler to use.
The compiling task must be the last argument (eg, compile.java, compile.zinc-java).
"""
raise NotImplementedError
def determine_version(self, path):
"""Given the filepath to a class file, invokes the 'file' commandline to find its java version.
:param str path: filepath (eg, tempdir/Foo.class)
:return: A java version string (eg, '1.6').
"""
# Map of target version numbers to their equivalent class file versions, which are different.
version_map = {
'50.0': '1.6',
'51.0': '1.7',
'52.0': '1.8',
}
p = Popen(['file', path], stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
self.assertEqual(0, p.returncode, 'Failed to run file on {}.'.format(path))
match = re.search(r'version (\d+[.]\d+)', out)
self.assertTrue(match is not None, 'Could not determine version for {}'.format(path))
return version_map[match.group(1)]
def _get_jar_class_versions(self, jarname):
path = os.path.join('dist', jarname)
self.assertTrue(os.path.exists(path), '{} does not exist.'.format(path))
class_to_version = {}
with temporary_dir() as tempdir:
ZIP.extract(path, tempdir, filter_func=lambda f: f.endswith('.class'))
for root, dirs, files in os.walk(tempdir):
for name in files:
path = os.path.abspath(os.path.join(root, name))
class_to_version[os.path.relpath(path, tempdir)] = self.determine_version(path)
return class_to_version
def _get_compiled_class_versions(self, strategy, spec, more_args=None):
more_args = more_args or []
jar_name = os.path.basename(spec)
while jar_name.endswith(':'):
jar_name = jar_name[:-1]
if ':' in jar_name:
jar_name = jar_name[jar_name.find(':') + 1:]
with temporary_dir() as cache_dir:
config = {'cache.compile.java': {'write_to': [cache_dir]}}
with temporary_dir(root_dir=self.workdir_root()) as workdir:
pants_run = self.run_pants_with_workdir(
['binary'] + self.get_pants_compile_args()
+ ['--strategy={}'.format(strategy), 'compile.checkstyle', '--skip', spec]
+ more_args,
workdir, config)
self.assert_success(pants_run)
return self._get_jar_class_versions('{}.jar'.format(jar_name))
def assert_class_versions(self, expected, received):
def format_dict(d):
return ''.join('\n {} = {}'.format(key, val) for key, val in sorted(d.items()))
self.assertEqual(expected, received,
'Compiled class versions differed.\n expected: {}\n received: {}'
.format(format_dict(expected), format_dict(received)))
@provide_compile_strategies
def test_compile_java6(self, strategy):
target_spec = 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java6'
self.assert_class_versions({
'org/pantsbuild/testproject/targetlevels/java6/Six.class': '1.6',
}, self._get_compiled_class_versions(strategy, target_spec))
@provide_compile_strategies
def test_compile_java7(self, strategy):
target_spec = 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java7'
self.assert_class_versions({
'org/pantsbuild/testproject/targetlevels/java7/Seven.class': '1.7',
}, self._get_compiled_class_versions(strategy, target_spec))
@provide_compile_strategies
def test_compile_java7on6(self, strategy):
target_spec = 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java7on6'
self.assert_class_versions({
'org/pantsbuild/testproject/targetlevels/java7on6/SevenOnSix.class': '1.7',
'org/pantsbuild/testproject/targetlevels/java6/Six.class': '1.6',
}, self._get_compiled_class_versions(strategy, target_spec))
@provide_compile_strategies
def test_compile_target_coercion(self, strategy):
target_spec = 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/unspecified'
self.assert_class_versions({
'org/pantsbuild/testproject/targetlevels/unspecified/Unspecified.class': '1.7',
'org/pantsbuild/testproject/targetlevels/unspecified/Six.class': '1.6',
}, self._get_compiled_class_versions(strategy, target_spec, more_args=[
'--jvm-platform-validate-check=warn',
'--jvm-platform-default-platform=java7',
]))
def _test_compile(self, target_level, class_name, source_contents, strategy, platform_args=None):
with temporary_dir(root_dir=os.path.abspath('.')) as tmpdir:
with open(os.path.join(tmpdir, 'BUILD'), 'w') as f:
f.write(dedent('''
java_library(name='{target_name}',
sources=['{class_name}.java'],
platform='{target_level}',
)
'''.format(target_name=os.path.basename(tmpdir),
class_name=class_name,
target_level=target_level)))
with open(os.path.join(tmpdir, '{}.java'.format(class_name)), 'w') as f:
f.write(source_contents)
platforms = str({
str(target_level): {
'source': str(target_level),
'target': str(target_level),
'args': platform_args or [],
}
})
command = []
command.extend(['--jvm-platform-platforms={}'.format(platforms),
'--jvm-platform-default-platform={}'.format(target_level)])
command.extend(self.get_pants_compile_args())
command.extend(['--strategy={}'.format(strategy), tmpdir])
pants_run = self.run_pants(command)
return pants_run
@provide_compile_strategies
def test_compile_diamond_operator_java7_works(self, strategy):
pants_run = self._test_compile('1.7', 'Diamond', dedent('''
public class Diamond<T> {
public static void main(String[] args) {
Diamond<String> diamond = new Diamond<>();
}
}
'''), strategy)
self.assert_success(pants_run)
@provide_compile_strategies
def test_compile_diamond_operator_java6_fails(self, strategy):
pants_run = self._test_compile('1.6', 'Diamond', dedent('''
public class Diamond<T> {
public static void main(String[] args) {
Diamond<String> diamond = new Diamond<>();
}
}
'''), strategy)
self.assert_failure(pants_run)
@provide_compile_strategies
def test_compile_with_javac_args(self, strategy):
pants_run = self._test_compile('1.7', 'LintyDiamond', dedent('''
public class LintyDiamond<T> {
public static void main(String[] args) {
LintyDiamond<String> diamond = new LintyDiamond<>();
}
}
'''), strategy, platform_args=['-C-Xlint:cast'])
self.assert_success(pants_run)
def test_compile_stale_platform_settings(self):
# Tests that targets are properly re-compiled when their source/target levels change.
# This currently fails because JMAKE doesn't realize that the old class files should be removed.
with temporary_dir(root_dir=os.path.abspath('.')) as tmpdir:
with open(os.path.join(tmpdir, 'BUILD'), 'w') as f:
f.write(dedent('''
java_library(name='diamond',
sources=['Diamond.java'],
)
'''))
with open(os.path.join(tmpdir, 'Diamond.java'), 'w') as f:
f.write(dedent('''
public class Diamond<T> {
public static void main(String[] args) {
// The diamond operator <> for generics was introduced in jdk7.
Diamond<String> shinyDiamond = new Diamond<>();
}
}
'''))
platforms = {
'java6': {'source': '6'},
'java7': {'source': '7'},
}
# We run these all in the same working directory, because we're testing caching behavior.
with temporary_dir(root_dir=self.workdir_root()) as workdir:
def compile_diamond(platform):
return self.run_pants_with_workdir(['--jvm-platform-platforms={}'.format(platforms),
'--jvm-platform-default-platform={}'.format(platform),
'-ldebug',
'compile'] + self.get_pants_compile_args() +
['--strategy=isolated',
'{}:diamond'.format(tmpdir)], workdir=workdir)
# We shouldn't be able to compile this with -source=6.
self.assert_failure(compile_diamond('java6'), 'Diamond.java was compiled successfully with '
'java6 starting from a fresh workdir, but '
'that should not be possible.')
# We should be able to compile this with -source=7.
self.assert_success(compile_diamond('java7'), 'Diamond.java failed to compile in java7, '
'which it should be able to.')
# We still shouldn't be able to compile this with -source=6. If the below passes, it means
# that we saved the cached run from java7 and didn't recompile, which is an error.
self.assert_failure(compile_diamond('java6'), 'Diamond.java erroneously compiled in java6,'
' which means that either compilation was'
' skipped due to bad fingerprinting/caching,'
' or the compiler (probably jmake) failed to'
' clean up the previous class from the java7'
' compile.')
|
apache-2.0
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/created_date_v30_rc2.py
|
1
|
3094
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CreatedDateV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'value': 'datetime'
}
attribute_map = {
'value': 'value'
}
def __init__(self, value=None): # noqa: E501
"""CreatedDateV30Rc2 - a model defined in Swagger""" # noqa: E501
self._value = None
self.discriminator = None
if value is not None:
self.value = value
@property
def value(self):
"""Gets the value of this CreatedDateV30Rc2. # noqa: E501
:return: The value of this CreatedDateV30Rc2. # noqa: E501
:rtype: datetime
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this CreatedDateV30Rc2.
:param value: The value of this CreatedDateV30Rc2. # noqa: E501
:type: datetime
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CreatedDateV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreatedDateV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
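if __name__ == "__main__":  # pragma: no cover
    # Illustrative usage sketch, not part of the generated model: build an
    # instance, inspect its dict form and compare two equal instances.
    import datetime
    created = CreatedDateV30Rc2(value=datetime.datetime(2020, 1, 1))
    print(created.to_dict())  # {'value': datetime.datetime(2020, 1, 1, 0, 0)}
    print(created == CreatedDateV30Rc2(value=datetime.datetime(2020, 1, 1)))  # True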
|
mit
|
OE-Backup/Infrastructure
|
site-cookbooks/pp2-ps/files/default/fo-prd/mail_app_running.py
|
1
|
1076
|
#!/usr/bin/python
import smtplib
def prompt(prompt):
return raw_input(prompt).strip()
fromaddr = '[email protected]'
toaddrs = '[email protected]'
msg = """From: [email protected]
To: [email protected]
Subject: [PRD] Payment Service app is UP in slave (10.0.111.15).
Payment Service is UP after promotion to SLAVE (10.0.111.15).
Check it out going to: http://tolkien:[email protected]:8080/payment_service/static/restapi.html
"""
print "Message length is " + repr(len(msg))
#Change according to your settings
smtp_server = 'email-smtp.us-east-1.amazonaws.com'
smtp_username = 'AKIAIR24K7DNISBVFDIA'
smtp_password = 'Apy+o8s5eDvKXjtFsNjTL/OCeKP+4gCMUQBRr0CRLX8z'
smtp_port = '587'
#smtp_port = '25'
smtp_do_tls = True
server = smtplib.SMTP(
host = smtp_server,
port = smtp_port,
    #timeout = 10 # Python 2.4 does not like this...
)
server.set_debuglevel(10)
server.starttls()
server.ehlo()
server.login(smtp_username, smtp_password)
server.sendmail(fromaddr, toaddrs, msg)
print server.quit()
|
apache-2.0
|
matbu/ansible-modules-extras
|
cloud/amazon/ec2_vpc_nacl_facts.py
|
45
|
6624
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_vpc_nacl_facts
short_description: Gather facts about Network ACLs in an AWS VPC
description:
- Gather facts about Network ACLs in an AWS VPC
version_added: "2.2"
author: "Brad Davidson (@brandond)"
requires: [ boto3 ]
options:
nacl_ids:
description:
- A list of Network ACL IDs to retrieve facts about.
required: false
default: []
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter \
names and values are case sensitive.
required: false
default: {}
notes:
- By default, the module will return all Network ACLs.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all Network ACLs:
- name: Get All NACLs
register: all_nacls
ec2_vpc_nacl_facts:
region: us-west-2
# Retrieve default Network ACLs:
- name: Get Default NACLs
register: default_nacls
ec2_vpc_nacl_facts:
region: us-west-2
filters:
'default': 'true'
'''
RETURN = '''
nacl:
description: Returns an array of complex objects as described below.
returned: success
type: list of complex
contains:
nacl_id:
description: The ID of the Network Access Control List.
returned: always
type: string
vpc_id:
description: The ID of the VPC that the NACL is attached to.
returned: always
type: string
is_default:
description: True if the NACL is the default for its VPC.
returned: always
type: boolean
tags:
description: A dict of tags associated with the NACL.
returned: always
type: dict
subnets:
description: A list of subnet IDs that are associated with the NACL.
returned: always
type: list of string
ingress:
description: A list of NACL ingress rules.
returned: always
type: list of list
egress:
description: A list of NACL egress rules.
returned: always
type: list of list
'''
try:
import boto3
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
def list_ec2_vpc_nacls(connection, module):
nacl_ids = module.params.get("nacl_ids")
filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
try:
nacls = connection.describe_network_acls(NetworkAclIds=nacl_ids, Filters=filters)
except (ClientError, NoCredentialsError) as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
    # Turn the boto3 result into ansible_friendly_snaked_names
snaked_nacls = []
for nacl in nacls['NetworkAcls']:
snaked_nacls.append(camel_dict_to_snake_dict(nacl))
    # Turn the boto3 result into an ansible friendly tag dictionary
for nacl in snaked_nacls:
if 'tags' in nacl:
nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'])
if 'entries' in nacl:
nacl['egress'] = [nacl_entry_to_list(e) for e in nacl['entries']
if e['rule_number'] != 32767 and e['egress']]
nacl['ingress'] = [nacl_entry_to_list(e) for e in nacl['entries']
if e['rule_number'] != 32767 and not e['egress']]
del nacl['entries']
if 'associations' in nacl:
nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
del nacl['associations']
if 'network_acl_id' in nacl:
nacl['nacl_id'] = nacl['network_acl_id']
del nacl['network_acl_id']
module.exit_json(nacls=snaked_nacls)
def nacl_entry_to_list(entry):
elist = [entry['rule_number'],
PROTOCOL_NAMES[entry['protocol']],
entry['rule_action'],
entry['cidr_block']
]
if entry['protocol'] == '1':
elist = elist + [-1, -1]
else:
elist = elist + [None, None, None, None]
if 'icmp_type_code' in entry:
elist[4] = entry['icmp_type_code']['type']
elist[5] = entry['icmp_type_code']['code']
if 'port_range' in entry:
elist[6] = entry['port_range']['from']
elist[7] = entry['port_range']['to']
return elist
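# Illustrative shape of the rule lists built above (hypothetical entry, not
# part of the module): a TCP allow rule such as
#   {'rule_number': 100, 'protocol': '6', 'rule_action': 'allow',
#    'cidr_block': '0.0.0.0/0', 'port_range': {'from': 22, 'to': 22}}
# is flattened to
#   [100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22]
# which is what the RETURN documentation means by "list of list" for the
# ingress/egress keys.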
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
nacl_ids=dict(default=[], type='list'),
filters=dict(default={}, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['nacl_ids', 'filters']
]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='ec2',
region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_ec2_vpc_nacls(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
SebDieBln/QGIS
|
python/plugins/processing/core/GeoAlgorithm.py
|
1
|
20746
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
GeoAlgorithmExecutionException.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os.path
import traceback
import subprocess
import copy
from PyQt4.QtGui import QIcon
from PyQt4.QtCore import QCoreApplication, QSettings
from qgis.core import QGis, QgsRasterFileWriter
from processing.core.ProcessingLog import ProcessingLog
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.SilentProgress import SilentProgress
from processing.core.parameters import ParameterRaster, ParameterVector, ParameterMultipleInput, ParameterTable, Parameter
from processing.core.outputs import OutputVector, OutputRaster, OutputTable, OutputHTML, Output
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools import dataobjects, vector
from processing.tools.system import setTempOutput
from processing.algs.help import shortHelp
class GeoAlgorithm:
def __init__(self):
self._icon = QIcon(os.path.dirname(__file__) + '/../images/alg.png')
# Parameters needed by the algorithm
self.parameters = list()
# Outputs generated by the algorithm
self.outputs = list()
# Name and group for normal toolbox display
self.name, self.i18n_name = '', ''
self.group, self.i18n_group = '', ''
# The crs taken from input layers (if possible), and used when
# loading output layers
self.crs = None
# Change any of the following if your algorithm should not
# appear in the toolbox or modeler
self.showInToolbox = True
self.showInModeler = True
#if true, will show only loaded layers in parameters dialog.
#Also, if True, the algorithm does not run on the modeler
        #or batch processing interface
self.allowOnlyOpenedLayers = False
        # False if it should not be run as a batch process
self.canRunInBatchMode = True
# To be set by the provider when it loads the algorithm
self.provider = None
# If the algorithm is run as part of a model, the parent model
# can be set in this variable, to allow for customized
# behaviour, in case some operations should be run differently
# when running as part of a model
self.model = None
self.defineCharacteristics()
def getCopy(self):
"""Returns a new instance of this algorithm, ready to be used
for being executed.
"""
newone = copy.copy(self)
newone.parameters = copy.deepcopy(self.parameters)
newone.outputs = copy.deepcopy(self.outputs)
return newone
# methods to overwrite when creating a custom geoalgorithm
def getIcon(self):
return self._icon
@staticmethod
def getDefaultIcon():
return GeoAlgorithm._icon
def _formatHelp(self, text):
return "<h2>%s</h2>%s" % (self.name, "".join(["<p>%s</p>" % s for s in text.split("\n")]))
def help(self):
return False, None
def shortHelp(self):
text = shortHelp.get(self.commandLineName(), None)
if text is not None:
text = self._formatHelp(text)
return text
def processAlgorithm(self, progress):
"""Here goes the algorithm itself.
There is no return value from this method.
A GeoAlgorithmExecutionException should be raised in case
something goes wrong.
"""
pass
def defineCharacteristics(self):
"""Here is where the parameters and outputs should be defined.
"""
pass
def getCustomParametersDialog(self):
"""If the algorithm has a custom parameters dialog, it should
be returned here, ready to be executed.
"""
return None
def getCustomModelerParametersDialog(self, modelAlg, algName=None):
"""If the algorithm has a custom parameters dialog when called
from the modeler, it should be returned here, ready to be
executed.
"""
return None
def getParameterDescriptions(self):
"""Returns a dict with param names as keys and detailed
descriptions of each param as value. These descriptions are
used as tool tips in the parameters dialog.
If a description does not exist, the parameter's
human-readable name is used.
"""
descs = {}
return descs
def checkBeforeOpeningParametersDialog(self):
"""If there is any check to perform before the parameters
dialog is opened, it should be done here.
This method returns an error message string if there is any
problem (for instance, an external app not configured yet),
or None if the parameters dialog can be opened.
Note that this check should also be done in the
processAlgorithm method, since algorithms can be called without
opening the parameters dialog.
"""
return None
def checkParameterValuesBeforeExecuting(self):
"""If there is any check to do before launching the execution
of the algorithm, it should be done here.
If values are not correct, a message should be returned
explaining the problem.
This check is called from the parameters dialog, and also when
calling from the console.
"""
return None
# =========================================================
def execute(self, progress=SilentProgress(), model=None):
"""The method to use to call a processing algorithm.
Although the body of the algorithm is in processAlgorithm(),
it should be called using this method, since it performs
some additional operations.
Raises a GeoAlgorithmExecutionException in case anything goes
wrong.
"""
self.model = model
try:
self.setOutputCRS()
self.resolveTemporaryOutputs()
self.resolveDataObjects()
self.checkOutputFileExtensions()
self.runPreExecutionScript(progress)
self.processAlgorithm(progress)
progress.setPercentage(100)
self.convertUnsupportedFormats(progress)
self.runPostExecutionScript(progress)
except GeoAlgorithmExecutionException as gaee:
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, gaee.msg)
raise gaee
except Exception as e:
# If something goes wrong and is not caught in the
# algorithm, we catch it here and wrap it
lines = [self.tr('Uncaught error while executing algorithm')]
lines.append(traceback.format_exc())
ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, lines)
raise GeoAlgorithmExecutionException(unicode(e) + self.tr('\nSee log for more details'), lines, e)
def _checkParameterValuesBeforeExecuting(self):
for param in self.parameters:
if isinstance(param, (ParameterRaster, ParameterVector,
ParameterMultipleInput)):
if param.value:
if isinstance(param, ParameterMultipleInput):
inputlayers = param.value.split(';')
else:
inputlayers = [param.value]
for inputlayer in inputlayers:
obj = dataobjects.getObject(inputlayer)
if obj is None:
return "Wrong parameter value: " + param.value
return self.checkParameterValuesBeforeExecuting()
def runPostExecutionScript(self, progress):
scriptFile = ProcessingConfig.getSetting(
ProcessingConfig.POST_EXECUTION_SCRIPT)
self.runHookScript(scriptFile, progress)
def runPreExecutionScript(self, progress):
scriptFile = ProcessingConfig.getSetting(
ProcessingConfig.PRE_EXECUTION_SCRIPT)
self.runHookScript(scriptFile, progress)
def runHookScript(self, filename, progress):
if filename is None or not os.path.exists(filename):
return
try:
script = 'import processing\n'
ns = {}
ns['progress'] = progress
ns['alg'] = self
f = open(filename)
lines = f.readlines()
for line in lines:
script += line
exec(script, ns)
except:
# A wrong script should not cause problems, so we swallow
# all exceptions
pass
def convertUnsupportedFormats(self, progress):
i = 0
progress.setText(self.tr('Converting outputs'))
for out in self.outputs:
if isinstance(out, OutputVector):
if out.compatible is not None:
layer = dataobjects.getObjectFromUri(out.compatible)
if layer is None:
# For the case of memory layer, if the
# getCompatible method has been called
continue
provider = layer.dataProvider()
writer = out.getVectorWriter(
provider.fields(),
provider.geometryType(), layer.crs()
)
features = vector.features(layer)
for feature in features:
writer.addFeature(feature)
elif isinstance(out, OutputRaster):
if out.compatible is not None:
layer = dataobjects.getObjectFromUri(out.compatible)
format = self.getFormatShortNameFromFilename(out.value)
orgFile = out.compatible
destFile = out.value
crsid = layer.crs().authid()
settings = QSettings()
path = unicode(settings.value('/GdalTools/gdalPath', ''))
envval = unicode(os.getenv('PATH'))
if not path.lower() in envval.lower().split(os.pathsep):
envval += '%s%s' % (os.pathsep, path)
os.putenv('PATH', envval)
command = 'gdal_translate -of %s -a_srs %s %s %s' % (format, crsid, orgFile, destFile)
if os.name == 'nt':
command = command.split(" ")
else:
command = [command]
proc = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False,
)
proc.communicate()
elif isinstance(out, OutputTable):
if out.compatible is not None:
layer = dataobjects.getObjectFromUri(out.compatible)
provider = layer.dataProvider()
writer = out.getTableWriter(provider.fields())
features = vector.features(layer)
for feature in features:
writer.addRecord(feature)
progress.setPercentage(100 * i / float(len(self.outputs)))
def getFormatShortNameFromFilename(self, filename):
ext = filename[filename.rfind('.') + 1:]
supported = GdalUtils.getSupportedRasters()
for name in supported.keys():
exts = supported[name]
if ext in exts:
return name
return 'GTiff'
def checkOutputFileExtensions(self):
"""Checks if the values of outputs are correct and have one of
the supported output extensions.
If not, it adds the first one of the supported extensions, which
is assumed to be the default one.
"""
for out in self.outputs:
if not out.hidden and out.value is not None:
if not os.path.isabs(out.value):
continue
if isinstance(out, OutputRaster):
exts = \
dataobjects.getSupportedOutputRasterLayerExtensions()
elif isinstance(out, OutputVector):
exts = \
dataobjects.getSupportedOutputVectorLayerExtensions()
elif isinstance(out, OutputTable):
exts = dataobjects.getSupportedOutputTableExtensions()
elif isinstance(out, OutputHTML):
exts = ['html', 'htm']
else:
continue
idx = out.value.rfind('.')
if idx == -1:
out.value = out.value + '.' + exts[0]
else:
ext = out.value[idx + 1:]
if ext not in exts:
out.value = out.value + '.' + exts[0]
def resolveTemporaryOutputs(self):
"""Sets temporary outputs (output.value = None) with a
temporary file instead.
"""
for out in self.outputs:
if not out.hidden and out.value is None:
setTempOutput(out, self)
def setOutputCRS(self):
layers = dataobjects.getAllLayers()
for param in self.parameters:
if isinstance(param, (ParameterRaster, ParameterVector, ParameterMultipleInput)):
if param.value:
if isinstance(param, ParameterMultipleInput):
inputlayers = param.value.split(';')
else:
inputlayers = [param.value]
for inputlayer in inputlayers:
for layer in layers:
if layer.source() == inputlayer:
self.crs = layer.crs()
return
p = dataobjects.getObjectFromUri(inputlayer)
if p is not None:
self.crs = p.crs()
p = None
return
try:
from qgis.utils import iface
self.crs = iface.mapCanvas().mapSettings().destinationCrs()
except:
pass
def resolveDataObjects(self):
layers = dataobjects.getAllLayers()
for param in self.parameters:
if isinstance(param, (ParameterRaster, ParameterVector, ParameterTable,
ParameterMultipleInput)):
if param.value:
if isinstance(param, ParameterMultipleInput):
inputlayers = param.value.split(';')
else:
inputlayers = [param.value]
for i, inputlayer in enumerate(inputlayers):
for layer in layers:
if layer.name() == inputlayer:
inputlayers[i] = layer.source()
break
param.setValue(";".join(inputlayers))
def checkInputCRS(self):
"""It checks that all input layers use the same CRS. If so,
returns True. False otherwise.
"""
crsList = []
for param in self.parameters:
if isinstance(param, (ParameterRaster, ParameterVector, ParameterMultipleInput)):
if param.value:
if isinstance(param, ParameterMultipleInput):
layers = param.value.split(';')
else:
layers = [param.value]
for item in layers:
crs = dataobjects.getObject(item).crs()
if crs not in crsList:
crsList.append(crs)
return len(crsList) < 2
def addOutput(self, output):
# TODO: check that name does not exist
if isinstance(output, Output):
self.outputs.append(output)
def addParameter(self, param):
# TODO: check that name does not exist
if isinstance(param, Parameter):
self.parameters.append(param)
def setParameterValue(self, paramName, value):
for param in self.parameters:
if param.name == paramName:
return param.setValue(value)
def setOutputValue(self, outputName, value):
for out in self.outputs:
if out.name == outputName:
out.setValue(value)
def getVisibleOutputsCount(self):
"""Returns the number of non-hidden outputs.
"""
i = 0
for out in self.outputs:
if not out.hidden:
i += 1
return i
def getVisibleParametersCount(self):
"""Returns the number of non-hidden parameters.
"""
i = 0
for param in self.parameters:
if not param.hidden:
i += 1
return i
def getHTMLOutputsCount(self):
"""Returns the number of HTML outputs.
"""
i = 0
for out in self.outputs:
if isinstance(out, OutputHTML):
i += 1
return i
def getOutputValuesAsDictionary(self):
d = {}
for out in self.outputs:
d[out.name] = out.value
return d
def __str__(self):
s = 'ALGORITHM: ' + self.name + '\n'
for param in self.parameters:
s += '\t' + unicode(param) + '\n'
for out in self.outputs:
s += '\t' + unicode(out) + '\n'
s += '\n'
return s
def commandLineName(self):
name = self.provider.getName().lower() + ':' + self.name.lower()
validChars = \
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789:'
name = ''.join(c for c in name if c in validChars)
return name
def removeOutputFromName(self, name):
for out in self.outputs:
if out.name == name:
self.outputs.remove(out)
def getOutputFromName(self, name):
for out in self.outputs:
if out.name == name:
return out
def getParameterFromName(self, name):
for param in self.parameters:
if param.name == name:
return param
def getParameterValue(self, name):
for param in self.parameters:
if param.name == name:
return param.value
return None
def getOutputValue(self, name):
for out in self.outputs:
if out.name == name:
return out.value
return None
def getAsCommand(self):
"""Returns the command that would run this same algorithm from
the console.
Should return None if the algorithm cannot be run from the
console.
"""
s = 'processing.runalg("' + self.commandLineName() + '",'
for param in self.parameters:
s += param.getValueAsCommandLineParameter() + ','
for out in self.outputs:
if not out.hidden:
s += out.getValueAsCommandLineParameter() + ','
s = s[:-1] + ')'
return s
def tr(self, string, context=''):
if context == '':
context = self.__class__.__name__
return QCoreApplication.translate(context, string)
def trAlgorithm(self, string, context=''):
if context == '':
context = self.__class__.__name__
return string, QCoreApplication.translate(context, string)
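# Illustrative sketch of a custom algorithm (hypothetical names, shown only to
# document the extension points described in the docstrings above; it is not
# part of the Processing framework):
#
#   class CentroidsExampleAlgorithm(GeoAlgorithm):
#
#       def defineCharacteristics(self):
#           self.name, self.i18n_name = self.trAlgorithm('Centroids example')
#           self.group, self.i18n_group = self.trAlgorithm('Examples')
#           self.addParameter(ParameterVector('INPUT', self.tr('Input layer')))
#           self.addOutput(OutputVector('OUTPUT', self.tr('Centroids')))
#
#       def processAlgorithm(self, progress):
#           # Read inputs with self.getParameterValue('INPUT'), write results
#           # through self.getOutputFromName('OUTPUT'), and raise
#           # GeoAlgorithmExecutionException if anything goes wrong.
#           pass
#
# Callers should always run an algorithm through alg.execute(progress) rather
# than calling processAlgorithm() directly.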
|
gpl-2.0
|
IronLanguages/ironpython3
|
Src/StdLib/Lib/encodings/cp424.py
|
272
|
12055
|
""" Python Character Mapping Codec cp424 generated from 'MAPPINGS/VENDORS/MISC/CP424.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp424',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x9c' # 0x04 -> SELECT
'\t' # 0x05 -> HORIZONTAL TABULATION
'\x86' # 0x06 -> REQUIRED NEW LINE
'\x7f' # 0x07 -> DELETE
'\x97' # 0x08 -> GRAPHIC ESCAPE
'\x8d' # 0x09 -> SUPERSCRIPT
'\x8e' # 0x0A -> REPEAT
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x9d' # 0x14 -> RESTORE/ENABLE PRESENTATION
'\x85' # 0x15 -> NEW LINE
'\x08' # 0x16 -> BACKSPACE
'\x87' # 0x17 -> PROGRAM OPERATOR COMMUNICATION
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x92' # 0x1A -> UNIT BACK SPACE
'\x8f' # 0x1B -> CUSTOMER USE ONE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
'\x80' # 0x20 -> DIGIT SELECT
'\x81' # 0x21 -> START OF SIGNIFICANCE
'\x82' # 0x22 -> FIELD SEPARATOR
'\x83' # 0x23 -> WORD UNDERSCORE
'\x84' # 0x24 -> BYPASS OR INHIBIT PRESENTATION
'\n' # 0x25 -> LINE FEED
'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
'\x1b' # 0x27 -> ESCAPE
'\x88' # 0x28 -> SET ATTRIBUTE
'\x89' # 0x29 -> START FIELD EXTENDED
'\x8a' # 0x2A -> SET MODE OR SWITCH
'\x8b' # 0x2B -> CONTROL SEQUENCE PREFIX
'\x8c' # 0x2C -> MODIFY FIELD ATTRIBUTE
'\x05' # 0x2D -> ENQUIRY
'\x06' # 0x2E -> ACKNOWLEDGE
'\x07' # 0x2F -> BELL
'\x90' # 0x30 -> <reserved>
'\x91' # 0x31 -> <reserved>
'\x16' # 0x32 -> SYNCHRONOUS IDLE
'\x93' # 0x33 -> INDEX RETURN
'\x94' # 0x34 -> PRESENTATION POSITION
'\x95' # 0x35 -> TRANSPARENT
'\x96' # 0x36 -> NUMERIC BACKSPACE
'\x04' # 0x37 -> END OF TRANSMISSION
'\x98' # 0x38 -> SUBSCRIPT
'\x99' # 0x39 -> INDENT TABULATION
'\x9a' # 0x3A -> REVERSE FORM FEED
'\x9b' # 0x3B -> CUSTOMER USE THREE
'\x14' # 0x3C -> DEVICE CONTROL FOUR
'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
'\x9e' # 0x3E -> <reserved>
'\x1a' # 0x3F -> SUBSTITUTE
' ' # 0x40 -> SPACE
'\u05d0' # 0x41 -> HEBREW LETTER ALEF
'\u05d1' # 0x42 -> HEBREW LETTER BET
'\u05d2' # 0x43 -> HEBREW LETTER GIMEL
'\u05d3' # 0x44 -> HEBREW LETTER DALET
'\u05d4' # 0x45 -> HEBREW LETTER HE
'\u05d5' # 0x46 -> HEBREW LETTER VAV
'\u05d6' # 0x47 -> HEBREW LETTER ZAYIN
'\u05d7' # 0x48 -> HEBREW LETTER HET
'\u05d8' # 0x49 -> HEBREW LETTER TET
'\xa2' # 0x4A -> CENT SIGN
'.' # 0x4B -> FULL STOP
'<' # 0x4C -> LESS-THAN SIGN
'(' # 0x4D -> LEFT PARENTHESIS
'+' # 0x4E -> PLUS SIGN
'|' # 0x4F -> VERTICAL LINE
'&' # 0x50 -> AMPERSAND
'\u05d9' # 0x51 -> HEBREW LETTER YOD
'\u05da' # 0x52 -> HEBREW LETTER FINAL KAF
'\u05db' # 0x53 -> HEBREW LETTER KAF
'\u05dc' # 0x54 -> HEBREW LETTER LAMED
'\u05dd' # 0x55 -> HEBREW LETTER FINAL MEM
'\u05de' # 0x56 -> HEBREW LETTER MEM
'\u05df' # 0x57 -> HEBREW LETTER FINAL NUN
'\u05e0' # 0x58 -> HEBREW LETTER NUN
'\u05e1' # 0x59 -> HEBREW LETTER SAMEKH
'!' # 0x5A -> EXCLAMATION MARK
'$' # 0x5B -> DOLLAR SIGN
'*' # 0x5C -> ASTERISK
')' # 0x5D -> RIGHT PARENTHESIS
';' # 0x5E -> SEMICOLON
'\xac' # 0x5F -> NOT SIGN
'-' # 0x60 -> HYPHEN-MINUS
'/' # 0x61 -> SOLIDUS
'\u05e2' # 0x62 -> HEBREW LETTER AYIN
'\u05e3' # 0x63 -> HEBREW LETTER FINAL PE
'\u05e4' # 0x64 -> HEBREW LETTER PE
'\u05e5' # 0x65 -> HEBREW LETTER FINAL TSADI
'\u05e6' # 0x66 -> HEBREW LETTER TSADI
'\u05e7' # 0x67 -> HEBREW LETTER QOF
'\u05e8' # 0x68 -> HEBREW LETTER RESH
'\u05e9' # 0x69 -> HEBREW LETTER SHIN
'\xa6' # 0x6A -> BROKEN BAR
',' # 0x6B -> COMMA
'%' # 0x6C -> PERCENT SIGN
'_' # 0x6D -> LOW LINE
'>' # 0x6E -> GREATER-THAN SIGN
'?' # 0x6F -> QUESTION MARK
'\ufffe' # 0x70 -> UNDEFINED
'\u05ea' # 0x71 -> HEBREW LETTER TAV
'\ufffe' # 0x72 -> UNDEFINED
'\ufffe' # 0x73 -> UNDEFINED
'\xa0' # 0x74 -> NO-BREAK SPACE
'\ufffe' # 0x75 -> UNDEFINED
'\ufffe' # 0x76 -> UNDEFINED
'\ufffe' # 0x77 -> UNDEFINED
'\u2017' # 0x78 -> DOUBLE LOW LINE
'`' # 0x79 -> GRAVE ACCENT
':' # 0x7A -> COLON
'#' # 0x7B -> NUMBER SIGN
'@' # 0x7C -> COMMERCIAL AT
"'" # 0x7D -> APOSTROPHE
'=' # 0x7E -> EQUALS SIGN
'"' # 0x7F -> QUOTATION MARK
'\ufffe' # 0x80 -> UNDEFINED
'a' # 0x81 -> LATIN SMALL LETTER A
'b' # 0x82 -> LATIN SMALL LETTER B
'c' # 0x83 -> LATIN SMALL LETTER C
'd' # 0x84 -> LATIN SMALL LETTER D
'e' # 0x85 -> LATIN SMALL LETTER E
'f' # 0x86 -> LATIN SMALL LETTER F
'g' # 0x87 -> LATIN SMALL LETTER G
'h' # 0x88 -> LATIN SMALL LETTER H
'i' # 0x89 -> LATIN SMALL LETTER I
'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\xb1' # 0x8F -> PLUS-MINUS SIGN
'\xb0' # 0x90 -> DEGREE SIGN
'j' # 0x91 -> LATIN SMALL LETTER J
'k' # 0x92 -> LATIN SMALL LETTER K
'l' # 0x93 -> LATIN SMALL LETTER L
'm' # 0x94 -> LATIN SMALL LETTER M
'n' # 0x95 -> LATIN SMALL LETTER N
'o' # 0x96 -> LATIN SMALL LETTER O
'p' # 0x97 -> LATIN SMALL LETTER P
'q' # 0x98 -> LATIN SMALL LETTER Q
'r' # 0x99 -> LATIN SMALL LETTER R
'\ufffe' # 0x9A -> UNDEFINED
'\ufffe' # 0x9B -> UNDEFINED
'\ufffe' # 0x9C -> UNDEFINED
'\xb8' # 0x9D -> CEDILLA
'\ufffe' # 0x9E -> UNDEFINED
'\xa4' # 0x9F -> CURRENCY SIGN
'\xb5' # 0xA0 -> MICRO SIGN
'~' # 0xA1 -> TILDE
's' # 0xA2 -> LATIN SMALL LETTER S
't' # 0xA3 -> LATIN SMALL LETTER T
'u' # 0xA4 -> LATIN SMALL LETTER U
'v' # 0xA5 -> LATIN SMALL LETTER V
'w' # 0xA6 -> LATIN SMALL LETTER W
'x' # 0xA7 -> LATIN SMALL LETTER X
'y' # 0xA8 -> LATIN SMALL LETTER Y
'z' # 0xA9 -> LATIN SMALL LETTER Z
'\ufffe' # 0xAA -> UNDEFINED
'\ufffe' # 0xAB -> UNDEFINED
'\ufffe' # 0xAC -> UNDEFINED
'\ufffe' # 0xAD -> UNDEFINED
'\ufffe' # 0xAE -> UNDEFINED
'\xae' # 0xAF -> REGISTERED SIGN
'^' # 0xB0 -> CIRCUMFLEX ACCENT
'\xa3' # 0xB1 -> POUND SIGN
'\xa5' # 0xB2 -> YEN SIGN
'\xb7' # 0xB3 -> MIDDLE DOT
'\xa9' # 0xB4 -> COPYRIGHT SIGN
'\xa7' # 0xB5 -> SECTION SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
'[' # 0xBA -> LEFT SQUARE BRACKET
']' # 0xBB -> RIGHT SQUARE BRACKET
'\xaf' # 0xBC -> MACRON
'\xa8' # 0xBD -> DIAERESIS
'\xb4' # 0xBE -> ACUTE ACCENT
'\xd7' # 0xBF -> MULTIPLICATION SIGN
'{' # 0xC0 -> LEFT CURLY BRACKET
'A' # 0xC1 -> LATIN CAPITAL LETTER A
'B' # 0xC2 -> LATIN CAPITAL LETTER B
'C' # 0xC3 -> LATIN CAPITAL LETTER C
'D' # 0xC4 -> LATIN CAPITAL LETTER D
'E' # 0xC5 -> LATIN CAPITAL LETTER E
'F' # 0xC6 -> LATIN CAPITAL LETTER F
'G' # 0xC7 -> LATIN CAPITAL LETTER G
'H' # 0xC8 -> LATIN CAPITAL LETTER H
'I' # 0xC9 -> LATIN CAPITAL LETTER I
'\xad' # 0xCA -> SOFT HYPHEN
'\ufffe' # 0xCB -> UNDEFINED
'\ufffe' # 0xCC -> UNDEFINED
'\ufffe' # 0xCD -> UNDEFINED
'\ufffe' # 0xCE -> UNDEFINED
'\ufffe' # 0xCF -> UNDEFINED
'}' # 0xD0 -> RIGHT CURLY BRACKET
'J' # 0xD1 -> LATIN CAPITAL LETTER J
'K' # 0xD2 -> LATIN CAPITAL LETTER K
'L' # 0xD3 -> LATIN CAPITAL LETTER L
'M' # 0xD4 -> LATIN CAPITAL LETTER M
'N' # 0xD5 -> LATIN CAPITAL LETTER N
'O' # 0xD6 -> LATIN CAPITAL LETTER O
'P' # 0xD7 -> LATIN CAPITAL LETTER P
'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
'R' # 0xD9 -> LATIN CAPITAL LETTER R
'\xb9' # 0xDA -> SUPERSCRIPT ONE
'\ufffe' # 0xDB -> UNDEFINED
'\ufffe' # 0xDC -> UNDEFINED
'\ufffe' # 0xDD -> UNDEFINED
'\ufffe' # 0xDE -> UNDEFINED
'\ufffe' # 0xDF -> UNDEFINED
'\\' # 0xE0 -> REVERSE SOLIDUS
'\xf7' # 0xE1 -> DIVISION SIGN
'S' # 0xE2 -> LATIN CAPITAL LETTER S
'T' # 0xE3 -> LATIN CAPITAL LETTER T
'U' # 0xE4 -> LATIN CAPITAL LETTER U
'V' # 0xE5 -> LATIN CAPITAL LETTER V
'W' # 0xE6 -> LATIN CAPITAL LETTER W
'X' # 0xE7 -> LATIN CAPITAL LETTER X
'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
'\xb2' # 0xEA -> SUPERSCRIPT TWO
'\ufffe' # 0xEB -> UNDEFINED
'\ufffe' # 0xEC -> UNDEFINED
'\ufffe' # 0xED -> UNDEFINED
'\ufffe' # 0xEE -> UNDEFINED
'\ufffe' # 0xEF -> UNDEFINED
'0' # 0xF0 -> DIGIT ZERO
'1' # 0xF1 -> DIGIT ONE
'2' # 0xF2 -> DIGIT TWO
'3' # 0xF3 -> DIGIT THREE
'4' # 0xF4 -> DIGIT FOUR
'5' # 0xF5 -> DIGIT FIVE
'6' # 0xF6 -> DIGIT SIX
'7' # 0xF7 -> DIGIT SEVEN
'8' # 0xF8 -> DIGIT EIGHT
'9' # 0xF9 -> DIGIT NINE
'\xb3' # 0xFA -> SUPERSCRIPT THREE
'\ufffe' # 0xFB -> UNDEFINED
'\ufffe' # 0xFC -> UNDEFINED
'\ufffe' # 0xFD -> UNDEFINED
'\ufffe' # 0xFE -> UNDEFINED
'\x9f' # 0xFF -> EIGHT ONES
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
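if __name__ == "__main__":  # pragma: no cover
    # Illustrative round trip, not part of the generated codec: use the Codec
    # class directly rather than relying on the 'cp424' alias being registered
    # through the encodings package.
    codec = Codec()
    encoded, _ = codec.encode('shalom \u05e9\u05dc\u05d5\u05dd')
    decoded, _ = codec.decode(encoded)
    print(decoded)  # shalom followed by the Hebrew letters shin-lamed-vav-mem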
|
apache-2.0
|
mdhaman/superdesk-core
|
superdesk/__init__.py
|
2
|
5286
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
"""Superdesk"""
import blinker
import logging as logging_lib
from flask import abort, json, Blueprint, current_app as app # noqa
from flask_script import Command as BaseCommand, Option # noqa
from werkzeug.exceptions import HTTPException
from eve.utils import config # noqa
from eve.methods.common import document_link # noqa
from .eve_backend import EveBackend
from .datalayer import SuperdeskDataLayer # noqa
from .services import BaseService as Service # noqa
from .resource import Resource # noqa
from .privilege import privilege, intrinsic_privilege, get_intrinsic_privileges # noqa
from .workflow import * # noqa
from .signals import * # noqa
__version__ = '1.28'
API_NAME = 'Superdesk API'
SCHEMA_VERSION = 0
DOMAIN = {}
COMMANDS = {}
JINJA_FILTERS = dict()
app_components = dict()
app_models = dict()
resources = dict()
eve_backend = EveBackend()
default_user_preferences = dict()
default_session_preferences = dict()
logger = logging_lib.getLogger(__name__)
class Command(BaseCommand):
"""Superdesk Command.
The Eve framework changes introduced with https://github.com/nicolaiarocci/eve/issues/213 make the commands fail.
    Reason being that flask-script runs the commands using test_request_context(), which is invalid.
    That's why we inherit from Flask-Script's Command to work around this issue.
"""
def __call__(self, _app=None, *args, **kwargs):
try:
with app.app_context():
res = self.run(*args, **kwargs)
logger.info('Command finished with: {}'.format(res))
return 0
except Exception as ex:
            logger.info('Uhoh, an exception occurred while running the command...')
logger.exception(ex)
return 1
def get_headers(self, environ=None):
"""Fix CORS for abort responses.
        todo(petr): put it in a custom flask error handler instead
"""
return [
('Content-Type', 'text/html'),
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', '*'),
]
setattr(HTTPException, 'get_headers', get_headers)
def domain(resource, res_config):
"""Register domain resource"""
DOMAIN[resource] = res_config
def command(name, command):
"""Register command"""
COMMANDS[name] = command
def blueprint(blueprint, app, **kwargs):
"""Register flask blueprint.
:param blueprint: blueprint instance
:param app: flask app instance
"""
blueprint.kwargs = kwargs
prefix = app.api_prefix or None
app.register_blueprint(blueprint, url_prefix=prefix, **kwargs)
def get_backend():
"""Returns the available backend, this will be changed in a factory if needed."""
return eve_backend
def get_resource_service(resource_name):
return resources[resource_name].service
def get_resource_privileges(resource_name):
attr = getattr(resources[resource_name], 'privileges', {})
return attr
def register_default_user_preference(preference_name, preference):
default_user_preferences[preference_name] = preference
def register_default_session_preference(preference_name, preference):
default_session_preferences[preference_name] = preference
def register_resource(name, resource, service=None, backend=None, privilege=None, _app=None):
"""Shortcut for registering resource and service together.
:param name: resource name
:param resource: resource class
:param service: service class
:param backend: backend instance
:param privilege: privilege to register with resource
:param _app: flask app
"""
if not backend:
backend = get_backend()
if not service:
service = Service
if privilege:
intrinsic_privilege(name, privilege)
if not _app:
_app = app
service_instance = service(name, backend=backend)
resource(name, app=_app, service=service_instance)
def register_jinja_filter(name, jinja_filter):
"""Register jinja filter
:param str name: name of the filter
:param jinja_filter: jinja filter function
"""
JINJA_FILTERS[name] = jinja_filter
def register_item_schema_field(name, schema, app, copy_on_rewrite=True):
"""Register new item schema field.
.. versionadded:: 1.28
:param str name: field name
:param dict schema: field schema
:param Flask app: flask app
:param bool copy_on_rewrite: copy field value when rewriting item
"""
for resource in ['ingest', 'archive', 'published', 'archive_autosave']:
app.config['DOMAIN'][resource]['schema'].update({name: schema})
app.config['DOMAIN']['content_templates_apply']['schema']['item']['schema'].update(
{name: schema}
)
if copy_on_rewrite:
app.config.setdefault('COPY_ON_REWRITE_FIELDS', [])
app.config['COPY_ON_REWRITE_FIELDS'].append(name)
from superdesk.search_provider import SearchProvider # noqa
from apps.search_providers import register_search_provider # noqa
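# Illustrative registration sketch (hypothetical resource and service names,
# assuming a configured Superdesk/Eve app as set up by the app factory; it is
# not part of this module):
#
#   class ItemNotesResource(Resource):
#       schema = {'note': {'type': 'string'}}
#
#   class ItemNotesService(Service):
#       pass
#
#   def init_app(app):
#       register_resource('item_notes', ItemNotesResource, ItemNotesService, _app=app)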
|
agpl-3.0
|
manazag/hopper.pw
|
hopperpw/main/south_migrations/0004_updated_host_fields.py
|
4
|
4762
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Host.comment'
db.alter_column(u'main_host', 'comment', self.gf('django.db.models.fields.CharField')(max_length=256, null=True))
# Adding unique constraint on 'Host', fields ['fqdn']
db.create_unique(u'main_host', ['fqdn'])
def backwards(self, orm):
# Removing unique constraint on 'Host', fields ['fqdn']
db.delete_unique(u'main_host', ['fqdn'])
# Changing field 'Host.comment'
db.alter_column(u'main_host', 'comment', self.gf('django.db.models.fields.CharField')(max_length=256))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.host': {
'Meta': {'object_name': 'Host'},
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'fqdn': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'update_secret': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['main']
|
bsd-3-clause
|
pombredanne/invenio-old
|
modules/elmsubmit/lib/elmsubmit_richtext2txt.py
|
4
|
13971
|
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
A text/richtext to text/plain converter.
Always returns a unicode string.
This is a module exporting a single function 'richtext2txt' which takes
a string of 'enriched text' and returns its conversion to 'plain
text'. 'rich text' is the text format as specified in RFC1341 for
use as an email payload with mime type text/richtext.
The code is based on the example parser given in appendix D of
RFC1341. It is a quite heavily modified version; the new code (aside
from being in Python not C):
1. Takes account of the <np> tag.
2. Deals better with soft newlines.
3. Deals better with the paragraph tag.
4. Takes account of the <iso-8859-x> tag.
The resulting code is something of a mishmash of the functional style
of programming that I prefer and the 'big while loop' procedural
style in which the original C code is written.
With reference to point 4: Richtext is a pain because it allows
<ISO-8859-X></ISO-8859-X> markup tags to change charsets inside a
document. This means that if we get a text/richtext email payload
with 'Content-type' header specifying a charset e.g. 'us-ascii', we
can't simply decode to a unicode object; it is possible that bytes
inside the <ISO-8859-X></ISO-8859-X> will break the
unicode(str,'us-ascii') function call!
This is frustrating because:
1. Why bother to have a charset declaration outside a document only to
go and break it inside?
This might be understandable if text/richtext was designed
independently of MIME and its Content-Type declarations but:
2. text/richtext is specified in the SAME RFC as the Content-type:
MIME header!
In fairness to the RFC writer(s), they were working at a time when
unicode/iso10646 was still in flux and so it was common for people
writing bilingual texts to want to use two charsets in one
document. It is interesting to note that the later text/enriched
specification (written when unicode had petrified) removes the
possibility of charset switching.
The existence of <iso-8859-x> tags makes the parser rather more
complicated.
Treatment notes:
> Second, the command "<nl>" is used to represent a required
> line break. (Otherwise, CRLFs in the data are treated as
> equivalent to a single SPACE character.)
2.
The RFC doesn't say to treat spaces as a special character; ie. that
they should be reproduced verbatim. This leads to the odd effect that
a string such as the following (where $SPACE$ in reality would be a space
character):
"<paragraph>Some text...</paragraph>$SPACE$<paragraph>More text...</paragraph>"
Is rendered as:
"Some text...
$SPACE$
More text..."
ie. The space is considered a string of text which must be separated
from the displayed paragraphs. This seems fairly odd behaviour to me,
but the RFC seems to suggest this is correct treatment.
"""
import re
import StringIO
def richtext2txt(str, charset='us-ascii', convert_iso_8859_tags=False, force_conversion=False):
return _richtext2txt(str, charset, convert_iso_8859_tags, force_conversion)
"""
Document options somewhere here.
##### 5. Make a note that the parsers assume \n not CRLF conventions so preconvert!!!
##### -------------------------------------------------------------------------------
"""
__revision__ = "$Id$"
def _richtext2txt(string, charset='us-ascii', convert_iso_8859_tags=False, force_conversion=False,
recursive=False, just_closed_para=True, output_file=None):
if type(string) == unicode and convert_iso_8859_tags:
# Doesn't make sense to have a unicode string
# containing mixed charsets.
raise ValueError("function richtext2txt cannot have both unicode input string and convert_iso_8859_tags=True.")
# f and g will be our input/output streams.
# Create file like object from string for input file.
f = StringIO.StringIO(string)
# Create another file like object from string for output file,
# unless we have been handed one by recursive call.
if output_file is None:
g = StringIO.StringIO(u'')
else:
g = output_file
# When comparing to the RFC1341 code, substitute:
# STDIN -> object f
# STDOUT -> object g
# EOF -> ''
# ungetc -> seek(-1,1)
# If we're not calling ourself from ISO-8859-X tag, then eat
# leading newlines:
if not recursive: _eat_all(f,'\n')
c = f.read(1)
# compile re for use in if then else. Matches 'iso-8859-XX' tags
# where xx are digits.
iso_re = re.compile(r'^iso-8859-([1-9][0-9]?)$', re.IGNORECASE)
iso_close_re = re.compile(r'^/iso-8859-([1-9][0-9]?)$', re.IGNORECASE)
while c != '':
if c == '<':
c, token = _read_token(f)
if c == '': break
if token == 'lt':
g.write('<')
just_closed_para = False
elif token == 'nl':
g.write('\n')
# Discard all 'soft newlines' following <nl> token:
_eat_all(f,'\n')
elif token == 'np':
g.write('\n\n\n')
# Discard all 'soft newlines' following <np> token:
_eat_all(f,'\n')
just_closed_para = True
elif token == 'paragraph':
# If we haven't just closed a paragraph tag, or done
# equivalent (eg. output an <np> tag) then produce
# newlines to offset paragraph:
if not just_closed_para: g.write('\n\n')
elif token == '/paragraph':
g.write('\n\n')
# Discard all 'soft newlines' following </paragraph> token:
_eat_all(f,'\n')
just_closed_para = True
elif token == 'comment':
commct=1
while commct > 0:
c = _throw_away_until(f,'<') # Bin characters until we get a '<'
if c == '': break
c, token = _read_token(f)
if c == '': break
if token == '/comment':
commct -= 1
elif token == 'comment':
commct += 1
elif iso_re.match(token):
if not convert_iso_8859_tags:
if not force_conversion:
raise ISO8859TagError("<iso-8859-x> tag found when convert_iso_8859_tags=False")
else:
pass
else:
# Read in from the input file, stopping to look at
# each tag. Keep reading until we have a balanced pair
# of <iso-8859-x></iso-8859-x> tags. Use tag_balance
# to keep track of how many open iso-8859 tags we
# have, since nesting is legal. When tag_balance hits
# 0 we have found a balanced pair.
tag_balance = 1
iso_str = ''
while tag_balance != 0:
c, next_str = _read_to_next_token(f)
iso_str += next_str
if c == '': break
c, next_token = _read_token(f)
if c == '': break
if next_token == token:
tag_balance += 1
elif next_token == '/' + token:
tag_balance -= 1
if tag_balance != 0:
iso_str += ('<' + next_token + '>')
# We now have a complete string of text in the
# foreign charset in iso_str, so we call ourself
# to process it. No need to consider return
# value, since we pass g and all the output gets
# written to this.
_richtext2txt(iso_str, charset, convert_iso_8859_tags, force_conversion,
True, just_closed_para, output_file=g)
#^^^^ = recursive
elif iso_close_re.match(token):
if force_conversion:
pass
else:
if convert_iso_8859_tags:
raise ISO8859TagError("closing </iso-8859-x> tag before opening tag")
else:
raise ISO8859TagError("</iso-8859-x> tag found when convert_iso_8859_tags=False")
else:
# Ignore unrecognized token.
pass
elif c == '\n':
# Read in contiguous string of newlines and output them as
# single space, unless we hit EOF, in which case output
# nothing.
_eat_all(f,'\n')
if _next_char(f) == '': break
# If we have just written a newline out, soft newlines
# should do nothing:
if _last_char(g) != '\n': g.write(' ')
else:
# We have a 'normal char' so just write it out:
_unicode_write(g, c, charset, force_conversion)
just_closed_para = False
c = f.read(1)
# Only output the terminating newline if we aren't being called
# recursively.
if not recursive:
g.write('\n')
return g.getvalue()
def _read_token(f):
"""
Read in token from inside a markup tag.
"""
token = ""
c = f.read(1)
while c != '' and c!= '>':
token += c
c = f.read(1)
token = token.lower()
return c, token
def _read_to_next_token(f):
out = ''
c = f.read(1)
while c != '<' and c != '':
out += c
c = f.read(1)
return c, out
def _eat_all(f,d):
"""
Discard all characters from input stream f of type d until we hit
a character that is not of type d. Return the most recent bit read
from the file.
"""
got_char = False
if _next_char(f) == d: got_char = True
while _next_char(f) == d: f.read(1)
if got_char:
return d
else:
return None
def _throw_away_until(f,d):
"""
Discard all characters from input stream f until we hit a
character of type d. Discard this char also. Return the most
recent bit read from the file (which will either be d or EOF).
"""
c = f.read(1)
while c != d and c != '': c = f.read(1)
return c
def _next_char(f):
"""
Return the next char in the file.
"""
# Get the char:
c = f.read(1)
# If it wasn't an EOF, backup one, otherwise stay put:
if c != '': f.seek(-1,1)
return c
def _last_char(g):
"""
Look at what the last character written to a file was.
"""
pos = g.tell()
if pos == 0:
# At the start of the file.
return None
else:
# Written at least one character, so step back one and read it
# off.
g.seek(-1,1)
return g.read(1)
def _unicode_write(g, string, charset, force_conversion):
strictness = { True : 'strict',
False: 'replace'}[force_conversion]
# Could raise a UnicodeDecodingError!
unicode_str = unicode(string, charset, strictness)
g.write(unicode_str)
class RichTextConversionError(Exception):
"""
    An empty parent class for all errors in this module.
"""
pass
class ISO8859TagError(RichTextConversionError):
"""
This error is raised when we are doing a conversion with
strict=True, the input string is unicode and we get an iso-8859-x
tag. Unicode should not contain mixed charsets.
"""
pass
# The original C code direct from RFC1341, appendix D
# See: http://www.faqs.org/rfcs/rfc1341.html
# #include <stdio.h>
# #include <ctype.h>
# main() {
# int c, i;
# char token[50];
# while((c = getc(stdin)) != EOF) {
# if (c == '<') {
# for (i=0; (i<49 && (c = getc(stdin)) != '>' && c != EOF); ++i) {
# token[i] = isupper(c) ? tolower(c) : c;
# }
# if (c == EOF) break;
# if (c != '>') while ((c = getc(stdin)) != '>' && c != EOF) {;}
# if (c == EOF) break;
# token[i] = '\0';
# if (!strcmp(token, "lt")) {
# putc('<', stdout);
# } else if (!strcmp(token, "nl")) {
# putc('\n', stdout);
# } else if (!strcmp(token, "/paragraph")) {
# fputs("\n\n", stdout);
# } else if (!strcmp(token, "comment")) {
# int commct=1;
# while (commct > 0) {
# while ((c = getc(stdin)) != '<'
# && c != EOF) ;
# if (c == EOF) break;
# for (i=0; (c = getc(stdin)) != '>'
# && c != EOF; ++i) {
# token[i] = isupper(c) ?
# tolower(c) : c;
# }
# if (c== EOF) break;
# token[i] = NULL;
# if (!strcmp(token, "/comment")) --commct;
# if (!strcmp(token, "comment")) ++commct;
# }
# } /* Ignore all other tokens */
# } else if (c != '\n') putc(c, stdout);
# }
# putc('\n', stdout); /* for good measure */
# }
# data = open('sample.rtx','r')
# t = data.read()
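if __name__ == '__main__':
    # Illustrative usage sketch (hypothetical input, not part of the original
    # module; Python 2, like the code above): unrecognised tags are ignored,
    # <nl> forces a hard line break and a final newline is always appended.
    sample = 'Some <bold>rich</bold> text<nl>with a hard break.'
    print richtext2txt(sample, charset='us-ascii')
    # -> u'Some rich text\nwith a hard break.\n'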
|
gpl-2.0
|
nk113/django-ficuspumila
|
setup.py
|
1
|
1392
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# import to get rid of an error in atexit._run_exitfuncs
import logging
import multiprocessing
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='django-ficuspumila',
version='0.0.1',
description='A Django application suite which helps building media distribution service.',
long_description=open('README.rst', 'r').read(),
url='http://github.com/nk113/django-ficuspumila/',
packages=find_packages(),
package_data={'': ['*/fixtures/*.json']},
zip_safe=False,
tests_require=('mock', 'django_nose',),
test_suite = 'ficuspumila.runtests.runtests',
install_requires=[
'celery',
'Django',
'django-celery',
'django-tastypie',
'pycrypto',
'south',
'tastypie-queryset-client',
],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
author='Nobu Kakegawa',
author_email='[email protected]',
)
|
bsd-3-clause
|
gustavomazevedo/tbackup-server
|
server/migrations/0004_auto__del_field_backup_origin__add_field_backup_user.py
|
1
|
9066
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Backup.origin'
db.delete_column(u'server_backup', 'origin_id')
# Adding field 'Backup.user'
db.add_column(u'server_backup', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Backup.origin'
raise RuntimeError("Cannot reverse this migration. 'Backup.origin' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Adding field 'Backup.origin'
db.add_column(u'server_backup', 'origin',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['server.Origin']),
keep_default=False)
# Deleting field 'Backup.user'
db.delete_column(u'server_backup', 'user_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'server.apidestination': {
'Meta': {'object_name': 'APIDestination', '_ormbases': ['server.BaseDestination']},
'base_uri': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
u'basedestination_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['server.BaseDestination']", 'unique': 'True', 'primary_key': 'True'}),
'get_uri': ('django.db.models.fields.CharField', [], {'default': "'/get/'", 'max_length': '1024'}),
'pubkey': ('django.db.models.fields.TextField', [], {}),
'set_uri': ('django.db.models.fields.CharField', [], {'default': "'/set/'", 'max_length': '1024'})
},
'server.backup': {
'Meta': {'object_name': 'Backup'},
'after_restore': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'before_restore': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'destination': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['server.BaseDestination']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'obs': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'related_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['server.Backup']", 'null': 'True', 'blank': 'True'}),
'restore_dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'server.basedestination': {
'Meta': {'object_name': 'BaseDestination'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
'directory': ('django.db.models.fields.CharField', [], {'default': "u'~'", 'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
'server.localdestination': {
'Meta': {'object_name': 'LocalDestination', '_ormbases': ['server.BaseDestination']},
u'basedestination_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['server.BaseDestination']", 'unique': 'True', 'primary_key': 'True'})
},
u'server.origin': {
'Meta': {'object_name': 'Origin'},
'apikey': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'plan': ('django.db.models.fields.TextField', [], {})
},
'server.sftpdestination': {
'Meta': {'object_name': 'SFTPDestination', '_ormbases': ['server.BaseDestination']},
u'basedestination_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['server.BaseDestination']", 'unique': 'True', 'primary_key': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'key_filename': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'port': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'default': "u'tbackup'", 'max_length': '80'})
}
}
complete_apps = ['server']
|
mit
|
jotes/pontoon
|
pontoon/sync/tests/test_checks.py
|
1
|
3190
|
from unittest.mock import patch, PropertyMock
from pontoon.base.utils import aware_datetime
from pontoon.base.tests import TranslationFactory
from pontoon.checks.models import (
Warning,
Error,
)
from pontoon.sync.tests import FakeCheckoutTestCase
class TestChangesetTranslationsChecks(FakeCheckoutTestCase):
"""
Semi-integration tests for translation checks during a sync.
"""
def setUp(self):
super(TestChangesetTranslationsChecks, self).setUp()
changed_translation_patch = patch(
"pontoon.sync.changeset.ChangeSet.changed_translations",
new_callable=PropertyMock,
)
self.mock_changed_translations = changed_translation_patch.start()
self.addCleanup(changed_translation_patch.stop)
def test_bulk_check_translations_no_translations(self):
self.mock_changed_translations.return_value = []
assert self.changeset.bulk_check_translations() == set()
assert not Error.objects.exists()
assert not Warning.objects.exists()
def test_bulk_check_valid_translations(self):
translation1, translation2 = TranslationFactory.create_batch(
2,
locale=self.translated_locale,
entity=self.main_db_entity,
approved=True,
date=aware_datetime(2015, 1, 1),
)
self.mock_changed_translations.return_value = [
translation1,
translation2,
]
assert self.changeset.bulk_check_translations() == {
translation1.pk,
translation2.pk,
}
assert not Error.objects.exists()
assert not Warning.objects.exists()
def test_bulk_check_invalid_translations(self):
"""
Test scenario:
* check if errors are detected
        * check that only valid translations land in the Translation Memory
"""
invalid_translation, valid_translation = TranslationFactory.create_batch(
2,
locale=self.translated_locale,
entity=self.main_db_entity,
approved=True,
date=aware_datetime(2015, 1, 1),
)
invalid_translation.string = "a\nb"
invalid_translation.save()
# Clear TM entries for those translations
invalid_translation.memory_entries.all().delete()
valid_translation.memory_entries.all().delete()
self.mock_changed_translations.return_value = [
invalid_translation,
valid_translation,
]
valid_translations = self.changeset.bulk_check_translations()
assert valid_translations == {valid_translation.pk}
(error,) = Error.objects.all()
assert error.library == "p"
assert error.message == "Newline characters are not allowed"
assert error.translation == invalid_translation
self.changeset.translations_to_update = {
valid_translation.pk: valid_translation
}
self.changeset.bulk_create_translation_memory_entries(valid_translations)
assert not invalid_translation.memory_entries.exists()
assert valid_translation.memory_entries.count() == 1
|
bsd-3-clause
|
mmauroy/SickRage
|
sickbeard/providers/thepiratebay.py
|
4
|
8954
|
# Author: Mr_Orange <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import re
import urllib
import datetime
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import db
from sickbeard import classes
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import helpers
from sickbeard.show_name_helpers import allPossibleShowNames, sanitizeSceneName
from unidecode import unidecode
class ThePirateBayProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "ThePirateBay")
self.supportsBacklog = True
self.enabled = False
self.ratio = None
self.confirmed = False
self.minseed = None
self.minleech = None
self.cache = ThePirateBayCache(self)
self.urls = {'base_url': 'https://thepiratebay.gd/'}
self.url = self.urls['base_url']
self.searchurl = self.url + 'search/%s/0/7/200' # order by seed
self.re_title_url = '/torrent/(?P<id>\d+)/(?P<title>.*?)//1".+?(?P<url>magnet.*?)//1".+?(?P<seeders>\d+)</td>.+?(?P<leechers>\d+)</td>'
def isEnabled(self):
return self.enabled
def imageName(self):
return 'thepiratebay.png'
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
search_string['Season'].append(ep_string)
ep_string = show_name + ' Season ' + str(ep_obj.airdate).split('-')[0]
search_string['Season'].append(ep_string)
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%02d" % ep_obj.scene_absolute_number
search_string['Season'].append(ep_string)
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season)
search_string['Season'].append(ep_string)
ep_string = show_name + ' Season ' + str(ep_obj.scene_season) + ' -Ep*'
search_string['Season'].append(ep_string)
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if self.show.air_by_date:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', ' ')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%02i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + '|' + \
sickbeard.config.naming_ep_type[0] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0, epObj=None):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
if mode != 'RSS':
searchURL = self.searchurl % (urllib.quote(search_string))
else:
searchURL = self.url + 'tv/latest/'
logger.log(u"Search string: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
re_title_url = self.proxy._buildRE(self.re_title_url).replace('&f=norefer', '')
matches = re.compile(re_title_url, re.DOTALL).finditer(urllib.unquote(data))
for torrent in matches:
title = torrent.group('title')
url = torrent.group('url')
id = int(torrent.group('id'))
seeders = int(torrent.group('seeders'))
leechers = int(torrent.group('leechers'))
#Filter unseeded torrent
if mode != 'RSS' and (seeders < self.minseed or leechers < self.minleech):
continue
#Accept Torrent only from Good People for every Episode Search
if self.confirmed and re.search('(VIP|Trusted|Helper|Moderator)', torrent.group(0)) is None:
logger.log(u"ThePirateBay Provider found result " + torrent.group(
'title') + " but that doesn't seem like a trusted result so I'm ignoring it", logger.DEBUG)
continue
if not title or not url:
continue
item = title, url, id, seeders, leechers
items[mode].append(item)
#For each search mode sort all the items by seeders
items[mode].sort(key=lambda tup: tup[3], reverse=True)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url, id, seeders, leechers = item
if title:
title = self._clean_title_from_provider(title)
if url:
            url = url.replace('&amp;', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, search_date, self.show))
return results
def seedRatio(self):
return self.ratio
class ThePirateBayCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
        # only poll ThePirateBay every 20 minutes max
self.minTime = 20
def _getRSSData(self):
search_params = {'RSS': ['rss']}
return {'entries': self.provider._doSearch(search_params)}
provider = ThePirateBayProvider()
|
gpl-3.0
|
moniqx4/bite-project
|
server/handlers/examples_handler.py
|
17
|
1145
|
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The examples handler."""
__author__ = '[email protected] (Po Hu)'
import os
import sys
import webapp2
from google.appengine.api import users
from common.handlers import base
class Error(Exception):
pass
class ExamplesHandler(base.BaseHandler):
"""The examples handler."""
def get(self):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
self.RenderTemplate('examples.html', {})
app = webapp2.WSGIApplication(
[('/examples', ExamplesHandler)],
debug=True)
|
apache-2.0
|
tlodge/dataware.nox
|
src/nox/netapps/user_event_log/networkeventsws.py
|
9
|
14432
|
# -*- coding: utf8 -*-
# Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
from nox.lib.core import *
from twisted.python.failure import Failure
from nox.webapps.webserver.webauth import Capabilities
from nox.ext.apps.commonui.authui import UISection, UIResource
from nox.webapps.webserver import webauth
from nox.webapps.webserver.webserver import *
from nox.webapps.webservice.webservice import *
from nox.netapps.user_event_log.pyuser_event_log import pyuser_event_log, \
LogLevel, LogEntry
from nox.netapps.bindings_storage.pybindings_storage import pybindings_storage
from nox.webapps.webservice.webservice import json_parse_message_body
from nox.lib.netinet.netinet import *
from nox.webapps.webservice.web_arg_utils import *
from twisted.internet.defer import Deferred
from nox.netapps.data.datatypes_impl import Datatypes
from nox.netapps.data.datacache_impl import DataCache
import simplejson
import types
import copy
import re
# matches each instance of a format string, to be used with
# fmt_pattern.findall(log_message) to get a list of all format
# strings used in a log message
fmt_pattern = re.compile('{[a-z]*}')
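# For illustration (the message text below is hypothetical), the pattern picks
# out the brace tokens that fill_in_msg() later substitutes with principal names:
#   fmt_pattern.findall("host {sh} connected to {dl}")  ->  ['{sh}', '{dl}']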
lg = logging.getLogger("networkeventsws")
# makes sure a path component is a currently valid logid value
class WSPathValidLogID(WSPathComponent):
def __init__(self, uel):
WSPathComponent.__init__(self)
self.uel = uel
def __str__(self):
return "<logid>"
def extract(self, pc, data):
if pc == None:
return WSPathExtractResult(error="End of requested URI")
try:
max_logid = self.uel.get_max_logid()
logid = long(pc)
if logid > 0 and logid <= max_logid:
return WSPathExtractResult(value=pc)
except:
pass
e = "Invalid LogID value '" + pc + "'. Must be number 0 < n <= " \
+ str(max_logid)
return WSPathExtractResult(error=e)
def string_for_name_type(datatypes, type, is_plural):
s = ""
if type == datatypes.USER: s = "user"
elif type == datatypes.HOST: s = "host"
elif type == datatypes.LOCATION: s = "location"
elif type == datatypes.SWITCH: s = "switch"
elif type == datatypes.USER_GROUP: s = "user group"
elif type == datatypes.HOST_GROUP: s = "host group"
elif type == datatypes.LOCATION_GROUP: s = "location group"
elif type == datatypes.SWITCH_GROUP: s = "switch group"
if is_plural:
if type == datatypes.SWITCH:
s += "es"
else:
s += "s"
return s
def get_matching_ids(type, all_ids):
for_use = []
for n in all_ids:
if(n["type"] == type):
for_use.append((n["id"],n["type"]))
return for_use
def get_name_str(ids, datacache):
for_use = []
for n in ids:
for_use.append(datacache.get_name(n[1], n[0]))
if len(for_use) == 0:
for_use.append("<unknown>")
n = ""
for i in range(len(for_use)):
n += str(for_use[i])
if i < len(for_use) - 1:
n += ","
return "'%s'" % n
def fill_in_msg(uel, datatypes, datacache, msg, src_names, dst_names):
fmts_used = fmt_pattern.findall(msg)
fmts_used = map(lambda s: s[1:-1],fmts_used) # remove braces
ids = []
for fmt in fmts_used:
if fmt not in uel.principal_format_map:
lg.error("invalid format string '%s' in message '%s'" % (fmt,msg))
continue
name_type,dir = uel.principal_format_map[fmt]
if dir == LogEntry.SRC:
name_list = src_names
else:
name_list = dst_names
matching_ids = get_matching_ids(name_type, name_list)
name = get_name_str(matching_ids, datacache)
msg = msg.replace("{"+fmt+"}", name)
if len(matching_ids) == 1:
ids.append(matching_ids[0])
else:
ids.append((-1,0))
return (msg, ids)
def make_entry(uel, datatypes, datacache, logid, ts, app,
level, msg, src_names, dst_names):
msg,ids = fill_in_msg(uel, datatypes, datacache, msg,src_names,dst_names)
return { "logid" : logid,
"timestamp" : ts,
"app" : app,
"level" : level,
"msg" : msg,
"ids" : ids
}
def err(failure, request, fn_name, msg):
lg.error('%s: %s' % (fn_name, str(failure)))
return internalError(request, msg)
def dump_list_to_json(ret_list,request):
request.write(simplejson.dumps({
"identifier" : "logid",
"items" : ret_list
} ))
request.finish()
# get all log entries associated with a 'name' (ie a host or user)
# uses get_logids_for_name() and then uses process_list_op
class process_name_op:
def __init__(self, uel,datatypes,datacache):
self.uel = uel
self.datatypes = datatypes
self.datacache = datacache
def start(self, uid, principal_type, filter):
self.filter = filter
self.d = Deferred()
self.uel.get_logids_for_name(uid,principal_type,self.callback)
return self.d
def callback(self,logids):
p = process_list_op(self.uel,self.datatypes,self.datacache)
list_op_d = p.start(logids, self.filter)
def on_success(res):
self.d.callback(res)
def on_error(res):
            self.d.errback(res)
list_op_d.addCallback(on_success)
list_op_d.addErrback(on_error)
# class to get all log entries and writes them
# in JSON to a request object.
# the dict 'filter' describes how these results
# can be filtered before being returned (see below)
class process_list_op:
def __init__(self,uel,datatypes,datacache):
self.got = 0
self.items = []
self.all_spawned = False
self.uel = uel
self.datatypes = datatypes
self.datacache = datacache
self.name_to_dpid = {}
self.name_to_port = {}
self.unique_dpids = {}
def start(self, logids, filter):
self.d = Deferred()
self.filter = filter
max = self.uel.get_max_logid()
if max == 0:
self.done()
return self.d
# if nothing was provided, return ALL entries
if logids is None:
min = self.uel.get_min_logid()
logids = range(min,max+1)
self.needs = 0
for id in logids:
if id > 0 and id <= max and id > filter["after"]:
self.needs += 1
self.uel.get_log_entry(id,self.log_callback)
# needed for common case when we call self.done() from self.log_callback()
self.all_spawned = True
if self.needs == self.got :
self.done() # nothing actually spawned, or everything already done
return self.d
def done(self):
filtered_list = filter_item_list(self.items, ["app","msg"],
self.filter)
ret_list = sort_and_slice_results(self.filter, filtered_list)
self.d.callback(ret_list)
def log_callback(self, logid, ts, app, level, msg, src_names, dst_names):
self.got += 1
if level != LogLevel.INVALID and level <= self.filter["max_level"]:
e = make_entry(self.uel, self.datatypes, self.datacache,
logid,ts,app,level,msg,src_names,dst_names)
self.items.append(e)
if self.all_spawned and self.needs == self.got:
self.done()
class networkeventsws(Component):
""" Web service for network events (aka user_event_log)"""
def __init__(self,ctx):
Component.__init__(self,ctx)
def getInterface(self):
return str(networkeventsws)
# this is mainly for debugging, though in the future it could be
# a way for remote apps to integrate logging into our system.
def handle_add(self,request,data):
try:
if authorization_failed(request, [set(["add-network-events"])]):
return NOT_DONE_YET
content = json_parse_message_body(request)
if content == None:
content = {}
app = "via-netevent-webservice"
if "app" in content:
app = str(content["app"])
msg = "default webservice message"
if "msg" in content:
msg = str(content["msg"])
level = LogEntry.INFO
if "level" in content:
level = int(content["level"])
self.uel.log(app,level, msg)
except Exception, e:
err(Failure(), request, "handle_add",
"Could not add log message")
request.write(simplejson.dumps("success"))
request.finish()
return NOT_DONE_YET
# this is mainly for debugging.
def handle_remove(self,request,data):
if authorization_failed(request, [set(["remove-network-events"])]):
return NOT_DONE_YET
try:
msg = ""
def cb():
try:
request.write(simplejson.dumps("success:" + msg))
request.finish()
except Exception, e:
err(Failure(), request, "handle_remove",
"Could not remove log messages.")
if(request.args.has_key("max_logid")):
max_logid = int(request.args["max_logid"][0])
msg = "cleared entries with logid <= " + str(max_logid)
self.uel.remove(max_logid,cb)
else :
msg = "cleared all entries"
self.uel.clear(cb)
except Exception, e:
err(Failure(), request, "handle_remove",
"Could not remove log messages.")
return NOT_DONE_YET
# returns a deferred that is called with the list of all log entries
# for principal with name 'name' and type 'name_type', filtered by
# 'filter'. If filter is not specified, all matching entries are returned
def get_logs_for_name(self,uid,principal_type, filter=None):
if filter is None:
filter = self.get_default_filter()
p = process_name_op(self.uel, self.datatypes,self.datacache)
return p.start(uid,principal_type,filter)
# returns all logs if logid_list is None, or only the logs with logids
# specified in 'logid_list'. These results will be filtered if 'filter'
# is specified.
def get_logs(self, logid_list = None, filter=None):
if filter is None:
filter = self.get_default_filter()
p = process_list_op(self.uel,self.datatypes,self.datacache)
return p.start(logid_list, filter)
def get_default_filter(self):
return parse_mandatory_args({}, self.get_default_filter_arr())
def get_default_filter_arr(self):
filter_arr = get_default_filter_arr("logid")
filter_arr.extend([("after",0), ("max_level",LogLevel.INFO)])
return filter_arr
def handle_get_all(self,request,data):
try :
if authorization_failed(request, [set(["get-network-events"])]):
return NOT_DONE_YET
filter = parse_mandatory_args(request,
self.get_default_filter_arr())
for s in ["app","msg"]:
if s in request.args:
filter[s] = request.args[s][-1]
# handles all requests that are filtering based on a particular
# principal name (e.g., host=sepl_directory;bob )
type_map = { "host" : self.datatypes.HOST,
"user" : self.datatypes.USER,
"location" : self.datatypes.LOCATION,
"switch" : self.datatypes.SWITCH,
"group" : self.datatypes.GROUP
}
for name, type in type_map.iteritems():
if(request.args.has_key(name)):
uid = int(request.args[name][0])
d = self.get_logs_for_name(uid,type,filter)
d.addCallback(dump_list_to_json, request)
d.addErrback(err, request, "get_all",
"Could not retrieve log messages")
return NOT_DONE_YET
# otherwise, we just query directory for logids
# we query either for a single logid or for all
logid_list = None # default to query for all
if(request.args.has_key("logid")):
logid = int(request.args["logid"][0])
max = self.uel.get_max_logid()
if logid >= 1 and logid <= max:
                    logid_list = (logid,)
else:
logid_list = ()
d = self.get_logs(logid_list, filter)
d.addCallback(dump_list_to_json, request)
d.addErrback(err, request, "get_all",
"Could not retrieve log messages")
except Exception, e:
err(Failure(), request, "get_all",
"Could not retrieve log messages.")
return NOT_DONE_YET
def install(self):
rwRoles = set(["Superuser", "Admin", "Demo"])
roRoles = rwRoles | set(["Readonly"])
webauth.Capabilities.register('get-network-events',
'Get network event log messages', roRoles)
webauth.Capabilities.register('add-network-events',
'Add network event log messages', rwRoles)
webauth.Capabilities.register('remove-network-events',
'Remove network event log messages', rwRoles)
self.uel = self.resolve(pyuser_event_log)
self.datatypes = self.resolve(Datatypes)
self.datacache = self.resolve(DataCache)
ws = self.resolve(webservice)
v1 = ws.get_version("1")
# returns a JSON object:
#
# { 'identifier' : 'logid' , items : [ .... ] }
#
# Query Params:
# * supports standard 'start' 'count' for pagination
# * supports 'sort_descending' and
get_all_path = ( WSPathStaticString("networkevents"),)
v1.register_request(self.handle_get_all, "GET", get_all_path,
"""Get a set of messages from the network events log""")
remove_path = ( WSPathStaticString("networkevents"),
WSPathStaticString("remove"))
v1.register_request(self.handle_remove, "PUT", remove_path,
"""Permanently remove all (or just some) network event log entries""")
add_path = ( WSPathStaticString("networkevents"),
WSPathStaticString("add"))
v1.register_request(self.handle_add, "PUT", add_path,
"""Add a simple network event log message""")
def getFactory():
class Factory:
def instance(self,ctx):
return networkeventsws(ctx)
return Factory()
|
gpl-3.0
|
mahak/cinder
|
cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py
|
2
|
6375
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import socket
from unittest import mock
import ddt
from oslo_config import cfg
from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap.utils import utils
CONF = cfg.CONF
@ddt.ddt
class NetAppCDOTDataMotionTestCase(test.TestCase):
def setUp(self):
super(NetAppCDOTDataMotionTestCase, self).setUp()
self.backend = 'backend1'
self.mock_cmode_client = self.mock_object(client_cmode, 'Client')
self.config = fakes.get_fake_cmode_config(self.backend)
CONF.set_override('volume_backend_name', self.backend,
group=self.backend)
CONF.set_override('netapp_transport_type', 'https',
group=self.backend)
CONF.set_override('netapp_login', 'fake_user',
group=self.backend)
CONF.set_override('netapp_password', 'fake_password',
group=self.backend)
CONF.set_override('netapp_server_hostname', 'fake_hostname',
group=self.backend)
CONF.set_override('netapp_server_port', 8866,
group=self.backend)
CONF.set_override('netapp_api_trace_pattern', "fake_regex",
group=self.backend)
def test_get_backend_configuration(self):
self.mock_object(utils, 'CONF')
CONF.set_override('netapp_vserver', 'fake_vserver',
group=self.backend)
utils.CONF.list_all_sections.return_value = [self.backend]
config = utils.get_backend_configuration(self.backend)
self.assertEqual('fake_vserver', config.netapp_vserver)
def test_get_backend_configuration_different_backend_name(self):
self.mock_object(utils, 'CONF')
CONF.set_override('netapp_vserver', 'fake_vserver',
group=self.backend)
CONF.set_override('volume_backend_name', 'fake_backend_name',
group=self.backend)
utils.CONF.list_all_sections.return_value = [self.backend]
config = utils.get_backend_configuration(self.backend)
self.assertEqual('fake_vserver', config.netapp_vserver)
self.assertEqual('fake_backend_name', config.volume_backend_name)
@ddt.data([], ['fake_backend1', 'fake_backend2'])
def test_get_backend_configuration_not_configured(self, conf_sections):
self.mock_object(utils, 'CONF')
utils.CONF.list_all_sections.return_value = conf_sections
self.assertRaises(exception.ConfigNotFound,
utils.get_backend_configuration,
self.backend)
def test_get_client_for_backend(self):
self.mock_object(utils, 'get_backend_configuration',
return_value=self.config)
utils.get_client_for_backend(self.backend)
self.mock_cmode_client.assert_called_once_with(
hostname='fake_hostname', password='fake_password',
username='fake_user', transport_type='https', port=8866,
trace=mock.ANY, vserver=None, api_trace_pattern="fake_regex")
def test_get_client_for_backend_with_vserver(self):
self.mock_object(utils, 'get_backend_configuration',
return_value=self.config)
CONF.set_override('netapp_vserver', 'fake_vserver',
group=self.backend)
utils.get_client_for_backend(self.backend)
self.mock_cmode_client.assert_called_once_with(
hostname='fake_hostname', password='fake_password',
username='fake_user', transport_type='https', port=8866,
trace=mock.ANY, vserver='fake_vserver',
api_trace_pattern="fake_regex")
@ddt.ddt
class NetAppDataOntapUtilsTestCase(test.TestCase):
def test_build_ems_log_message_0(self):
self.mock_object(
socket, 'gethostname', return_value='fake_hostname')
result = utils.build_ems_log_message_0(
'fake_driver_name', 'fake_app_version')
expected = {
'computer-name': 'fake_hostname',
'event-source': 'Cinder driver fake_driver_name',
'app-version': 'fake_app_version',
'category': 'provisioning',
'log-level': '5',
'auto-support': 'false',
'event-id': '0',
'event-description': 'OpenStack Cinder connected to cluster node',
}
self.assertEqual(expected, result)
def test_build_ems_log_message_1(self):
self.mock_object(
socket, 'gethostname', return_value='fake_hostname')
aggregate_pools = ['aggr1', 'aggr2']
flexvol_pools = ['vol1', 'vol2']
result = utils.build_ems_log_message_1(
'fake_driver_name', 'fake_app_version', 'fake_vserver',
flexvol_pools, aggregate_pools)
pool_info = {
'pools': {
'vserver': 'fake_vserver',
'aggregates': aggregate_pools,
'flexvols': flexvol_pools,
},
}
self.assertDictEqual(pool_info,
json.loads(result['event-description']))
result['event-description'] = ''
expected = {
'computer-name': 'fake_hostname',
'event-source': 'Cinder driver fake_driver_name',
'app-version': 'fake_app_version',
'category': 'provisioning',
'log-level': '5',
'auto-support': 'false',
'event-id': '1',
'event-description': '',
}
self.assertEqual(expected, result)
|
apache-2.0
|
mzizzi/ansible
|
lib/ansible/modules/network/eos/eos_user.py
|
20
|
12545
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: eos_user
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the collection of local users on EOS devices
description:
- This module provides declarative management of the local usernames
configured on Arista EOS devices. It allows playbooks to manage
either individual usernames or the collection of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
extends_documentation_fragment: eos
options:
users:
description:
- The set of username objects to be configured on the remote
Arista EOS device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(username) argument.
username:
description:
- The username to be configured on the remote Arista EOS
        device. This argument accepts a string value and is mutually
exclusive with the C(users) argument.
        Please note that this option is not the same as C(provider username).
password:
description:
- The password to be configured on the remote Arista EOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
        Please note that this option is not the same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
privilege:
description:
- The C(privilege) argument configures the privilege level of the
user when logged into the system. This argument accepts integer
values in the range of 1 to 15.
role:
description:
- Configures the role for the username in the
device running configuration. The argument accepts a string value
defining the role name. This argument does not check if the role
has been configured on the device.
sshkey:
description:
- Specifies the SSH public key to configure
for the given username. This argument accepts a valid SSH key value.
nopassword:
description:
- Defines the username without assigning
a password. This will allow the user to login to the system
without being authenticated by a password.
type: bool
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user which cannot be deleted per EOS constraints.
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: create a new user
eos_user:
username: ansible
sshkey: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
state: present
- name: remove all users except admin
eos_user:
purge: yes
- name: set multiple users to privilege level 15
eos_user:
users:
- username: netop
- username: netend
privilege: 15
state: present
- name: Change Password for User netop
eos_user:
username: netop
password: "{{ new_password }}"
update_password: always
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password
- username admin secret admin
session_name:
description: The EOS config session name used to load the configuration
returned: when changed is True
type: str
sample: ansible_1479315771
"""
import re
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.eos import get_config, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.eos import eos_argument_spec, check_args
def validate_privilege(value, module):
if not 1 <= value <= 15:
module.fail_json(msg='privilege must be between 1 and 15, got %s' % value)
def map_obj_to_commands(updates, module):
commands = list()
state = module.params['state']
update_password = module.params['update_password']
for update in updates:
want, have = update
needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
if 'name' in want:
add = lambda x: commands.append('username %s %s' % (want['name'], x))
else:
add = lambda x: commands.append('username %s %s' % (want['username'], x))
if want['state'] == 'absent':
if 'name' in want:
commands.append('no username %s' % want['name'])
else:
commands.append('no username %s' % want['username'])
continue
if needs_update('role'):
add('role %s' % want['role'])
if needs_update('privilege'):
add('privilege %s' % want['privilege'])
if needs_update('password'):
if update_password == 'always' or not have:
add('secret %s' % want['password'])
if needs_update('sshkey'):
add('sshkey %s' % want['sshkey'])
if needs_update('nopassword'):
if want['nopassword']:
add('nopassword')
else:
if 'name' in want:
add('no username %s nopassword' % want['name'])
else:
add('no username %s nopassword' % want['username'])
return commands
def parse_role(data):
match = re.search(r'role (\S+)', data, re.M)
if match:
return match.group(1)
def parse_sshkey(data):
match = re.search(r'sshkey (.+)$', data, re.M)
if match:
return match.group(1)
def parse_privilege(data):
match = re.search(r'privilege (\S+)', data, re.M)
if match:
return int(match.group(1))
def map_config_to_obj(module):
data = get_config(module, flags=['section username'])
match = re.findall(r'^username (\S+)', data, re.M)
if not match:
return list()
instances = list()
for user in set(match):
regex = r'username %s .+$' % user
cfg = re.findall(regex, data, re.M)
cfg = '\n'.join(cfg)
obj = {
'username': user,
'state': 'present',
'nopassword': 'nopassword' in cfg,
'password': None,
'sshkey': parse_sshkey(cfg),
'privilege': parse_privilege(cfg),
'role': parse_role(cfg)
}
instances.append(obj)
return instances
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = module.params[key]
# if key does exist, do a type check on it to validate it
else:
value_type = module.argument_spec[key].get('type', 'str')
type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(item[key])
value = item[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
return value
def map_params_to_obj(module):
users = module.params['users']
if not users:
if not module.params['username'] and module.params['purge']:
return list()
elif not module.params['username']:
module.fail_json(msg='username is required')
else:
collection = [{'username': module.params['username']}]
else:
collection = list()
for item in users:
if not isinstance(item, dict):
collection.append({'username': item})
elif all(u not in item for u in ['username', 'name']):
module.fail_json(msg='username is required')
else:
collection.append(item)
objects = list()
for item in collection:
get_value = partial(get_param_value, item=item, module=module)
item['password'] = get_value('password')
item['nopassword'] = get_value('nopassword')
item['privilege'] = get_value('privilege')
item['role'] = get_value('role')
item['sshkey'] = get_value('sshkey')
item['state'] = get_value('state')
objects.append(item)
return objects
def update_objects(want, have):
updates = list()
for entry in want:
if 'name' in entry:
item = next((i for i in have if i['username'] == entry['name']), None)
else:
item = next((i for i in have if i['username'] == entry['username']), None)
if all((item is None, entry['state'] == 'present')):
updates.append((entry, {}))
elif item:
for key, value in iteritems(entry):
if value and value != item[key]:
updates.append((entry, item))
return updates
def main():
""" main entry point for module execution
"""
argument_spec = dict(
users=dict(type='list', aliases=['collection']),
username=dict(aliases=['name']),
password=dict(no_log=True),
nopassword=dict(type='bool'),
update_password=dict(default='always', choices=['on_create', 'always']),
privilege=dict(type='int'),
role=dict(),
sshkey=dict(),
purge=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(eos_argument_spec)
mutually_exclusive = [('username', 'users')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(update_objects(want, have), module)
if module.params['purge']:
want_users = [x['username'] if 'username' in x else x['name'] for x in want]
have_users = [x['username'] for x in have]
for item in set(have_users).difference(want_users):
if item != 'admin':
commands.append('no username %s' % item)
result['commands'] = commands
# the eos cli prevents this by rule so capture it and display
# a nice failure message
if 'no username admin' in commands:
module.fail_json(msg='cannot delete the `admin` account')
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
jusdng/odoo
|
addons/l10n_be_coda/l10n_be_coda.py
|
340
|
1253
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class account_bank_statement(osv.osv):
_inherit = 'account.bank.statement'
_columns = {
'coda_note': fields.text('CODA Notes'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
krellsarah/bingo-generator
|
src/pdfkit/api.py
|
8
|
3119
|
# -*- coding: utf-8 -*-
from .pdfkit import PDFKit
from .pdfkit import Configuration
def from_url(url, output_path, options=None, toc=None, cover=None, configuration=None):
"""
    Convert a file or files from URLs to a PDF document
:param url: URL or list of URLs to be saved
:param output_path: path to output PDF file. False means file will be returned as string.
:param options: (optional) dict with wkhtmltopdf global and page options, with or w/o '--'
:param toc: (optional) dict with toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: (optional) string with url/filename with a cover html page
:param configuration: (optional) instance of pdfkit.configuration.Configuration()
Returns: True on success
"""
r = PDFKit(url, 'url', options=options, toc=toc, cover=cover,
configuration=configuration)
return r.to_pdf(output_path)
def from_file(input, output_path, options=None, toc=None, cover=None, css=None,
configuration=None):
"""
Convert HTML file or files to PDF document
:param input: path to HTML file or list with paths or file-like object
:param output_path: path to output PDF file. False means file will be returned as string.
:param options: (optional) dict with wkhtmltopdf options, with or w/o '--'
:param toc: (optional) dict with toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: (optional) string with url/filename with a cover html page
:param css: (optional) string with path to css file which will be added to a single input file
:param configuration: (optional) instance of pdfkit.configuration.Configuration()
Returns: True on success
"""
r = PDFKit(input, 'file', options=options, toc=toc, cover=cover, css=css,
configuration=configuration)
return r.to_pdf(output_path)
def from_string(input, output_path, options=None, toc=None, cover=None, css=None,
configuration=None):
"""
Convert given string or strings to PDF document
:param input: string with a desired text. Could be a raw text or a html file
:param output_path: path to output PDF file. False means file will be returned as string.
:param options: (optional) dict with wkhtmltopdf options, with or w/o '--'
:param toc: (optional) dict with toc-specific wkhtmltopdf options, with or w/o '--'
:param cover: (optional) string with url/filename with a cover html page
    :param css: (optional) string with path to css file which will be added to an input string
:param configuration: (optional) instance of pdfkit.configuration.Configuration()
Returns: True on success
"""
r = PDFKit(input, 'string', options=options, toc=toc, cover=cover, css=css,
configuration=configuration)
return r.to_pdf(output_path)
def configuration(**kwargs):
"""
Constructs and returns a :class:`Configuration` with given options
:param wkhtmltopdf: path to binary
:param meta_tag_prefix: the prefix for ``pdfkit`` specific meta tags
"""
return Configuration(**kwargs)
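# Example usage sketch (assumes the wkhtmltopdf binary is installed and on the
# PATH; the URLs, strings and output paths below are illustrative only):
#
#   import pdfkit
#   pdfkit.from_url('http://example.com', 'out.pdf')
#   pdfkit.from_string('Hello <b>pdfkit</b>', 'out.pdf',
#                      options={'page-size': 'A4'})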
|
mit
|
ferittuncer/ITU-Turkish-NLP-Pipeline-Caller
|
setup.py
|
1
|
1031
|
import pipeline_caller
from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(
name="ITU-Turkish-NLP-Pipeline-Caller",
version=pipeline_caller.version,
packages=find_packages(),
py_modules=['pipeline_caller'],
entry_points={
'console_scripts': [
'pipeline_caller = pipeline_caller:main',
],
},
author="Ferit Tunçer",
author_email="[email protected]",
description="A wrapper tool to use ITU Turkish NLP Pipeline API",
license="GPLv2",
keywords="ITU Turkish NLP Pipeline",
url="https://github.com/ferittuncer/ITU-Turkish-NLP-Pipeline-Caller",
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Topic :: Text Processing :: Linguistic',
'Natural Language :: Turkish',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
]
)
|
gpl-2.0
|
puracore/pura
|
qa/rpc-tests/test_framework/netutil.py
|
45
|
5014
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
from binascii import unhexlify, hexlify
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
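# Example (on a little-endian host, since '=I' unpacks in native byte order):
#   _convert_ip_port('0100007F:1F90')  ->  ('7f000001', 8080)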
def netstat(typ='tcp'):
'''
    Function to return a list with the status of TCP connections on Linux systems.
    To get the pid of all network processes running on the system, you must run
    this script as superuser.
'''
with open('/proc/net/'+typ,'r') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
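# Examples:
#   addr_to_hex('127.0.0.1')  ->  '7f000001'
#   addr_to_hex('::1')        ->  '00000000000000000000000000000001'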
def test_ipv6_local():
'''
Check for (local) IPv6 support.
'''
import socket
# By using SOCK_DGRAM this will not actually make a connection, but it will
# fail if there is no route to IPv6 localhost.
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
|
mit
|
bholley/servo
|
tests/wpt/web-platform-tests/tools/webdriver/webdriver/command.py
|
258
|
3985
|
"""Dispatches requests to remote WebDriver endpoint."""
import exceptions
import httplib
import json
import urlparse
import webelement
class CommandExecutor(object):
"""Dispatches requests to remote WebDriver endpoint."""
_HEADERS = {
"User-Agent": "Python WebDriver Local End",
"Content-Type": "application/json;charset=\"UTF-8\"",
"Accept": "application/json",
"Accept-Charset": "utf-8",
"Accept-Encoding": "identity",
"Connection": "close",
}
def __init__(self, url, mode='strict'):
self._parsed_url = urlparse.urlparse(url)
self._conn = httplib.HTTPConnection(self._parsed_url.hostname,
self._parsed_url.port)
self._mode = mode
def execute(self,
method,
path,
session_id,
name,
parameters=None,
object_hook=None):
"""Execute a command against the WebDriver endpoint.
Arguments:
method -- one of GET, POST, DELETE
path -- the path of the url endpoint (needs to include
session/<sessionId> if needed)
session_id -- the sessionId to include in the JSON body
name -- name of the command that is being executed to include in
the JSON body
parameters -- the JSON body to send with the command. Only used if
method is POST
object_hook -- function used by json.loads to properly deserialize
objects in the request
"""
if self._mode == 'strict':
return self._execute_strict(
method, path, session_id, name, parameters, object_hook)
elif self._mode == 'compatibility':
return self._execute_compatibility(
method, path, session_id, name, parameters, object_hook)
else:
raise Exception("Unknown mode: " + self._mode)
def _execute_compatibility(self,
method,
path,
session_id,
name,
parameters,
object_hook):
body = {'sessionId': session_id, 'name': name }
if parameters:
body.update(parameters)
self._conn.request(
method,
self._parsed_url.path + path,
json.dumps(body, default = self._json_encode).encode('utf-8'),
self._HEADERS)
resp = self._conn.getresponse()
data = resp.read().decode('utf-8')
if data:
data = json.loads(data, object_hook = object_hook)
if data['status'] != 0:
raise exceptions.create_webdriver_exception_compatibility(
data['status'], data['value']['message'])
return data
if resp.status < 200 or resp.status > 299:
raise exceptions.create_webdriver_exception_compatibility(
resp.status, resp.reason)
def _execute_strict(self,
method,
path,
session_id,
name,
parameters,
object_hook):
body = {
'sessionId': session_id,
'name': name,
'parameters': parameters }
self._conn.request(
method,
self._parsed_url.path + path,
json.dumps(body, default = self._json_encode).encode('utf-8'),
self._HEADERS)
resp = self._conn.getresponse()
data = json.loads(
resp.read().decode('utf-8'), object_hook = object_hook)
if data['status'] != 'success':
raise exceptions.create_webdriver_exception_strict(
data['status'], data['value'])
return data
def _json_encode(self, obj):
return obj.to_json()
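# Illustrative usage sketch (hypothetical endpoint and session id, not part of
# the original module):
#   executor = CommandExecutor('http://localhost:4444/wd/hub')
#   executor.execute('POST', '/session/%s/url' % session_id, session_id,
#                    'get', {'url': 'http://example.org'})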
|
mpl-2.0
|
roadmapper/ansible
|
lib/ansible/modules/cloud/google/gcp_compute_http_health_check.py
|
13
|
15217
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_http_health_check
description:
- An HttpHealthCheck resource. This resource defines a template for how individual
VMs should be checked for health, via HTTP.
short_description: Creates a GCP HttpHealthCheck
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
check_interval_sec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
required: false
default: '5'
type: int
aliases:
- check_interval_seconds
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
type: str
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
required: false
type: int
host:
description:
- The value of the host header in the HTTP health check request. If left empty
(default value), the public IP on behalf of which this health check is performed
will be used.
required: false
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
port:
description:
- The TCP port number for the HTTP health check request.
- The default value is 80.
required: false
type: int
request_path:
description:
- The request path of the HTTP health check request.
- The default value is /.
required: false
type: str
timeout_sec:
description:
- How long (in seconds) to wait before claiming failure.
- The default value is 5 seconds. It is invalid for timeoutSec to have greater
value than checkIntervalSec.
required: false
type: int
aliases:
- timeout_seconds
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
required: false
type: int
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/httpHealthChecks)'
- 'Adding Health Checks: U(https://cloud.google.com/compute/docs/load-balancing/health-checks#legacy_health_checks)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a HTTP health check
gcp_compute_http_health_check:
name: test_object
healthy_threshold: 10
port: 8080
timeout_sec: 2
unhealthy_threshold: 5
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
checkIntervalSec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
healthyThreshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
returned: success
type: int
host:
description:
- The value of the host header in the HTTP health check request. If left empty (default
value), the public IP on behalf of which this health check is performed will be
used.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
port:
description:
- The TCP port number for the HTTP health check request.
- The default value is 80.
returned: success
type: int
requestPath:
description:
- The request path of the HTTP health check request.
- The default value is /.
returned: success
type: str
timeoutSec:
description:
- How long (in seconds) to wait before claiming failure.
- The default value is 5 seconds. It is invalid for timeoutSec to have greater value
than checkIntervalSec.
returned: success
type: int
unhealthyThreshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
returned: success
type: int
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
check_interval_sec=dict(default=5, type='int', aliases=['check_interval_seconds']),
description=dict(type='str'),
healthy_threshold=dict(type='int'),
host=dict(type='str'),
name=dict(required=True, type='str'),
port=dict(type='int'),
request_path=dict(type='str'),
timeout_sec=dict(type='int', aliases=['timeout_seconds']),
unhealthy_threshold=dict(type='int'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#httpHealthCheck'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#httpHealthCheck',
u'checkIntervalSec': module.params.get('check_interval_sec'),
u'description': module.params.get('description'),
u'healthyThreshold': module.params.get('healthy_threshold'),
u'host': module.params.get('host'),
u'name': module.params.get('name'),
u'port': module.params.get('port'),
u'requestPath': module.params.get('request_path'),
u'timeoutSec': module.params.get('timeout_sec'),
u'unhealthyThreshold': module.params.get('unhealthy_threshold'),
}
return_vals = {}
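    # Keep keys whose value is truthy or an explicit False; drop None/empty values.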
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/httpHealthChecks/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/httpHealthChecks".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'checkIntervalSec': response.get(u'checkIntervalSec'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': response.get(u'description'),
u'healthyThreshold': response.get(u'healthyThreshold'),
u'host': response.get(u'host'),
u'id': response.get(u'id'),
u'name': module.params.get('name'),
u'port': response.get(u'port'),
u'requestPath': response.get(u'requestPath'),
u'timeoutSec': response.get(u'timeoutSec'),
u'unhealthyThreshold': response.get(u'unhealthyThreshold'),
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#httpHealthCheck')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
|
gpl-3.0
|
integree/django-congo
|
congo/maintenance/tests/unused_user_accounts.py
|
1
|
1266
|
# -*- coding: utf-8 -*-
from congo.conf import settings
from congo.maintenance.tests import BaseTest
from datetime import timedelta
from django.apps import apps
from django.utils import timezone
from django.utils.formats import date_format
from django.utils.translation import ugettext_lazy as _
from congo.templatetags.admin_utils import admin_change_url
class Test(BaseTest):
def __init__(self):
super(Test, self).__init__()
        self.description = _("User accounts which have not logged in for at least 90 days")
def _run(self, *args, **kwargs):
model_name = settings.AUTH_USER_MODEL
model = apps.get_model(*model_name.split('.', 1))
login_date = timezone.now() - timedelta(days = 90)
queryset = model.objects.filter(last_login__lt = login_date)
result = not bool(queryset.count())
details = ""
for user in queryset:
change_url = admin_change_url(user)
last_login = date_format(user.last_login, 'SHORT_DATE_FORMAT')
details += """<a href="%s">%s</a> [ID: %s], last login: %s<br />""" % (change_url, user, user.id, last_login)
return {
'result': result,
'details': details,
}
|
mit
|
dermoth/gramps
|
gramps/gen/db/__init__.py
|
10
|
2522
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Gramps Database API.
Database Architecture
=====================
Access to the database is made through Python classes. Exactly
what functionality you have is dependent on the properties of the
database. For example, if you are accessing a read-only view, then
you will only have access to a subset of the methods available.
At the root of any database interface is either :py:class:`.DbReadBase` and/or
:py:class:`.DbWriteBase`. These define the methods to read and write to a
database, respectively.
The full database hierarchy is:
- :py:class:`.DbBsddb` - read and write implementation to BSDDB databases
* :py:class:`.DbWriteBase` - virtual and implementation-independent methods
     for writing data
* :py:class:`.DbBsddbRead` - read-only (accessors, getters) implementation
to BSDDB databases
+ :py:class:`.DbReadBase` - virtual and implementation-independent
methods for reading data
+ :py:class:`.Callback` - callback and signal functions
* :py:class:`.UpdateCallback` - callback functionality
DbBsddb
=======
The :py:class:`.DbBsddb` interface defines a hierarchical database
(non-relational) written in
`PyBSDDB <http://www.jcea.es/programacion/pybsddb.htm>`_. There is no
such thing as a database schema, and the meaning of the data is
defined in the Python classes above. The data is stored as pickled
tuples and deserialized into the primary data types (below).
More details can be found in the manual's
`Using database API <http://www.gramps-project.org/wiki/index.php?title=Using_database_API>`_.
"""
from .base import *
from .dbconst import *
from .txn import *
from .exceptions import *
from .undoredo import *
from .utils import *
from .generic import *
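# Illustrative (hypothetical) usage of the transaction class re-exported above,
# assuming an open writable database handle ``db`` and a Person object ``person``:
#   with DbTxn("Add person", db) as trans:
#       db.add_person(person, trans)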
|
gpl-2.0
|
yonilx/show-attend-and-tell-django
|
showattendtell/cococaption/pycocoevalcap/rouge/rouge.py
|
10
|
3643
|
#!/usr/bin/env python
#
# File Name : rouge.py
#
# Description : Computes ROUGE-L metric as described by Lin and Hovy (2004)
#
# Creation Date : 2015-01-07 06:03
# Author : Ramakrishna Vedantam <[email protected]>
import numpy as np
import pdb
def my_lcs(string, sub):
"""
Calculates longest common subsequence for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
    :returns: length (int): length of the longest common subsequence between the two strings
Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
"""
if(len(string)< len(sub)):
sub, string = string, sub
lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]
for j in range(1,len(sub)+1):
for i in range(1,len(string)+1):
if(string[i-1] == sub[j-1]):
lengths[i][j] = lengths[i-1][j-1] + 1
else:
lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])
return lengths[len(string)][len(sub)]
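# Illustrative example (not part of the original file):
#   my_lcs("the cat sat on the mat".split(), "the cat on mat".split()) == 4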
class Rouge():
'''
Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set
'''
def __init__(self):
        # vrama91: updated the value below based on discussion with Hovy
self.beta = 1.2
def calc_score(self, candidate, refs):
"""
Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
        :returns score: float (ROUGE-L score for the candidate evaluated against references)
"""
assert(len(candidate)==1)
assert(len(refs)>0)
prec = []
rec = []
# split into tokens
token_c = candidate[0].split(" ")
for reference in refs:
# split into tokens
token_r = reference.split(" ")
# compute the longest common subsequence
lcs = my_lcs(token_r, token_c)
prec.append(lcs/float(len(token_c)))
rec.append(lcs/float(len(token_r)))
prec_max = max(prec)
rec_max = max(rec)
if(prec_max!=0 and rec_max !=0):
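            # F-measure with beta weighting recall: ((1 + beta^2) * P * R) / (R + beta^2 * P)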
score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)
else:
score = 0.0
return score
def compute_score(self, gts, res):
"""
Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
:param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
"""
assert(gts.keys() == res.keys())
imgIds = gts.keys()
score = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
score.append(self.calc_score(hypo, ref))
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) > 0)
average_score = np.mean(np.array(score))
return average_score, np.array(score)
def method(self):
return "Rouge"
|
mit
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/pip/_vendor/requests/sessions.py
|
439
|
24250
|
# -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .packages.urllib3._collections import RecentlyUsedContainer
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
REDIRECT_CACHE_SIZE = 1000
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""
Determines appropriate setting for a given request, taking into account the
explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None.
for (k, v) in request_setting.items():
if v is None:
del merged_setting[k]
merged_setting = dict((k, v) for (k, v) in merged_setting.items() if v is not None)
return merged_setting
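# Illustrative example (not part of the original module):
#   merge_setting({'b': None}, {'a': 1, 'b': 2}) == {'a': 1}
# Request-level keys win, and keys explicitly set to None are removed.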
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""
Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses."""
i = 0
hist = [] # keep track of history
while resp.is_redirect:
prepared_request = req.copy()
if i > 0:
# Update history and keep track of redirects.
hist.append(resp)
new_hist = list(hist)
resp.history = new_hist
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
method = req.method
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
# Cache the url, unless it redirects to itself.
if resp.is_permanent_redirect and req.url != prepared_request.url:
self.redirect_cache[req.url] = prepared_request.url
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if (resp.status_code == codes.see_other and
method != 'HEAD'):
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if resp.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if resp.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# Override the original request.
req = prepared_request
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
def rebuild_auth(self, prepared_request, response):
"""
When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
def rebuild_proxies(self, prepared_request, proxies):
"""
This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
"""
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and not should_bypass_proxies(url):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get(scheme)
if proxy:
new_proxies.setdefault(scheme, environ_proxies[scheme])
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
200
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol to the URL of the proxy (e.g.
#: {'http': 'foo.bar:3128'}) to be used on each
#: :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL certificate default.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Should we trust the environment?
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
# Only store 1000 redirects to prevent using infinite memory
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary or bytes to send in the body of the
:class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
"""
method = to_native_string(method)
# Create the Request.
req = Request(
method = method.upper(),
url = url,
headers = headers,
files = files,
data = data or {},
json = json,
params = params or {},
auth = auth,
cookies = cookies,
hooks = hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest."""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if not isinstance(request, PreparedRequest):
raise ValueError('You can only send PreparedRequests.')
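        # Resolve URLs through the permanent-redirect cache, guarding against loops.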
checked_urls = set()
while request.url in self.redirect_cache:
checked_urls.add(request.url)
new_url = self.redirect_cache.get(request.url)
if new_url in checked_urls:
break
request.url = new_url
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""Check the environment and merge it with some settings."""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
env_proxies = get_environ_proxies(url) or {}
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""Returns the appropriate connnection adapter for the given URL."""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length."""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
def __getstate__(self):
state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
state['redirect_cache'] = dict(self.redirect_cache)
return state
def __setstate__(self, state):
redirect_cache = state.pop('redirect_cache', {})
for attr, value in state.items():
setattr(self, attr, value)
self.redirect_cache = RecentlyUsedContainer(REDIRECT_CACHE_SIZE)
for redirect, to in redirect_cache.items():
self.redirect_cache[redirect] = to
def session():
"""Returns a :class:`Session` for context-management."""
return Session()
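# Illustrative usage (not part of the original module):
#   with session() as s:
#       s.get('http://httpbin.org/get')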
|
bsd-3-clause
|
JTCunning/sentry
|
tests/sentry/web/frontend/projects/tests.py
|
14
|
8141
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import mock
import logging
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.models import ProjectKey, ProjectKeyStatus, ProjectOption, TagKey
from sentry.testutils import TestCase
logger = logging.getLogger(__name__)
class ManageProjectKeysTest(TestCase):
@fixture
def path(self):
return reverse('sentry-manage-project-keys', args=[self.organization.slug, self.project.id])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_renders_with_required_context(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
        self.assertTemplateUsed(resp, 'sentry/projects/keys.html')
assert 'key_list' in resp.context
assert 'can_add_key' in resp.context
class NewProjectKeyTest(TestCase):
@fixture
def path(self):
return reverse('sentry-new-project-key', args=[self.organization.slug, self.project.id])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_generates_new_key_and_redirects(self):
keycount = ProjectKey.objects.filter(project=self.project).count()
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 302
newkeycount = ProjectKey.objects.filter(project=self.project).count()
assert newkeycount == keycount + 1
class RemoveProjectKeyTest(TestCase):
def setUp(self):
super(RemoveProjectKeyTest, self).setUp()
self.key = ProjectKey.objects.create(project=self.project)
@fixture
def path(self):
return reverse('sentry-remove-project-key', args=[self.organization.slug, self.project.id, self.key.id])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path, 'POST')
def test_does_not_respond_to_get(self):
resp = self.client.get(self.path)
assert resp.status_code == 405
def test_removes_key_and_redirects(self):
self.login_as(self.user)
resp = self.client.post(self.path)
assert resp.status_code == 302
assert not ProjectKey.objects.filter(id=self.key.id).exists()
class EnableProjectKeyTest(TestCase):
def setUp(self):
super(EnableProjectKeyTest, self).setUp()
self.key = ProjectKey.objects.create(
project=self.project,
status=ProjectKeyStatus.INACTIVE,
)
@fixture
def path(self):
return reverse('sentry-enable-project-key', args=[self.organization.slug, self.project.id, self.key.id])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path, 'POST')
def test_does_not_respond_to_get(self):
resp = self.client.get(self.path)
assert resp.status_code == 405
def test_does_enable(self):
self.login_as(self.user)
resp = self.client.post(self.path)
assert resp.status_code == 302
key = ProjectKey.objects.get(id=self.key.id)
assert key.status == ProjectKeyStatus.ACTIVE
class DisableProjectKeyTest(TestCase):
def setUp(self):
super(DisableProjectKeyTest, self).setUp()
self.key = ProjectKey.objects.create(
project=self.project,
status=ProjectKeyStatus.ACTIVE,
)
@fixture
def path(self):
return reverse('sentry-disable-project-key', args=[self.organization.slug, self.project.id, self.key.id])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path, 'POST')
def test_does_not_respond_to_get(self):
resp = self.client.get(self.path)
assert resp.status_code == 405
def test_does_enable(self):
self.login_as(self.user)
resp = self.client.post(self.path)
assert resp.status_code == 302
key = ProjectKey.objects.get(id=self.key.id)
assert key.status == ProjectKeyStatus.INACTIVE
class DashboardTest(TestCase):
@fixture
def path(self):
return reverse('sentry-team-dashboard', args=[self.organization.slug, self.team.slug])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
@mock.patch('sentry.web.frontend.groups.can_create_projects')
def test_redirects_to_create_project_if_none_and_can_create_projects(self, can_create_projects):
self.login_as(self.user)
can_create_projects.return_value = True
resp = self.client.get(self.path)
can_create_projects.assert_called_once_with(self.user, team=self.team)
url = reverse('sentry-create-project', args=[self.organization.slug])
assert resp.status_code == 302
assert resp['Location'] == 'http://testserver%s?team=%s' % (url, self.team.slug)
@mock.patch('sentry.web.frontend.groups.can_create_projects')
    def test_does_not_redirect_if_missing_project_permission(self, can_create_projects):
self.login_as(self.user)
can_create_projects.return_value = False
resp = self.client.get(self.path)
can_create_projects.assert_called_once_with(self.user, team=self.team)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/dashboard.html')
@mock.patch('sentry.web.frontend.groups.can_create_projects')
def test_does_not_redirect_if_has_projects(self, can_create_projects):
self.login_as(self.user)
# HACK: force creation
self.project
resp = self.client.get(self.path)
assert not can_create_projects.called
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/dashboard.html')
assert resp.context['organization'] == self.organization
assert resp.context['team'] == self.team
assert resp.context['project_list'] == [self.project]
class GetStartedTest(TestCase):
@fixture
def path(self):
return reverse('sentry-get-started', args=[self.organization.slug, self.project.slug])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_renders_with_required_context(self):
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/get_started.html')
assert resp.context['project'] == self.project
assert resp.context['team'] == self.team
assert resp.context['organization'] == self.organization
class ManageProjectTagsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-manage-project-tags', args=[self.organization.slug, self.project.id])
def test_requires_authentication(self):
self.assertRequiresAuthentication(self.path)
def test_simple(self):
TagKey.objects.create(project=self.project, key='site')
TagKey.objects.create(project=self.project, key='url')
TagKey.objects.create(project=self.project, key='os')
self.login_as(self.user)
resp = self.client.get(self.path)
assert resp.status_code == 200
        self.assertTemplateUsed(resp, 'sentry/projects/manage_tags.html')
assert resp.context['organization'] == self.organization
assert resp.context['team'] == self.team
assert resp.context['project'] == self.project
tag_list = [t.key for t in resp.context['tag_list']]
assert 'site' in tag_list
assert 'url' in tag_list
assert 'os' in tag_list
resp = self.client.post(self.path, {
'filters': ['site', 'url'],
'annotations': ['os'],
})
assert resp.status_code == 302
enabled_filters = ProjectOption.objects.get_value(
self.project, 'tags')
assert sorted(enabled_filters) == ['site', 'url']
enabled_annotations = ProjectOption.objects.get_value(
self.project, 'annotations')
assert enabled_annotations == ['os']
|
bsd-3-clause
|
maxkoryukov/headphones
|
lib/mako/ext/babelplugin.py
|
60
|
2064
|
# ext/babelplugin.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""gettext message extraction via Babel: http://babel.edgewall.org/"""
from babel.messages.extract import extract_python
from mako.ext.extract import MessageExtractor
class BabelMakoExtractor(MessageExtractor):
def __init__(self, keywords, comment_tags, options):
self.keywords = keywords
self.options = options
self.config = {
'comment-tags': u' '.join(comment_tags),
'encoding': options.get('input_encoding',
options.get('encoding', None)),
}
super(BabelMakoExtractor, self).__init__()
def __call__(self, fileobj):
return self.process_file(fileobj)
def process_python(self, code, code_lineno, translator_strings):
comment_tags = self.config['comment-tags']
for lineno, funcname, messages, python_translator_comments \
in extract_python(code,
self.keywords, comment_tags, self.options):
yield (code_lineno + (lineno - 1), funcname, messages,
translator_strings + python_translator_comments)
def extract(fileobj, keywords, comment_tags, options):
"""Extract messages from Mako templates.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
extractor = BabelMakoExtractor(keywords, comment_tags, options)
for message in extractor(fileobj):
yield message
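# Illustrative Babel mapping configuration (an assumption, not part of this file)
# that would route Mako templates through this extractor:
#   [mako: templates/**.mako]
#   input_encoding = utf-8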
|
gpl-3.0
|
marcoarruda/MissionPlanner
|
Lib/lib2to3/fixes/fix_filter.py
|
61
|
2183
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes filter(F, X) into list(filter(F, X)).
We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.
NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple. That would require type inference, which we don't do. Let
Python 2.6 figure it out.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
class FixFilter(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
filter_lambda=power<
'filter'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'filter'
trailer< '(' arglist< none='None' ',' seq=any > ')' >
>
|
power<
'filter'
args=trailer< '(' [any] ')' >
>
"""
skip_on = "future_builtins.filter"
def transform(self, node, results):
if self.should_skip(node):
return
if "filter_lambda" in results:
new = ListComp(results.get("fp").clone(),
results.get("fp").clone(),
results.get("it").clone(),
results.get("xp").clone())
elif "none" in results:
new = ListComp(Name(u"_f"),
Name(u"_f"),
results["seq"].clone(),
Name(u"_f"))
else:
if in_special_context(node):
return None
new = node.clone()
new.prefix = u""
new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
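# Illustrative transformations performed by this fixer (derived from the
# PATTERN and transform() above, shown for reference):
#   filter(lambda x: x > 0, seq)  ->  [x for x in seq if x > 0]
#   filter(None, seq)             ->  [_f for _f in seq if _f]
#   filter(f, seq)                ->  list(filter(f, seq))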
|
gpl-3.0
|
pas256/ansible
|
test/units/playbook/test_attribute.py
|
219
|
1824
|
# (c) 2015, Marius Gedminas <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.compat.tests import unittest
from ansible.playbook.attribute import Attribute
class TestAttribute(unittest.TestCase):
def setUp(self):
self.one = Attribute(priority=100)
self.two = Attribute(priority=0)
def test_eq(self):
self.assertTrue(self.one == self.one)
self.assertFalse(self.one == self.two)
def test_ne(self):
self.assertFalse(self.one != self.one)
self.assertTrue(self.one != self.two)
def test_lt(self):
self.assertFalse(self.one < self.one)
self.assertTrue(self.one < self.two)
self.assertFalse(self.two < self.one)
def test_gt(self):
self.assertFalse(self.one > self.one)
self.assertFalse(self.one > self.two)
self.assertTrue(self.two > self.one)
def test_le(self):
self.assertTrue(self.one <= self.one)
self.assertTrue(self.one <= self.two)
self.assertFalse(self.two <= self.one)
def test_ge(self):
self.assertTrue(self.one >= self.one)
self.assertFalse(self.one >= self.two)
self.assertTrue(self.two >= self.one)
|
gpl-3.0
|
davidzchen/tensorflow
|
tensorflow/python/debug/lib/debug_data_test.py
|
14
|
11805
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tfdbg module debug_data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import platform
import tempfile
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class DeviceNamePathConversionTest(test_util.TensorFlowTestCase):
def testDeviceNameToDevicePath(self):
self.assertEqual(
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_ps,replica_1,task_2,cpu_0",
debug_data.device_name_to_device_path("/job:ps/replica:1/task:2/cpu:0"))
def testDevicePathToDeviceName(self):
self.assertEqual(
"/job:ps/replica:1/task:2/cpu:0",
debug_data.device_path_to_device_name(
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_ps,replica_1,task_2,cpu_0"))
class HasNanOrInfTest(test_util.TensorFlowTestCase):
def setUp(self):
self._dummy_datum = dummy_datum = debug_data.DebugTensorDatum(
"/foo", "bar_0_DebugIdentity_42")
def testNaN(self):
a = np.array([np.nan, np.nan, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testInf(self):
a = np.array([np.inf, np.inf, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testNanAndInf(self):
a = np.array([np.inf, np.nan, 7.0])
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testNoNanOrInf(self):
a = np.array([0.0, 0.0, 7.0])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testEmpty(self):
a = np.array([])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testInconvertibleTensorProto(self):
self.assertFalse(debug_data.has_inf_or_nan(
self._dummy_datum,
debug_data.InconvertibleTensorProto(tensor_pb2.TensorProto(),
initialized=False)))
self.assertFalse(debug_data.has_inf_or_nan(
self._dummy_datum,
debug_data.InconvertibleTensorProto(tensor_pb2.TensorProto(),
initialized=True)))
def testDTypeComplexWorks(self):
a = np.array([1j, 3j, 3j, 7j], dtype=np.complex128)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
b = np.array([1j, 3j, 3j, 7j, np.nan], dtype=np.complex128)
self.assertTrue(debug_data.has_inf_or_nan(self._dummy_datum, b))
def testDTypeIntegerWorks(self):
a = np.array([1, 3, 3, 7], dtype=np.int16)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testDTypeStringGivesFalse(self):
"""isnan and isinf are not applicable to strings."""
a = np.array(["s", "p", "a", "m"])
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
def testDTypeObjectGivesFalse(self):
dt = np.dtype([("spam", np.str_, 16), ("eggs", np.float64, (2,))])
a = np.array([("spam", (8.0, 7.0)), ("eggs", (6.0, 5.0))], dtype=dt)
self.assertFalse(debug_data.has_inf_or_nan(self._dummy_datum, a))
class DebugTensorDatumTest(test_util.TensorFlowTestCase):
def testDebugDatum(self):
dump_root = "/tmp/tfdbg_1"
debug_dump_rel_path = (
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,cpu_0" +
"/ns1/ns2/node_a_1_2_DebugIdentity_1472563253536385")
datum = debug_data.DebugTensorDatum(dump_root, debug_dump_rel_path)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertEqual("ns1/ns2/node_a_1", datum.node_name)
self.assertEqual(2, datum.output_slot)
self.assertEqual("ns1/ns2/node_a_1:2", datum.tensor_name)
self.assertEqual(1472563253536385, datum.timestamp)
self.assertEqual("ns1/ns2/node_a_1:2:DebugIdentity", datum.watch_key)
self.assertEqual(
os.path.join(dump_root, debug_dump_rel_path), datum.file_path)
self.assertEqual(
"{DebugTensorDatum (/job:localhost/replica:0/task:0/cpu:0) "
"%s:%d @ %s @ %d}" % (datum.node_name,
datum.output_slot,
datum.debug_op,
datum.timestamp), str(datum))
self.assertEqual(
"{DebugTensorDatum (/job:localhost/replica:0/task:0/cpu:0) "
"%s:%d @ %s @ %d}" % (datum.node_name,
datum.output_slot,
datum.debug_op,
datum.timestamp), repr(datum))
def testDumpSizeBytesIsNoneForNonexistentFilePath(self):
dump_root = "/tmp/tfdbg_1"
debug_dump_rel_path = "ns1/ns2/node_foo_1_2_DebugIdentity_1472563253536385"
datum = debug_data.DebugTensorDatum(dump_root, debug_dump_rel_path)
self.assertIsNone(datum.dump_size_bytes)
class DebugDumpDirTest(test_util.TensorFlowTestCase):
def setUp(self):
self._dump_root = tempfile.mktemp()
os.mkdir(self._dump_root)
def tearDown(self):
# Tear down temporary dump directory.
file_io.delete_recursively(self._dump_root)
def _makeDataDirWithMultipleDevicesAndDuplicateNodeNames(self):
cpu_0_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,cpu_0")
gpu_0_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,device_GPU_0")
gpu_1_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,device_GPU_1")
os.makedirs(cpu_0_dir)
os.makedirs(gpu_0_dir)
os.makedirs(gpu_1_dir)
open(os.path.join(
cpu_0_dir, "node_foo_1_2_DebugIdentity_1472563253536386"), "wb")
open(os.path.join(
gpu_0_dir, "node_foo_1_2_DebugIdentity_1472563253536385"), "wb")
open(os.path.join(
gpu_1_dir, "node_foo_1_2_DebugIdentity_1472563253536387"), "wb")
def testDebugDumpDir_nonexistentDumpRoot(self):
with self.assertRaisesRegex(IOError, "does not exist"):
debug_data.DebugDumpDir(tempfile.mktemp() + "_foo")
def testDebugDumpDir_invalidFileNamingPattern(self):
# File name with too few underscores should lead to an exception.
device_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX + debug_data.DEVICE_TAG +
",job_localhost,replica_0,task_0,cpu_0")
os.makedirs(device_dir)
open(os.path.join(device_dir, "node1_DebugIdentity_1234"), "wb")
with self.assertRaisesRegex(ValueError,
"does not conform to the naming pattern"):
debug_data.DebugDumpDir(self._dump_root)
def testDebugDumpDir_validDuplicateNodeNamesWithMultipleDevices(self):
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
graph_cpu_0 = graph_pb2.GraphDef()
node = graph_cpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/cpu:0"
graph_gpu_0 = graph_pb2.GraphDef()
node = graph_gpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:0"
graph_gpu_1 = graph_pb2.GraphDef()
node = graph_gpu_1.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
dump_dir = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=[graph_cpu_0, graph_gpu_0, graph_gpu_1])
self.assertItemsEqual(
["/job:localhost/replica:0/task:0/cpu:0",
"/job:localhost/replica:0/task:0/device:GPU:0",
"/job:localhost/replica:0/task:0/device:GPU:1"], dump_dir.devices())
self.assertEqual(1472563253536385, dump_dir.t0)
self.assertEqual(3, dump_dir.size)
with self.assertRaisesRegex(ValueError, r"Invalid device name: "):
dump_dir.nodes("/job:localhost/replica:0/task:0/device:GPU:2")
self.assertItemsEqual(["node_foo_1", "node_foo_1", "node_foo_1"],
dump_dir.nodes())
self.assertItemsEqual(
["node_foo_1"],
dump_dir.nodes(device_name="/job:localhost/replica:0/task:0/cpu:0"))
def testDuplicateNodeNamesInGraphDefOfSingleDeviceRaisesException(self):
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
graph_cpu_0 = graph_pb2.GraphDef()
node = graph_cpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/cpu:0"
graph_gpu_0 = graph_pb2.GraphDef()
node = graph_gpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:0"
graph_gpu_1 = graph_pb2.GraphDef()
node = graph_gpu_1.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
node = graph_gpu_1.node.add() # Here is the duplicate.
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
with self.assertRaisesRegex(ValueError, r"Duplicate node name on device "):
debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=[graph_cpu_0, graph_gpu_0, graph_gpu_1])
def testDebugDumpDir_emptyDumpDir(self):
dump_dir = debug_data.DebugDumpDir(self._dump_root)
self.assertIsNone(dump_dir.t0)
self.assertEqual([], dump_dir.dumped_tensor_data)
def testDebugDumpDir_usesGfileGlob(self):
if platform.system() == "Windows":
self.skipTest("gfile.Glob is not used on Windows.")
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
def fake_gfile_glob(glob_pattern):
del glob_pattern
return []
with test.mock.patch.object(
gfile, "Glob", side_effect=fake_gfile_glob, autospec=True) as fake:
debug_data.DebugDumpDir(self._dump_root)
expected_calls = [
test.mock.call(os.path.join(
self._dump_root,
(debug_data.METADATA_FILE_PREFIX +
debug_data.CORE_METADATA_TAG + "*"))),
test.mock.call(os.path.join(
self._dump_root,
(debug_data.METADATA_FILE_PREFIX +
debug_data.FETCHES_INFO_FILE_TAG + "*"))),
test.mock.call(os.path.join(
self._dump_root,
(debug_data.METADATA_FILE_PREFIX +
debug_data.FEED_KEYS_INFO_FILE_TAG + "*"))),
test.mock.call(os.path.join(
self._dump_root,
(debug_data.METADATA_FILE_PREFIX +
debug_data.DEVICE_TAG + "*")))]
fake.assert_has_calls(expected_calls, any_order=True)
if __name__ == "__main__":
googletest.main()
|
apache-2.0
|
jordiclariana/ansible
|
lib/ansible/modules/commands/raw.py
|
7
|
3364
|
# this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = '''
---
module: raw
short_description: Executes a low-down and dirty SSH command
version_added: historical
options:
free_form:
description:
- the raw module takes a free form command to run. There is no parameter actually named 'free form'; see the examples!
required: true
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
- when using privilege escalation (C(become)), a default shell will be assigned if one is not provided
as privilege escalation requires a shell.
required: false
version_added: "1.0"
description:
- Executes a low-down and dirty SSH command, not going through the module
subsystem. This is useful and should only be done in two cases. The
first case is installing C(python-simplejson) on older (Python 2.4 and
before) hosts that need it as a dependency to run modules, since nearly
all core modules require it. Another is speaking to any devices such as
routers that do not have any Python installed. In any other case, using
the M(shell) or M(command) module is much more appropriate. Arguments
given to M(raw) are run directly through the configured remote shell.
Standard output, error output and return code are returned when
available. There is no change handler support for this module.
- This module does not require python on the remote system, much like
the M(script) module.
notes:
- "If using raw from a playbook, you may need to disable fact gathering
using C(gather_facts: no) if you're using C(raw) to bootstrap python
onto the machine."
- If you want to execute a command securely and predictably, it may be
better to use the M(command) or M(shell) modules instead.
  - The C(environment) keyword does not normally work with raw; it requires a shell,
    which means it only works if C(executable) is set or when the module is used
    with privilege escalation (C(become)).
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Bootstrap a legacy python 2.4 host
- raw: yum -y install python-simplejson
# Bootstrap a host without python2 installed
- raw: dnf install -y python2 python2-dnf libselinux-python
# Run a command that uses non-posix shell-isms (in this example /bin/sh
# doesn't handle redirection and wildcards together but bash does)
- raw: cat < /tmp/*txt
args:
executable: /bin/bash
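# An illustrative sketch (not from the original docs): bootstrap python with fact
# gathering disabled, as the notes above suggest. The package names are examples
# and vary by distribution.
# - hosts: new_hosts
#   gather_facts: no
#   tasks:
#     - raw: dnf install -y python2 python2-dnf libselinux-python
#     - setup: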
'''
|
gpl-3.0
|
mayfield/snowflake-connector-python
|
file_compression_type.py
|
1
|
3100
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2017 Snowflake Computing Inc. All rights reserved.
#
class FileCompressionType():
def __init__(self):
pass
Types = {
u'GZIP': {
u'name': u'GZIP',
u'file_extension': u'.gz',
u'mime_type': u'application',
u'mime_subtypes': [u'gzip', u'x-gzip'],
u'is_supported': True,
},
u'DEFLATE': {
u'name': u'DEFLATE',
        u'file_extension': u'.deflate',
u'mime_type': u'application',
u'mime_subtypes': [u'zlib', u'deflate'],
u'is_supported': True,
},
u'RAW_DEFLATE': {
u'name': u'RAW_DEFLATE',
        u'file_extension': u'.raw_deflate',
u'mime_type': u'application',
u'mime_subtypes': [u'raw_deflate'],
u'is_supported': True,
},
u'BZIP2': {
u'name': u'BZIP2',
        u'file_extension': u'.bz2',
u'mime_type': u'application',
u'mime_subtypes': [u'bzip2', u'x-bzip2', u'x-bz2', u'x-bzip', u'bz2'],
u'is_supported': True,
},
u'LZIP': {
u'name': u'LZIP',
        u'file_extension': u'.lz',
u'mime_type': u'application',
u'mime_subtypes': [u'lzip', u'x-lzip'],
u'is_supported': False,
},
u'LZMA': {
u'name': u'LZMA',
        u'file_extension': u'.lzma',
u'mime_type': u'application',
u'mime_subtypes': [u'lzma', u'x-lzma'],
u'is_supported': False,
},
u'LZO': {
u'name': u'LZO',
        u'file_extension': u'.lzo',
u'mime_type': u'application',
u'mime_subtypes': [u'lzo', u'x-lzo'],
u'is_supported': False,
},
u'XZ': {
u'name': u'XZ',
        u'file_extension': u'.xz',
u'mime_type': u'application',
u'mime_subtypes': [u'xz', u'x-xz'],
u'is_supported': False,
},
u'COMPRESS': {
u'name': u'COMPRESS',
        u'file_extension': u'.Z',
u'mime_type': u'application',
u'mime_subtypes': [u'compress', u'x-compress'],
u'is_supported': False,
},
u'PARQUET': {
u'name': u'PARQUET',
        u'file_extension': u'.parquet',
u'mime_type': u'snowflake',
u'mime_subtypes': [u'parquet'],
u'is_supported': True,
},
}
subtype_to_meta = {}
    # TODO: Snappy avro doesn't need to be compressed again
@classmethod
def init(cls):
for meta in cls.Types.values():
for ms in meta[u'mime_subtypes']:
cls.subtype_to_meta[ms] = meta
@classmethod
def lookupByMimeSubType(cls, mime_subtype):
if mime_subtype.lower() in cls.subtype_to_meta:
            return cls.subtype_to_meta[mime_subtype.lower()]
else:
return None
# do init once
FileCompressionType.init()
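# A minimal usage sketch (not part of the original module): look up compression
# metadata by MIME subtype. The subtype strings below are illustrative; unknown
# subtypes yield None rather than raising.
if __name__ == '__main__':
    gzip_meta = FileCompressionType.lookupByMimeSubType(u'x-gzip')
    assert gzip_meta[u'name'] == u'GZIP'
    assert gzip_meta[u'file_extension'] == u'.gz'
    assert FileCompressionType.lookupByMimeSubType(u'not-a-subtype') is None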
|
apache-2.0
|
shakamunyi/os-cloud-config
|
os_cloud_config/cmd/init_keystone.py
|
1
|
3490
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import textwrap
from os_cloud_config.cmd.utils import environment
from os_cloud_config.keystone import initialize
def parse_args():
description = textwrap.dedent("""
Perform initial setup of keystone for a new cloud.
This will create the admin and service tenants, the admin role, the admin
user, configure certificates and finally register the initial identity
endpoint, after which Keystone may be used with normal authentication.
This command will wait for a user-specified amount of time for a Keystone
service to be running on the specified host. The default is a 10 minute
wait time with 10 seconds between poll attempts.
""")
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description)
parser.add_argument('-o', '--host', dest='host', required=True,
help="ip/hostname of node where Keystone is running")
parser.add_argument('-t', '--admin-token', dest='admin_token',
help="admin token to use with Keystone's admin "
"endpoint", required=True)
parser.add_argument('-e', '--admin-email', dest='admin_email',
help="admin user's e-mail address to be set",
required=True)
parser.add_argument('-p', '--admin-password', dest='admin_password',
help="admin user's password to be set",
required=True)
parser.add_argument('-r', '--region', dest='region', default='regionOne',
help="region to create the endpoint in")
group = parser.add_mutually_exclusive_group()
group.add_argument('-s', '--ssl', dest='ssl',
help="ip/hostname to use as the ssl endpoint, if "
"required")
group.add_argument('--public', dest='public',
help="ip/hostname to use as the public endpoint, if "
"the default is not suitable")
parser.add_argument('-u', '--user', dest='user', required=True,
help="user to connect to the Keystone node via ssh")
parser.add_argument('--timeout', dest='timeout', default=600, type=int,
help="Total seconds to wait for keystone to be ready")
parser.add_argument('--poll-interval', dest='pollinterval',
default=10, type=int,
help="Seconds to wait between keystone checks")
environment._add_logging_arguments(parser)
return parser.parse_args()
def main():
args = parse_args()
environment._configure_logging(args)
initialize(args.host, args.admin_token, args.admin_email,
args.admin_password, args.region, args.ssl, args.public,
args.user, args.timeout, args.pollinterval)
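# A minimal usage sketch (not part of the original module): drive the CLI entry
# point directly. The host, token, credentials and ssh user below are
# placeholders; the call blocks until the Keystone endpoint responds.
if __name__ == '__main__':
    import sys
    sys.argv = ['init-keystone',
                '--host', '192.0.2.10',
                '--admin-token', 'ADMIN_TOKEN',
                '--admin-email', '[email protected]',
                '--admin-password', 'secret',
                '--user', 'heat-admin']
    main()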
|
apache-2.0
|
rdo-management/neutron
|
neutron/plugins/metaplugin/meta_models_v2.py
|
45
|
1594
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import Column, String
from neutron.db import models_v2
class NetworkFlavor(models_v2.model_base.BASEV2):
"""Represents a binding of network_id to flavor."""
flavor = Column(String(255))
network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id',
ondelete="CASCADE"),
primary_key=True)
def __repr__(self):
return "<NetworkFlavor(%s,%s)>" % (self.flavor, self.network_id)
class RouterFlavor(models_v2.model_base.BASEV2):
"""Represents a binding of router_id to flavor."""
flavor = Column(String(255))
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id',
ondelete="CASCADE"),
primary_key=True)
def __repr__(self):
return "<RouterFlavor(%s,%s)>" % (self.flavor, self.router_id)
|
apache-2.0
|
dsajkl/reqiop
|
common/djangoapps/track/views/tests/test_segmentio.py
|
2
|
13634
|
"""Ensure we can parse events sent to us from the segment.io webhook integration"""
from datetime import datetime
import json
from ddt import ddt, data, unpack
from mock import sentinel
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase
from track.views import segmentio
SECRET = 'anything'
ENDPOINT = '/segmentio/test/event'
USER_ID = 10
MOBILE_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
},
{
'ENGINE': 'track.shim.VideoEventProcessor'
}
]
def expect_failure_with_message(message):
"""Ensure the test raises an exception and does not emit an event"""
def test_decorator(func):
def test_decorated(self, *args, **kwargs):
self.assertRaisesRegexp(segmentio.EventValidationError, message, func, self, *args, **kwargs)
self.assert_no_events_emitted()
return test_decorated
return test_decorator
@ddt
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=SECRET,
TRACKING_IGNORE_URL_PATTERNS=[ENDPOINT],
TRACKING_SEGMENTIO_ALLOWED_TYPES=['track'],
TRACKING_SEGMENTIO_SOURCE_MAP={'test-app': 'mobile'},
EVENT_TRACKING_PROCESSORS=MOBILE_SHIM_PROCESSOR,
)
class SegmentIOTrackingTestCase(EventTrackingTestCase):
"""Test processing of segment.io events"""
def setUp(self):
super(SegmentIOTrackingTestCase, self).setUp()
self.request_factory = RequestFactory()
def test_get_request(self):
request = self.request_factory.get(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 405)
self.assert_no_events_emitted()
@override_settings(
TRACKING_SEGMENTIO_WEBHOOK_SECRET=None
)
def test_no_secret_config(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_no_secret_provided(self):
request = self.request_factory.post(ENDPOINT)
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def test_secret_mismatch(self):
request = self.create_request(key='y')
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 401)
self.assert_no_events_emitted()
def create_request(self, key=None, **kwargs):
"""Create a fake request that emulates a request from the segment.io servers to ours"""
if key is None:
key = SECRET
request = self.request_factory.post(ENDPOINT + "?key=" + key, **kwargs)
if 'data' in kwargs:
request.json = json.loads(kwargs['data'])
return request
    @data('Identify', 'Group', 'Alias', 'Page', 'identify', 'screen')
@expect_failure_with_message(segmentio.WARNING_IGNORED_TYPE)
def test_segmentio_ignore_actions(self, action):
self.post_segmentio_event(action=action)
def post_segmentio_event(self, **kwargs):
"""Post a fake segment.io event to the view that processes it"""
request = self.create_request(
data=self.create_segmentio_event_json(**kwargs),
content_type='application/json'
)
segmentio.track_segmentio_event(request)
def create_segmentio_event(self, **kwargs):
"""Populate a fake segment.io event with data of interest"""
action = kwargs.get('action', 'Track')
sample_event = {
"userId": kwargs.get('user_id', USER_ID),
"event": "Did something",
"properties": {
'name': kwargs.get('name', str(sentinel.name)),
'data': kwargs.get('data', {}),
'context': {
'course_id': kwargs.get('course_id') or '',
}
},
"channel": 'server',
"context": {
"library": {
"name": kwargs.get('library_name', 'test-app'),
"version": "unknown"
},
'userAgent': str(sentinel.user_agent),
},
"receivedAt": "2014-08-27T16:33:39.100Z",
"timestamp": "2014-08-27T16:33:39.215Z",
"type": action.lower(),
"projectId": "u0j33yjkr8",
"messageId": "qy52hwp4",
"version": 2,
"integrations": {},
"options": {
"library": "unknown",
"providers": {}
},
"action": action
}
if 'context' in kwargs:
sample_event['properties']['context'].update(kwargs['context'])
return sample_event
def create_segmentio_event_json(self, **kwargs):
"""Return a json string containing a fake segment.io event"""
return json.dumps(self.create_segmentio_event(**kwargs))
@expect_failure_with_message(segmentio.WARNING_IGNORED_SOURCE)
def test_segmentio_ignore_unknown_libraries(self):
self.post_segmentio_event(library_name='foo')
@expect_failure_with_message(segmentio.ERROR_USER_NOT_EXIST)
def test_no_user_for_user_id(self):
self.post_segmentio_event(user_id=40)
@expect_failure_with_message(segmentio.ERROR_INVALID_USER_ID)
def test_invalid_user_id(self):
self.post_segmentio_event(user_id='foobar')
@data('foo/bar/baz', 'course-v1:foo+bar+baz')
def test_success(self, course_id):
middleware = TrackMiddleware()
request = self.create_request(
data=self.create_segmentio_event_json(data={'foo': 'bar'}, course_id=course_id),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
# The middleware normally emits an event, make sure it doesn't in this case.
self.assert_no_events_emitted()
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event = {
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': str(sentinel.name),
'name': str(sentinel.name),
'event': {'foo': 'bar'},
'agent': str(sentinel.user_agent),
'page': None,
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
}
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
},
}
finally:
middleware.process_response(request, None)
self.assertEquals(self.get_event(), expected_event)
def test_invalid_course_id(self):
request = self.create_request(
data=self.create_segmentio_event_json(course_id='invalid'),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
self.assert_events_emitted()
@expect_failure_with_message(segmentio.ERROR_MISSING_NAME)
def test_missing_name(self):
sample_event_raw = self.create_segmentio_event()
del sample_event_raw['properties']['name']
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_TIMESTAMP)
def test_missing_timestamp(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
@expect_failure_with_message(segmentio.ERROR_MISSING_RECEIVED_AT)
def test_missing_received_at(self):
sample_event_raw = self.create_event_without_fields('receivedAt')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
segmentio.track_segmentio_event(request)
def create_event_without_fields(self, *fields):
"""Create a fake event and remove some fields from it"""
event = self.create_segmentio_event()
for field in fields:
if field in event:
del event[field]
return event
def test_string_user_id(self):
User.objects.create(pk=USER_ID, username=str(sentinel.username))
self.post_segmentio_event(user_id=str(USER_ID))
self.assert_events_emitted()
def test_hiding_failure(self):
sample_event_raw = self.create_event_without_fields('timestamp')
request = self.create_request(
data=json.dumps(sample_event_raw),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
self.assert_no_events_emitted()
@data(
('edx.video.played', 'play_video'),
('edx.video.paused', 'pause_video'),
('edx.video.stopped', 'stop_video'),
('edx.video.loaded', 'load_video'),
('edx.video.transcript.shown', 'show_transcript'),
('edx.video.transcript.hidden', 'hide_transcript'),
)
@unpack
def test_video_event(self, name, event_type):
course_id = 'foo/bar/baz'
middleware = TrackMiddleware()
input_payload = {
'current_time': 132.134456,
'module_id': 'i4x://foo/bar/baz/some_module',
'code': 'mobile'
}
if name == 'edx.video.loaded':
del input_payload['current_time']
request = self.create_request(
data=self.create_segmentio_event_json(
name=name,
data=input_payload,
context={
'course_id': course_id,
'browser_page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity/2',
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
}
}),
content_type='application/json'
)
User.objects.create(pk=USER_ID, username=str(sentinel.username))
middleware.process_request(request)
try:
response = segmentio.segmentio_event(request)
self.assertEquals(response.status_code, 200)
expected_event_without_payload = {
'username': str(sentinel.username),
'ip': '',
'session': '',
'event_source': 'mobile',
'event_type': event_type,
'name': name,
'agent': str(sentinel.user_agent),
'page': 'https://testserver/courses/foo/bar/baz/courseware/Week_1/Activity',
'time': datetime.strptime("2014-08-27T16:33:39.215Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'host': 'testserver',
'context': {
'user_id': USER_ID,
'course_id': course_id,
'org_id': 'foo',
'path': ENDPOINT,
'client': {
'library': {
'name': 'test-app',
'version': 'unknown'
}
},
'received_at': datetime.strptime("2014-08-27T16:33:39.100Z", "%Y-%m-%dT%H:%M:%S.%fZ"),
'application': {
'name': 'edx.mobileapp.android',
'version': '29',
'component': 'videoplayer'
}
},
}
expected_payload = {
'currentTime': 132.134456,
'id': 'i4x-foo-bar-baz-some_module',
'code': 'mobile'
}
if name == 'edx.video.loaded':
del expected_payload['currentTime']
finally:
middleware.process_response(request, None)
actual_event = dict(self.get_event())
payload = json.loads(actual_event.pop('event'))
self.assertEquals(actual_event, expected_event_without_payload)
self.assertEquals(payload, expected_payload)
|
agpl-3.0
|
sargas/scipy
|
scipy/optimize/nonlin.py
|
1
|
46280
|
r"""
.. module:: scipy.optimize.nonlin
=================
Nonlinear solvers
=================
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
========
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
========
Small problem
-------------
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
Large problem
-------------
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <[email protected]>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy.lib.six import callable, exec_
from scipy.lib.six.moves import xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
        If True, a `NoConvergence` exception is raised if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
    eta_threshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_threshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
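# A minimal usage sketch (not from the original source): call the generic driver
# directly with a named Jacobian approximation; the toy residual mirrors the
# module docstring example.
#
#   >>> def F(x):
#   ...     return np.cos(x) + x[::-1] - [1, 2, 3, 4]
#   >>> x = nonlin_solve(F, [1, 1, 1, 1], jacobian='broyden1', f_tol=1e-14)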
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
        self.norm = maxnorm if norm is None else norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
            self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
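# Illustrative forms accepted by asjacobian (a hedged sketch, not exhaustive):
#
#   >>> asjacobian(np.eye(3))                        # fixed dense Jacobian
#   >>> asjacobian('broyden1')                       # builtin approximation by name
#   >>> asjacobian(lambda x: 2.0 * np.eye(x.size))   # callable returning J(x)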
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# autoscale the initial Jacobian parameter
self.alpha = 0.5*max(norm(x0), 1) / norm(f0)
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [vR]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
      Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed by solving for a 'best' solution in the space
    spanned by the last `M` vectors. As a result, only an MxM matrix
    inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
        Good values are of the order of 0.01 (compared to unity).
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can also use inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.inverse)
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by numerical
differentiation:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [KK]_,
and for the LGMRES sparse inverse method, see [BJM]_.
References
----------
.. [KK] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003).
.. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
It inspects the keyword arguments of ``jac.__init__`` and allows the same
arguments to be used in the wrapper function, in addition to the keyword
arguments of `nonlin_solve`.
"""
import inspect
args, varargs, varkw, defaults = inspect.getargspec(jac.__init__)
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec_(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
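# --- Illustrative usage (added for this document; not part of the original
# module). A minimal sketch of calling the wrapper solvers defined above on a
# small system of equations; the test function and tolerance below are
# hypothetical, chosen to mirror the example in scipy's nonlinear-solver docs.
if __name__ == "__main__":
    def _demo_residual(x):
        # Residual of cos(x) + x[::-1] = [1, 2, 3, 4]; x is a length-4 array.
        return np.cos(x) + x[::-1] - [1, 2, 3, 4]

    _x0 = [1.0, 1.0, 1.0, 1.0]
    # Quasi-Newton (Broyden's "good" method) and Newton-Krylov should find the
    # same root here; they differ in how the inverse Jacobian is modelled.
    print(broyden1(_demo_residual, _x0, f_tol=1e-10))
    print(newton_krylov(_demo_residual, _x0, f_tol=1e-10))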
|
bsd-3-clause
|
Jet-Streaming/framework
|
deps/googletest/googlemock/scripts/fuse_gmock_files.py
|
242
|
8631
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gmock_files.py v0.1.0
Fuses Google Mock and Google Test source code into two .h files and a .cc file.
SYNOPSIS
fuse_gmock_files.py [GMOCK_ROOT_DIR] OUTPUT_DIR
Scans GMOCK_ROOT_DIR for Google Mock and Google Test source
code, assuming Google Test is in the GMOCK_ROOT_DIR/../googletest
directory, and generates three files:
OUTPUT_DIR/gtest/gtest.h, OUTPUT_DIR/gmock/gmock.h, and
OUTPUT_DIR/gmock-gtest-all.cc. Then you can build your tests
by adding OUTPUT_DIR to the include search path and linking
with OUTPUT_DIR/gmock-gtest-all.cc. These three files contain
everything you need to use Google Mock. Hence you can
"install" Google Mock by copying them to wherever you want.
GMOCK_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gmock_files.py fused_gmock
./fuse_gmock_files.py path/to/unpacked/gmock fused_gmock
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Mock or Google Test headers. Please
report any problems to [email protected]. You can read
http://code.google.com/p/googlemock/wiki/CookBook for more
information.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Mock root directory.
DEFAULT_GMOCK_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# We need to call into googletest/scripts/fuse_gtest_files.py.
sys.path.append(os.path.join(DEFAULT_GMOCK_ROOT_DIR, '../googletest/scripts'))
import fuse_gtest_files
gtest = fuse_gtest_files
# Regex for matching '#include "gmock/..."'.
INCLUDE_GMOCK_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gmock/.+)"')
# Where to find the source seed files.
GMOCK_H_SEED = 'include/gmock/gmock.h'
GMOCK_ALL_CC_SEED = 'src/gmock-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GMOCK_H_OUTPUT = 'gmock/gmock.h'
GMOCK_GTEST_ALL_CC_OUTPUT = 'gmock-gtest-all.cc'
def GetGTestRootDir(gmock_root):
"""Returns the root directory of Google Test."""
return os.path.join(gmock_root, '../googletest')
def ValidateGMockRootDir(gmock_root):
"""Makes sure gmock_root points to a valid gmock root directory.
The function aborts the program on failure.
"""
gtest.ValidateGTestRootDir(GetGTestRootDir(gmock_root))
gtest.VerifyFileExists(gmock_root, GMOCK_H_SEED)
gtest.VerifyFileExists(gmock_root, GMOCK_ALL_CC_SEED)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
gtest.VerifyOutputFile(output_dir, gtest.GTEST_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_H_OUTPUT)
gtest.VerifyOutputFile(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT)
def FuseGMockH(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock/gmock.h in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gmock headers we've processed.
def ProcessFile(gmock_header_path):
"""Processes the given gmock header file."""
# We don't process the same header twice.
if gmock_header_path in processed_files:
return
processed_files.add(gmock_header_path)
# Reads each line in the given gmock header.
for line in file(os.path.join(gmock_root, gmock_header_path), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/foo.h"'. We translate it to
# "gtest/gtest.h", regardless of what foo is, since all
# gtest headers are fused into gtest/gtest.h.
# There is no need to #include gtest.h twice.
if not gtest.GTEST_H_SEED in processed_files:
processed_files.add(gtest.GTEST_H_SEED)
output_file.write('#include "%s"\n' % (gtest.GTEST_H_OUTPUT,))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_H_SEED)
output_file.close()
def FuseGMockAllCcToFile(gmock_root, output_file):
"""Scans folder gmock_root to fuse gmock-all.cc into output_file."""
processed_files = sets.Set()
def ProcessFile(gmock_source_file):
"""Processes the given gmock source file."""
# We don't process the same #included file twice.
if gmock_source_file in processed_files:
return
processed_files.add(gmock_source_file)
# Reads each line in the given gmock source file.
for line in file(os.path.join(gmock_root, gmock_source_file), 'r'):
m = INCLUDE_GMOCK_FILE_REGEX.match(line)
if m:
# It's '#include "gmock/foo.h"'. We treat it as '#include
# "gmock/gmock.h"', as all other gmock headers are being fused
# into gmock.h and cannot be #included directly.
# There is no need to #include "gmock/gmock.h" more than once.
if not GMOCK_H_SEED in processed_files:
processed_files.add(GMOCK_H_SEED)
output_file.write('#include "%s"\n' % (GMOCK_H_OUTPUT,))
else:
m = gtest.INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."'.
# There is no need to #include gtest.h as it has been
# #included by gtest-all.cc.
pass
else:
m = gtest.INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GMOCK_ALL_CC_SEED)
def FuseGMockGTestAllCc(gmock_root, output_dir):
"""Scans folder gmock_root to generate gmock-gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GMOCK_GTEST_ALL_CC_OUTPUT), 'w')
# First, fuse gtest-all.cc into gmock-gtest-all.cc.
gtest.FuseGTestAllCcToFile(GetGTestRootDir(gmock_root), output_file)
# Next, append fused gmock-all.cc to gmock-gtest-all.cc.
FuseGMockAllCcToFile(gmock_root, output_file)
output_file.close()
def FuseGMock(gmock_root, output_dir):
"""Fuses gtest.h, gmock.h, and gmock-gtest-all.h."""
ValidateGMockRootDir(gmock_root)
ValidateOutputDir(output_dir)
gtest.FuseGTestH(GetGTestRootDir(gmock_root), output_dir)
FuseGMockH(gmock_root, output_dir)
FuseGMockGTestAllCc(gmock_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gmock_files.py OUTPUT_DIR
FuseGMock(DEFAULT_GMOCK_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gmock_files.py GMOCK_ROOT_DIR OUTPUT_DIR
FuseGMock(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
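# --- Illustrative build step (added for this document; not part of the
# original script). A minimal sketch of compiling a test against the fused
# sources, following the SYNOPSIS above; the compiler invocation and test
# file name are placeholders, not something this script runs for you.
#
#   ./fuse_gmock_files.py fused_gmock
#   g++ -Ifused_gmock mytest.cc fused_gmock/gmock-gtest-all.cc -lpthread -o mytest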
|
mpl-2.0
|
ashishnerkar1/scrapy
|
tests/test_utils_iterators.py
|
11
|
13686
|
import os
from twisted.trial import unittest
from scrapy.utils.iterators import csviter, xmliter, _body_or_str
from scrapy.contrib_exp.iterators import xmliter_lxml
from scrapy.http import XmlResponse, TextResponse, Response
from tests import get_testdata
FOOBAR_NL = u"foo" + os.linesep + u"bar"
class XmliterTestCase(unittest.TestCase):
xmliter = staticmethod(xmliter)
def test_xmliter(self):
body = """<?xml version="1.0" encoding="UTF-8"?>\
<products xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="someschmea.xsd">\
<product id="001">\
<type>Type 1</type>\
<name>Name 1</name>\
</product>\
<product id="002">\
<type>Type 2</type>\
<name>Name 2</name>\
</product>\
</products>"""
response = XmlResponse(url="http://example.com", body=body)
attrs = []
for x in self.xmliter(response, 'product'):
attrs.append((x.xpath("@id").extract(), x.xpath("name/text()").extract(), x.xpath("./type/text()").extract()))
self.assertEqual(attrs,
[(['001'], ['Name 1'], ['Type 1']), (['002'], ['Name 2'], ['Type 2'])])
def test_xmliter_text(self):
body = u"""<?xml version="1.0" encoding="UTF-8"?><products><product>one</product><product>two</product></products>"""
self.assertEqual([x.xpath("text()").extract() for x in self.xmliter(body, 'product')],
[[u'one'], [u'two']])
def test_xmliter_namespaces(self):
body = """\
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:g="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<g:image_link>http://www.mydummycompany.com/images/item1.jpg</g:image_link>
<g:id>ITEM_1</g:id>
<g:price>400</g:price>
</item>
</channel>
</rss>
"""
response = XmlResponse(url='http://mydummycompany.com', body=body)
my_iter = self.xmliter(response, 'item')
node = next(my_iter)
node.register_namespace('g', 'http://base.google.com/ns/1.0')
self.assertEqual(node.xpath('title/text()').extract(), ['Item 1'])
self.assertEqual(node.xpath('description/text()').extract(), ['This is item 1'])
self.assertEqual(node.xpath('link/text()').extract(), ['http://www.mydummycompany.com/items/1'])
self.assertEqual(node.xpath('g:image_link/text()').extract(), ['http://www.mydummycompany.com/images/item1.jpg'])
self.assertEqual(node.xpath('g:id/text()').extract(), ['ITEM_1'])
self.assertEqual(node.xpath('g:price/text()').extract(), ['400'])
self.assertEqual(node.xpath('image_link/text()').extract(), [])
self.assertEqual(node.xpath('id/text()').extract(), [])
self.assertEqual(node.xpath('price/text()').extract(), [])
def test_xmliter_exception(self):
body = u"""<?xml version="1.0" encoding="UTF-8"?><products><product>one</product><product>two</product></products>"""
iter = self.xmliter(body, 'product')
next(iter)
next(iter)
self.assertRaises(StopIteration, next, iter)
def test_xmliter_encoding(self):
body = '<?xml version="1.0" encoding="ISO-8859-9"?>\n<xml>\n <item>Some Turkish Characters \xd6\xc7\xde\xdd\xd0\xdc \xfc\xf0\xfd\xfe\xe7\xf6</item>\n</xml>\n\n'
response = XmlResponse('http://www.example.com', body=body)
self.assertEqual(
self.xmliter(response, 'item').next().extract(),
u'<item>Some Turkish Characters \xd6\xc7\u015e\u0130\u011e\xdc \xfc\u011f\u0131\u015f\xe7\xf6</item>'
)
class LxmlXmliterTestCase(XmliterTestCase):
xmliter = staticmethod(xmliter_lxml)
def test_xmliter_iterate_namespace(self):
body = """\
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<image_link>http://www.mydummycompany.com/images/item1.jpg</image_link>
<image_link>http://www.mydummycompany.com/images/item2.jpg</image_link>
</item>
</channel>
</rss>
"""
response = XmlResponse(url='http://mydummycompany.com', body=body)
no_namespace_iter = self.xmliter(response, 'image_link')
self.assertEqual(len(list(no_namespace_iter)), 0)
namespace_iter = self.xmliter(response, 'image_link', 'http://base.google.com/ns/1.0')
node = next(namespace_iter)
self.assertEqual(node.xpath('text()').extract(), ['http://www.mydummycompany.com/images/item1.jpg'])
node = next(namespace_iter)
self.assertEqual(node.xpath('text()').extract(), ['http://www.mydummycompany.com/images/item2.jpg'])
class UtilsCsvTestCase(unittest.TestCase):
sample_feeds_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sample_data', 'feeds')
sample_feed_path = os.path.join(sample_feeds_dir, 'feed-sample3.csv')
sample_feed2_path = os.path.join(sample_feeds_dir, 'feed-sample4.csv')
sample_feed3_path = os.path.join(sample_feeds_dir, 'feed-sample5.csv')
def test_csviter_defaults(self):
body = get_testdata('feeds', 'feed-sample3.csv')
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
result = [row for row in csv]
self.assertEqual(result,
[{u'id': u'1', u'name': u'alpha', u'value': u'foobar'},
{u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'},
{u'id': u'3', u'name': u'multi', u'value': FOOBAR_NL},
{u'id': u'4', u'name': u'empty', u'value': u''}])
# explicit type check cuz' we no like stinkin' autocasting! yarrr
for result_row in result:
self.assert_(all((isinstance(k, unicode) for k in result_row.keys())))
self.assert_(all((isinstance(v, unicode) for v in result_row.values())))
def test_csviter_delimiter(self):
body = get_testdata('feeds', 'feed-sample3.csv').replace(',', '\t')
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response, delimiter='\t')
self.assertEqual([row for row in csv],
[{u'id': u'1', u'name': u'alpha', u'value': u'foobar'},
{u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'},
{u'id': u'3', u'name': u'multi', u'value': FOOBAR_NL},
{u'id': u'4', u'name': u'empty', u'value': u''}])
def test_csviter_quotechar(self):
body1 = get_testdata('feeds', 'feed-sample6.csv')
body2 = get_testdata('feeds', 'feed-sample6.csv').replace(",", '|')
response1 = TextResponse(url="http://example.com/", body=body1)
csv1 = csviter(response1, quotechar="'")
self.assertEqual([row for row in csv1],
[{u'id': u'1', u'name': u'alpha', u'value': u'foobar'},
{u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'},
{u'id': u'3', u'name': u'multi', u'value': FOOBAR_NL},
{u'id': u'4', u'name': u'empty', u'value': u''}])
response2 = TextResponse(url="http://example.com/", body=body2)
csv2 = csviter(response2, delimiter="|", quotechar="'")
self.assertEqual([row for row in csv2],
[{u'id': u'1', u'name': u'alpha', u'value': u'foobar'},
{u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'},
{u'id': u'3', u'name': u'multi', u'value': FOOBAR_NL},
{u'id': u'4', u'name': u'empty', u'value': u''}])
def test_csviter_wrong_quotechar(self):
body = get_testdata('feeds', 'feed-sample6.csv')
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
self.assertEqual([row for row in csv],
[{u"'id'": u"1", u"'name'": u"'alpha'", u"'value'": u"'foobar'"},
{u"'id'": u"2", u"'name'": u"'unicode'", u"'value'": u"'\xfan\xedc\xf3d\xe9\u203d'"},
{u"'id'": u"'3'", u"'name'": u"'multi'", u"'value'": u"'foo"},
{u"'id'": u"4", u"'name'": u"'empty'", u"'value'": u""}])
def test_csviter_delimiter_binary_response_assume_utf8_encoding(self):
body = get_testdata('feeds', 'feed-sample3.csv').replace(',', '\t')
response = Response(url="http://example.com/", body=body)
csv = csviter(response, delimiter='\t')
self.assertEqual([row for row in csv],
[{u'id': u'1', u'name': u'alpha', u'value': u'foobar'},
{u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'},
{u'id': u'3', u'name': u'multi', u'value': FOOBAR_NL},
{u'id': u'4', u'name': u'empty', u'value': u''}])
def test_csviter_headers(self):
sample = get_testdata('feeds', 'feed-sample3.csv').splitlines()
headers, body = sample[0].split(','), '\n'.join(sample[1:])
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response, headers=headers)
self.assertEqual([row for row in csv],
[{u'id': u'1', u'name': u'alpha', u'value': u'foobar'},
{u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'},
{u'id': u'3', u'name': u'multi', u'value': u'foo\nbar'},
{u'id': u'4', u'name': u'empty', u'value': u''}])
def test_csviter_falserow(self):
body = get_testdata('feeds', 'feed-sample3.csv')
body = '\n'.join((body, 'a,b', 'a,b,c,d'))
response = TextResponse(url="http://example.com/", body=body)
csv = csviter(response)
self.assertEqual([row for row in csv],
[{u'id': u'1', u'name': u'alpha', u'value': u'foobar'},
{u'id': u'2', u'name': u'unicode', u'value': u'\xfan\xedc\xf3d\xe9\u203d'},
{u'id': u'3', u'name': u'multi', u'value': FOOBAR_NL},
{u'id': u'4', u'name': u'empty', u'value': u''}])
def test_csviter_exception(self):
body = get_testdata('feeds', 'feed-sample3.csv')
response = TextResponse(url="http://example.com/", body=body)
iter = csviter(response)
next(iter)
next(iter)
next(iter)
next(iter)
self.assertRaises(StopIteration, next, iter)
def test_csviter_encoding(self):
body1 = get_testdata('feeds', 'feed-sample4.csv')
body2 = get_testdata('feeds', 'feed-sample5.csv')
response = TextResponse(url="http://example.com/", body=body1, encoding='latin1')
csv = csviter(response)
self.assertEqual([row for row in csv],
[{u'id': u'1', u'name': u'latin1', u'value': u'test'},
{u'id': u'2', u'name': u'something', u'value': u'\xf1\xe1\xe9\xf3'}])
response = TextResponse(url="http://example.com/", body=body2, encoding='cp852')
csv = csviter(response)
self.assertEqual([row for row in csv],
[{u'id': u'1', u'name': u'cp852', u'value': u'test'},
{u'id': u'2', u'name': u'something', u'value': u'\u255a\u2569\u2569\u2569\u2550\u2550\u2557'}])
class TestHelper(unittest.TestCase):
bbody = b'utf8-body'
ubody = bbody.decode('utf8')
txtresponse = TextResponse(url='http://example.org/', body=bbody, encoding='utf-8')
response = Response(url='http://example.org/', body=bbody)
def test_body_or_str(self):
for obj in (self.bbody, self.ubody, self.txtresponse, self.response):
r1 = _body_or_str(obj)
self._assert_type_and_value(r1, self.ubody, obj)
r2 = _body_or_str(obj, unicode=True)
self._assert_type_and_value(r2, self.ubody, obj)
r3 = _body_or_str(obj, unicode=False)
self._assert_type_and_value(r3, self.bbody, obj)
self.assertTrue(type(r1) is type(r2))
self.assertTrue(type(r1) is not type(r3))
def _assert_type_and_value(self, a, b, obj):
self.assertTrue(type(a) is type(b),
'Got {}, expected {} for {!r}'.format(type(a), type(b), obj))
self.assertEqual(a, b)
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
jehutting/kivy
|
kivy/input/providers/tuio.py
|
43
|
12049
|
'''
TUIO Input Provider
===================
TUIO is the de facto standard network protocol for the transmission of
touch and fiducial information between a server and a client. To learn
more about TUIO (which is itself based on the OSC protocol), please
refer to http://tuio.org -- The specification should be of special
interest.
Configure a TUIO provider in the config.ini
-------------------------------------------
The TUIO provider can be configured in the configuration file in the
``[input]`` section::
[input]
# name = tuio,<ip>:<port>
multitouchtable = tuio,192.168.0.1:3333
Configure a TUIO provider in the App
------------------------------------
You must add the provider before your application is run, like this::
from kivy.app import App
from kivy.config import Config
class TestApp(App):
def build(self):
Config.set('input', 'multitouchscreen1', 'tuio,0.0.0.0:3333')
# You can also add a second TUIO listener
# Config.set('input', 'source2', 'tuio,0.0.0.0:3334')
# Then do the usual things
# ...
return
'''
__all__ = ('TuioMotionEventProvider', 'Tuio2dCurMotionEvent',
'Tuio2dObjMotionEvent')
from kivy.lib import osc
from collections import deque
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.input.motionevent import MotionEvent
from kivy.input.shape import ShapeRect
from kivy.logger import Logger
class TuioMotionEventProvider(MotionEventProvider):
'''The TUIO provider listens to a socket and handles some of the incoming
OSC messages:
* /tuio/2Dcur
* /tuio/2Dobj
You can easily extend the provider to handle new TUIO paths like so::
# Create a class to handle the new TUIO type/path
# Replace NEWPATH with the pathname you want to handle
class TuioNEWPATHMotionEvent(MotionEvent):
def __init__(self, id, args):
super(TuioNEWPATHMotionEvent, self).__init__(id, args)
def depack(self, args):
# In this method, implement 'unpacking' for the received
# arguments. you basically translate from TUIO args to Kivy
# MotionEvent variables. If all you receive are x and y
# values, you can do it like this:
if len(args) == 2:
self.sx, self.sy = args
self.profile = ('pos', )
self.sy = 1 - self.sy
super(TuioNEWPATHMotionEvent, self).depack(args)
# Register it with the TUIO MotionEvent provider.
# You obviously need to replace the PATH placeholders appropriately.
TuioMotionEventProvider.register('/tuio/PATH', TuioNEWPATHMotionEvent)
.. note::
The class name is of no technical importance. Your class will be
associated with the path that you pass to the ``register()``
function. To keep things simple, you should name your class after the
path that it handles, though.
'''
__handlers__ = {}
def __init__(self, device, args):
super(TuioMotionEventProvider, self).__init__(device, args)
args = args.split(',')
if len(args) <= 0:
Logger.error('Tuio: Invalid configuration for TUIO provider')
Logger.error('Tuio: Format must be ip:port (eg. 127.0.0.1:3333)')
err = 'Tuio: Current configuration is <%s>' % (str(','.join(args)))
Logger.error(err)
return None
ipport = args[0].split(':')
if len(ipport) != 2:
Logger.error('Tuio: Invalid configuration for TUIO provider')
Logger.error('Tuio: Format must be ip:port (eg. 127.0.0.1:3333)')
err = 'Tuio: Current configuration is <%s>' % (str(','.join(args)))
Logger.error(err)
return None
self.ip, self.port = args[0].split(':')
self.port = int(self.port)
self.handlers = {}
self.oscid = None
self.tuio_event_q = deque()
self.touches = {}
@staticmethod
def register(oscpath, classname):
'''Register a new path to handle in TUIO provider'''
TuioMotionEventProvider.__handlers__[oscpath] = classname
@staticmethod
def unregister(oscpath, classname):
'''Unregister a path to stop handling it in the TUIO provider'''
if oscpath in TuioMotionEventProvider.__handlers__:
del TuioMotionEventProvider.__handlers__[oscpath]
@staticmethod
def create(oscpath, **kwargs):
'''Create a touch event from a TUIO path'''
if oscpath not in TuioMotionEventProvider.__handlers__:
raise Exception('Unknown %s touch path' % oscpath)
return TuioMotionEventProvider.__handlers__[oscpath](**kwargs)
def start(self):
'''Start the TUIO provider'''
self.oscid = osc.listen(self.ip, self.port)
for oscpath in TuioMotionEventProvider.__handlers__:
self.touches[oscpath] = {}
osc.bind(self.oscid, self._osc_tuio_cb, oscpath)
def stop(self):
'''Stop the TUIO provider'''
osc.dontListen(self.oscid)
def update(self, dispatch_fn):
'''Update the TUIO provider (pop events from the queue)'''
# deque osc queue
osc.readQueue(self.oscid)
# read the Queue with event
while True:
try:
value = self.tuio_event_q.pop()
except IndexError:
# queue is empty, we're done for now
return
self._update(dispatch_fn, value)
def _osc_tuio_cb(self, *incoming):
message = incoming[0]
oscpath, types, args = message[0], message[1], message[2:]
self.tuio_event_q.appendleft([oscpath, args, types])
def _update(self, dispatch_fn, value):
oscpath, args, types = value
command = args[0]
# verify commands
if command not in ['alive', 'set']:
return
# move or create a new touch
if command == 'set':
id = args[1]
if id not in self.touches[oscpath]:
# new touch
touch = TuioMotionEventProvider.__handlers__[oscpath](
self.device, id, args[2:])
self.touches[oscpath][id] = touch
dispatch_fn('begin', touch)
else:
# update a current touch
touch = self.touches[oscpath][id]
touch.move(args[2:])
dispatch_fn('update', touch)
# alive event, check for deleted touch
if command == 'alive':
alives = args[1:]
to_delete = []
for id in self.touches[oscpath]:
if not id in alives:
# touch up
touch = self.touches[oscpath][id]
if not touch in to_delete:
to_delete.append(touch)
for touch in to_delete:
dispatch_fn('end', touch)
del self.touches[oscpath][touch.id]
class TuioMotionEvent(MotionEvent):
'''Abstraction for TUIO touches/fiducials.
Depending on the tracking software you use (e.g. Movid, CCV, etc.) and its
TUIO implementation, the TuioMotionEvent object can support multiple
profiles such as:
* Fiducial ID: profile name 'markerid', attribute ``.fid``
* Position: profile name 'pos', attributes ``.x``, ``.y``
* Angle: profile name 'angle', attribute ``.a``
* Velocity vector: profile name 'mov', attributes ``.X``, ``.Y``
* Rotation velocity: profile name 'rot', attribute ``.A``
* Motion acceleration: profile name 'motacc', attribute ``.m``
* Rotation acceleration: profile name 'rotacc', attribute ``.r``
'''
__attrs__ = ('a', 'b', 'c', 'X', 'Y', 'Z', 'A', 'B', 'C', 'm', 'r')
def __init__(self, device, id, args):
super(TuioMotionEvent, self).__init__(device, id, args)
# Default argument for TUIO touches
self.a = 0.0
self.b = 0.0
self.c = 0.0
self.X = 0.0
self.Y = 0.0
self.Z = 0.0
self.A = 0.0
self.B = 0.0
self.C = 0.0
self.m = 0.0
self.r = 0.0
angle = property(lambda self: self.a)
mot_accel = property(lambda self: self.m)
rot_accel = property(lambda self: self.r)
xmot = property(lambda self: self.X)
ymot = property(lambda self: self.Y)
zmot = property(lambda self: self.Z)
class Tuio2dCurMotionEvent(TuioMotionEvent):
'''A 2dCur TUIO touch.'''
def __init__(self, device, id, args):
super(Tuio2dCurMotionEvent, self).__init__(device, id, args)
def depack(self, args):
self.is_touch = True
if len(args) < 5:
self.sx, self.sy = list(map(float, args[0:2]))
self.profile = ('pos', )
elif len(args) == 5:
self.sx, self.sy, self.X, self.Y, self.m = list(map(float,
args[0:5]))
self.Y = -self.Y
self.profile = ('pos', 'mov', 'motacc')
else:
self.sx, self.sy, self.X, self.Y = list(map(float, args[0:4]))
self.m, width, height = list(map(float, args[4:7]))
self.Y = -self.Y
self.profile = ('pos', 'mov', 'motacc', 'shape')
if self.shape is None:
self.shape = ShapeRect()
self.shape.width = width
self.shape.height = height
self.sy = 1 - self.sy
super(Tuio2dCurMotionEvent, self).depack(args)
class Tuio2dObjMotionEvent(TuioMotionEvent):
'''A 2dObj TUIO object.
'''
def __init__(self, device, id, args):
super(Tuio2dObjMotionEvent, self).__init__(device, id, args)
def depack(self, args):
self.is_touch = True
if len(args) < 5:
self.sx, self.sy = args[0:2]
self.profile = ('pos', )
elif len(args) == 9:
self.fid, self.sx, self.sy, self.a, self.X, self.Y = args[:6]
self.A, self.m, self.r = args[6:9]
self.Y = -self.Y
self.profile = ('markerid', 'pos', 'angle', 'mov', 'rot',
'motacc', 'rotacc')
else:
self.fid, self.sx, self.sy, self.a, self.X, self.Y = args[:6]
self.A, self.m, self.r, width, height = args[6:11]
self.Y = -self.Y
self.profile = ('markerid', 'pos', 'angle', 'mov', 'rot', 'rotacc',
'acc', 'shape')
if self.shape is None:
self.shape = ShapeRect()
self.shape.width = width
self.shape.height = height
self.sy = 1 - self.sy
super(Tuio2dObjMotionEvent, self).depack(args)
class Tuio2dBlbMotionEvent(TuioMotionEvent):
'''A 2dBlb TUIO object.
# FIXME: 3d shapes are not supported
/tuio/2Dobj set s i x y a X Y A m r
/tuio/2Dblb set s x y a w h f X Y A m r
'''
def __init__(self, device, id, args):
super(Tuio2dBlbMotionEvent, self).__init__(device, id, args)
def depack(self, args):
self.is_touch = True
self.sx, self.sy, self.a, self.X, self.Y, sw, sh, sd, \
self.A, self.m, self.r = args
self.Y = -self.Y
self.profile = ('pos', 'angle', 'mov', 'rot', 'rotacc',
'acc', 'shape')
if self.shape is None:
self.shape = ShapeRect()
self.shape.width = sw
self.shape.height = sh
self.sy = 1 - self.sy
super(Tuio2dBlbMotionEvent, self).depack(args)
# registers
TuioMotionEventProvider.register('/tuio/2Dcur', Tuio2dCurMotionEvent)
TuioMotionEventProvider.register('/tuio/2Dobj', Tuio2dObjMotionEvent)
TuioMotionEventProvider.register('/tuio/2Dblb', Tuio2dBlbMotionEvent)
MotionEventFactory.register('tuio', TuioMotionEventProvider)
|
mit
|
NeCTAR-RC/nova
|
nova/db/sqlalchemy/migrate_repo/versions/294_add_service_heartbeat.py
|
72
|
1061
|
# Copyright (c) 2015 Wind River Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column, DateTime
BASE_TABLE_NAME = 'services'
NEW_COLUMN_NAME = 'last_seen_up'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, DateTime, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
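# --- Illustrative check (added for this document; not part of the original
# migration). A minimal sketch of verifying the new column once the migration
# has been applied; the engine URL below is a placeholder.
#
#   from sqlalchemy import MetaData, Table, create_engine
#   engine = create_engine('mysql://user:password@localhost/nova')  # placeholder
#   meta = MetaData(bind=engine)
#   services = Table('services', meta, autoload=True)
#   assert 'last_seen_up' in services.c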
|
apache-2.0
|
ahamilton55/ansible
|
lib/ansible/module_utils/facts/system/date_time.py
|
197
|
2597
|
# Date and time related facts collection for ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import time
from ansible.module_utils.facts.collector import BaseFactCollector
class DateTimeFactCollector(BaseFactCollector):
name = 'date_time'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
facts_dict = {}
date_time_facts = {}
now = datetime.datetime.now()
date_time_facts['year'] = now.strftime('%Y')
date_time_facts['month'] = now.strftime('%m')
date_time_facts['weekday'] = now.strftime('%A')
date_time_facts['weekday_number'] = now.strftime('%w')
date_time_facts['weeknumber'] = now.strftime('%W')
date_time_facts['day'] = now.strftime('%d')
date_time_facts['hour'] = now.strftime('%H')
date_time_facts['minute'] = now.strftime('%M')
date_time_facts['second'] = now.strftime('%S')
date_time_facts['epoch'] = now.strftime('%s')
if date_time_facts['epoch'] == '' or date_time_facts['epoch'][0] == '%':
# NOTE: in this case, the epoch won't match the rest of the date_time facts? i.e., it's a few milliseconds later..? -akl
date_time_facts['epoch'] = str(int(time.time()))
date_time_facts['date'] = now.strftime('%Y-%m-%d')
date_time_facts['time'] = now.strftime('%H:%M:%S')
date_time_facts['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
date_time_facts['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
date_time_facts['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
date_time_facts['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
date_time_facts['tz'] = time.strftime("%Z")
date_time_facts['tz_offset'] = time.strftime("%z")
facts_dict['date_time'] = date_time_facts
return facts_dict
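# --- Illustrative usage (added for this document; not part of the original
# module). A minimal sketch of running the collector standalone, assuming
# BaseFactCollector needs no constructor arguments and that collect() works
# without an AnsibleModule instance.
if __name__ == '__main__':
    _facts = DateTimeFactCollector().collect()
    for _key in sorted(_facts['date_time']):
        print("%s = %s" % (_key, _facts['date_time'][_key]))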
|
gpl-3.0
|
Sintendo/dolphin
|
Tools/CleanFiles.py
|
161
|
2838
|
import codecs
import os
import glob
standard_sections = [
"Core",
"EmuState",
"OnLoad",
"OnFrame",
"ActionReplay",
"Video",
"Video_Settings",
"Video_Enhancements",
"Video_Hacks",
"Speedhacks",
]
standard_comments = {
"Core": "Values set here will override the main dolphin settings.",
"EmuState": "The Emulation State. 1 is worst, 5 is best, 0 is not set.",
"OnLoad": "Add memory patches to be loaded once on boot here.",
"OnFrame": "Add memory patches to be applied every frame here.",
"ActionReplay": "Add action replay cheats here.",
"Video": "",
"Video_Settings": "",
"Video_Enhancements": "",
"Video_Hacks": "",
"Speedhacks": "",
}
def normalize_comment(line):
line = line.strip().lstrip('#').lstrip()
if line:
return "# %s" % (line,)
else:
return ""
def normalize_ini_file(in_, out):
sections = {}
current_section = None
toplevel_comment = ""
wants_comment = False
for line in in_:
line = line.strip()
# strip utf8 bom
line = line.lstrip(u'\ufeff')
if line.startswith('#'):
line = normalize_comment(line)
if current_section is None:
toplevel_comment += line
continue
if line.startswith('['):
end = line.find(']')
section_name = line[1:end]
if section_name not in standard_sections:
continue
current_section = []
sections[section_name] = current_section
wants_comment = False
continue
if current_section is None and line:
raise ValueError("invalid junk")
if current_section is None:
continue
if line.startswith('#') and not wants_comment:
continue
current_section.append(line)
if line:
wants_comment = True
out.write(toplevel_comment.strip() + "\n\n")
for section in standard_sections:
lines = '\n'.join(sections.get(section, "")).strip()
comments = standard_comments[section]
if not lines and not comments:
continue
out.write("[%s]\n" % (section,))
if comments:
out.write("# %s\n" % (comments,))
if lines:
out.write(lines)
out.write('\n')
out.write('\n')
def main():
base_path = os.path.dirname(__file__)
pattern = os.path.join(base_path, "../Data/User/GameConfig/??????.ini")
for name in glob.glob(pattern):
in__name = name
out_name = name + '.new'
in_ = codecs.open(in__name, 'r', 'utf8')
out = codecs.open(out_name, 'w', 'utf8')
normalize_ini_file(in_, out)
os.rename(out_name, in__name)
if __name__ == "__main__":
main()
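# --- Illustrative usage (added for this document; not part of the original
# script). A minimal sketch of normalize_ini_file on an in-memory INI; the
# sample section contents below are hypothetical.
def _demo_normalize_ini():
    import io
    sample = (u"# Example game ini\n"
              u"[Core]\n"
              u"CPUThread = True\n"
              u"[EmuState]\n"
              u"EmulationStateId = 4\n")
    out = io.StringIO()
    normalize_ini_file(io.StringIO(sample), out)
    print(out.getvalue())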
|
gpl-2.0
|
jjanssen/django-cms-timetravel
|
cms_timetravel/managers/plugins.py
|
1
|
1902
|
import logging
from django.core.exceptions import ValidationError
from cms.models import CMSPlugin
from cms.models.placeholdermodel import Placeholder
from cms.plugin_rendering import PluginContext, render_plugin as _render_plugin
from ..utils import get_timetravel_date
from ..models import Schedulable
def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
instance, plugin = self.get_plugin_instance()
if instance and not (admin and not plugin.admin_preview):
if not self.published():
return ""
if not isinstance(placeholder, Placeholder):
placeholder = instance.placeholder
placeholder_slot = placeholder.slot
context = PluginContext(context, instance, placeholder)
context = plugin.render(context, instance, placeholder_slot)
if plugin.render_plugin:
template = hasattr(instance, 'render_template') and instance.render_template or plugin.render_template
if not template:
raise ValidationError("plugin has no render_template: %s" % plugin.__class__)
else:
template = None
return _render_plugin(context, instance, placeholder, template, processors)
return ""
def published(self):
"""
Checks if the plugin should be published, depending on the publication
start and/or end date (if available).
"""
ref_date = get_timetravel_date()
instance, plugin = self.get_plugin_instance()
if isinstance(instance, Schedulable):
logging.debug("The current plugin instance is Schedulable.")
return (instance.publication_date is None or instance.publication_date < ref_date) and \
(instance.publication_end_date is None or instance.publication_end_date >= ref_date)
else:
return True
CMSPlugin.published = published
CMSPlugin.render_plugin = render_plugin
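# --- Illustrative check (added for this document; not part of the original
# module). A minimal sketch of the publication-window rule that published()
# applies, with plain datetimes standing in for a Schedulable plugin instance.
#
#   from datetime import datetime
#   ref_date = datetime(2015, 6, 1)
#   publication_date = datetime(2015, 5, 1)
#   publication_end_date = datetime(2015, 7, 1)
#   visible = (publication_date is None or publication_date < ref_date) and \
#             (publication_end_date is None or publication_end_date >= ref_date)
#   assert visible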
|
apache-2.0
|
shoopio/shoop
|
shuup_tests/admin/test_product_package.py
|
2
|
4243
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.forms import formset_factory
from shuup.admin.modules.products.forms import (
PackageChildForm, PackageChildFormSet
)
from shuup.admin.modules.products.utils import clear_existing_package
from shuup.admin.modules.products.views import ProductPackageView
from shuup.core.models import ProductMode, ShopProduct
from shuup.simple_supplier.module import SimpleSupplierModule
from shuup.testing.factories import (
create_package_product, create_product, get_default_shop, get_supplier
)
from shuup.testing.utils import apply_all_middleware
from shuup.utils.excs import Problem
from shuup_tests.utils import printable_gibberish
from shuup_tests.utils.forms import get_form_data
@pytest.mark.django_db
def test_package_child_formset():
FormSet = formset_factory(PackageChildForm, PackageChildFormSet, extra=5, can_delete=True)
parent = create_product(printable_gibberish())
child = create_product(printable_gibberish())
# No products in the package
formset = FormSet(parent_product=parent)
assert formset.initial_form_count() == 0 # No children yet
assert not parent.get_all_package_children()
data = dict(get_form_data(formset, True), **{"form-0-child": child.pk, "form-0-quantity": 2})
formset = FormSet(parent_product=parent, data=data)
formset.save()
assert parent.get_all_package_children()
clear_existing_package(parent)
assert not parent.get_all_package_children()
@pytest.mark.django_db
def test_product_not_in_normal_mode():
FormSet = formset_factory(PackageChildForm, PackageChildFormSet, extra=5, can_delete=True)
parent = create_product(printable_gibberish())
child_1 = create_product(printable_gibberish())
child_1.link_to_parent(parent)
child_2 = create_product(printable_gibberish())
parent.verify_mode()
assert parent.mode == ProductMode.SIMPLE_VARIATION_PARENT
# Trying to create a package from a non-normal mode product
with pytest.raises(Problem):
formset = FormSet(parent_product=parent)
data = dict(get_form_data(formset, True), **{"form-0-child": child_2.pk, "form-0-quantity": 2})
formset = FormSet(parent_product=parent, data=data)
formset.save()
@pytest.mark.django_db
def test_cannot_add_product_to_own_package(rf):
FormSet = formset_factory(PackageChildForm, PackageChildFormSet, extra=5, can_delete=True)
parent = create_product(printable_gibberish())
# No products in the package
formset = FormSet(parent_product=parent)
assert formset.initial_form_count() == 0 # No children yet
assert not parent.get_all_package_children()
# Try to add a product to its own package
data = dict(get_form_data(formset, True), **{"form-0-child": parent.pk, "form-0-quantity": 2})
formset = FormSet(parent_product=parent, data=data)
formset.save()
assert not parent.get_all_package_children()
@pytest.mark.parametrize("supplier_enabled", [True, False])
@pytest.mark.django_db
def test_package_edit_view(admin_user, rf, supplier_enabled):
shop = get_default_shop()
supplier = get_supplier(SimpleSupplierModule.identifier, shop=shop, stock_managed=True)
supplier.enabled = supplier_enabled
supplier.save()
package = create_package_product(printable_gibberish(), shop, supplier)
request = apply_all_middleware(rf.get("/"), user=admin_user)
response = ProductPackageView.as_view()(request=request, pk=package.pk)
product_ids = []
for shop_product in ShopProduct.objects.filter(suppliers=supplier, product__mode=ProductMode.NORMAL):
supplier.adjust_stock(product_id=shop_product.product_id, delta=shop_product.product_id)
product_ids.append(shop_product.product_id)
assert response.status_code == 200
response.render()
content = response.content.decode("utf-8")
for product_id in product_ids:
is_inside = ("Logical count: %s" % product_id) in content
assert is_inside == supplier_enabled
|
agpl-3.0
|
gazpachoking/Flexget
|
flexget/plugins/operate/sequence.py
|
2
|
1822
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import itertools
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('sequence')
class PluginSequence(object):
""" Allows the same plugin to be configured multiple times in a task.
Example:
sequence:
- rss: http://feeda.com
- rss: http://feedb.com
"""
schema = {'type': 'array', 'items': {'$ref': '/schema/plugins'}}
def __getattr__(self, item):
"""Returns a function for all on_task_* events, that runs all the configured plugins."""
for phase, method in plugin.phase_methods.items():
if item == method and phase not in ['accept', 'reject', 'fail']:
break
else:
raise AttributeError(item)
def handle_phase(task, config):
"""Function that runs all of the configured plugins which act on the current phase."""
# Keep a list of all results, for input plugin combining
results = []
for item in config:
for plugin_name, plugin_config in item.items():
if phase in plugin.get_phases_by_plugin(plugin_name):
method = plugin.get_plugin_by_name(plugin_name).phase_handlers[phase]
log.debug('Running plugin %s' % plugin_name)
result = method(task, plugin_config)
if phase == 'input' and result:
results.append(result)
return itertools.chain(*results)
return handle_phase
@event('plugin.register')
def register_plugin():
plugin.register(PluginSequence, 'sequence', api_ver=2, debug=True)
|
mit
|
mjirayu/sit_academy
|
lms/djangoapps/mailing/management/commands/mailchimp_sync_course.py
|
155
|
12352
|
"""
Synchronizes a mailchimp list with the students of a course.
"""
import logging
import math
import random
import itertools
from itertools import chain
from optparse import make_option
from collections import namedtuple
from django.core.management.base import BaseCommand, CommandError
from mailsnake import MailSnake
from student.models import UserProfile, unique_id_for_user
from opaque_keys.edx.keys import CourseKey
BATCH_SIZE = 15000
# If you try to subscribe with too many users at once
# the transaction times out on the mailchimp side.
SUBSCRIBE_BATCH_SIZE = 1000
log = logging.getLogger('edx.mailchimp')
FIELD_TYPES = {'EDX_ID': 'text'}
class Command(BaseCommand):
"""
Synchronizes a mailchimp list with the students of a course.
"""
args = '<mailchimp_key mailchimp_list course_id>'
help = 'Synchronizes a mailchimp list with the students of a course.'
option_list = BaseCommand.option_list + (
make_option('--key', action='store', help='mailchimp api key'),
make_option('--list', action='store', dest='list_id',
help='mailchimp list id'),
make_option('--course', action='store', dest='course_id',
help='xmodule course_id'),
make_option('--segments', action='store', dest='segments',
default=0, type=int,
help='number of static random segments to create'),
)
def parse_options(self, options):
"""Parses `options` of the command."""
if not options['key']:
raise CommandError('missing key')
if not options['list_id']:
raise CommandError('missing list id')
if not options['course_id']:
raise CommandError('missing course id')
return (options['key'], options['list_id'],
options['course_id'], options['segments'])
def handle(self, *args, **options):
"""Synchronizes a mailchimp list with the students of a course."""
key, list_id, course_id, nsegments = self.parse_options(options)
log.info('Synchronizing email list for %s', course_id)
mailchimp = connect_mailchimp(key)
subscribed = get_subscribed(mailchimp, list_id)
unsubscribed = get_unsubscribed(mailchimp, list_id)
cleaned = get_cleaned(mailchimp, list_id)
non_subscribed = unsubscribed.union(cleaned)
enrolled = get_enrolled_students(course_id)
exclude = subscribed.union(non_subscribed)
to_subscribe = get_student_data(enrolled, exclude=exclude)
tag_names = set(chain.from_iterable(d.keys() for d in to_subscribe))
update_merge_tags(mailchimp, list_id, tag_names)
subscribe_with_data(mailchimp, list_id, to_subscribe)
enrolled_emails = set(enrolled.values_list('user__email', flat=True))
non_enrolled_emails = list(subscribed.difference(enrolled_emails))
unsubscribe(mailchimp, list_id, non_enrolled_emails)
subscribed = subscribed.union(set(d['EMAIL'] for d in to_subscribe))
make_segments(mailchimp, list_id, nsegments, subscribed)
def connect_mailchimp(api_key):
"""
Initializes connection to the mailchimp api
"""
mailchimp = MailSnake(api_key)
result = mailchimp.ping()
log.debug(result)
return mailchimp
def verify_list(mailchimp, list_id, course_id):
"""
Verifies that the given list_id corresponds to the course_id
Returns boolean: whether or not course_id matches list_id
"""
lists = mailchimp.lists(filters={'list_id': list_id})['data']
if len(lists) != 1:
log.error('incorrect list id')
return False
list_name = lists[0]['name']
log.debug('list name: %s', list_name)
# check that we are connecting to the correct list
parts = course_id.replace('_', ' ').replace('/', ' ').split()
count = sum(1 for p in parts if p in list_name)
if count < 3:
log.info(course_id)
log.info(list_name)
log.error('course_id does not match list name')
return False
return True
def get_student_data(students, exclude=None):
"""
Given a QuerySet of Django users, extracts id, username, and is_anonymous data.
Excludes any users provided in the optional `exclude` set.
Returns a list of dictionaries for each user, where the dictionary has keys
'EMAIL', 'FULLNAME', and 'EDX_ID'.
"""
# To speed the query, we won't retrieve the full User object, only
# two of its values. The namedtuple simulates the User object.
FakeUser = namedtuple('Fake', 'id username is_anonymous') # pylint: disable=invalid-name
exclude = exclude if exclude else set()
def make(svalue):
"""
Given a User value entry `svalue`, extracts the student's email and fullname,
and provides a unique id for the user.
Returns a dictionary with keys 'EMAIL', 'FULLNAME', and 'EDX_ID'.
"""
fake_user = FakeUser(svalue['user_id'], svalue['user__username'], lambda: True)
entry = {
'EMAIL': svalue['user__email'],
'FULLNAME': svalue['name'].title(),
'EDX_ID': unique_id_for_user(fake_user)
}
return entry
fields = 'user__email', 'name', 'user_id', 'user__username'
values = students.values(*fields)
# TODO: Since `students` is a QuerySet, can we chain a filter here that would be more
# performant than calling a lambda for every user?
exclude_func = lambda s: s['user__email'] in exclude
return [make(s) for s in values if not exclude_func(s)]
def get_enrolled_students(course_id):
"""
Given a course_id, returns a QuerySet of all the active students
in the course.
"""
objects = UserProfile.objects
course_key = CourseKey.from_string(course_id)
students = objects.filter(user__courseenrollment__course_id=course_key,
user__courseenrollment__is_active=True)
return students
def get_subscribed(mailchimp, list_id):
"""Returns a set of email addresses subscribed to `list_id`"""
return get_members(mailchimp, list_id, 'subscribed')
def get_unsubscribed(mailchimp, list_id):
"""Returns a set of email addresses that have unsubscribed from `list_id`"""
return get_members(mailchimp, list_id, 'unsubscribed')
def get_cleaned(mailchimp, list_id):
"""
Returns a set of email addresses that have been cleaned from `list_id`
These email addresses may be invalid or have caused bounces, so you don't want
to re-add them back to the list.
"""
return get_members(mailchimp, list_id, 'cleaned')
def get_members(mailchimp, list_id, status):
"""
Given a mailchimp list id and a user status to filter on, returns all
members of the mailchimp list with that status.
Returns a set of email addresses.
"""
mc_get_members = mailchimp.listMembers
members = set()
for page in itertools.count():
response = mc_get_members(id=list_id,
status=status,
start=page,
limit=BATCH_SIZE)
data = response.get('data', [])
if not data:
break
members.update(d['email'] for d in data)
return members
def unsubscribe(mailchimp, list_id, emails):
"""
Batch unsubscribe the given email addresses from the list represented
by `list_id`
"""
batch_unsubscribe = mailchimp.listBatchUnsubscribe
result = batch_unsubscribe(id=list_id,
emails=emails,
send_goodbye=False,
delete_member=False)
log.debug(result)
def update_merge_tags(mailchimp, list_id, tag_names):
"""
This function is rather inscrutable. Given tag_names, which
in this code seems to be a list of ['FULLNAME', 'EMAIL', 'EDX_ID'],
we grab tags from the mailchimp list, then we verify tag_names has
'FULLNAME' and 'EMAIL' present, we get more data from mailchimp, then
sync the variables up to mailchimp using `listMergeVarAdd`.
The purpose of this function is unclear.
"""
mc_vars = mailchimp.listMergeVars(id=list_id)
mc_names = set(v['name'] for v in mc_vars)
mc_merge = mailchimp.listMergeVarAdd
tags = [v['tag'] for v in mc_vars]
for name in tag_names:
tag = name_to_tag(name)
# verify FULLNAME is present
# TODO: Why is this under the for loop? It does nothing with the loop
# variable and seems like things would work if this was executed before or
# after the loop.
if 'FULLNAME' not in tags:
result = mc_merge(id=list_id,
tag='FULLNAME',
name='Full Name',
options={'field_type': 'text',
'public': False})
tags.append('FULLNAME')
log.debug(result)
# add extra tags if not present
if name not in mc_names and tag not in ['EMAIL', 'FULLNAME']:
ftype = FIELD_TYPES.get(name, 'number')
result = mc_merge(id=list_id,
tag=tag,
name=name,
options={'field_type': ftype,
'public': False})
tags.append(tag)
log.debug(result)
def subscribe_with_data(mailchimp, list_id, user_data):
"""
Given user_data in the form of a list of dictionaries for each user,
where the dictionary has keys 'EMAIL', 'FULLNAME', and 'EDX_ID', batch
subscribe the users to the given `list_id` via a Mailchimp api method.
Returns None
"""
format_entry = lambda e: {name_to_tag(k): v for k, v in e.iteritems()}
    formatted_data = list(format_entry(e) for e in user_data)
    # send the updates in batches of a fixed size
    for batch in chunk(formatted_data, SUBSCRIBE_BATCH_SIZE):
result = mailchimp.listBatchSubscribe(id=list_id,
batch=batch,
double_optin=False,
update_existing=True)
log.debug(
"Added: %s Error on: %s", result['add_count'], result['error_count']
)
def make_segments(mailchimp, list_id, count, emails):
"""
Segments the list of email addresses `emails` into `count` segments,
if count is nonzero.
For unknown historical reasons, lost to the winds of time, this is done with
a random order to the email addresses.
First, existing 'random_' mailchimp segments are deleted.
Then, the list of emails (the whole, large list) is shuffled.
Finally, the shuffled emails are chunked into `count` segments and re-uploaded
to mailchimp as 'random_'-prefixed segments.
"""
if count > 0:
# reset segments
segments = mailchimp.listStaticSegments(id=list_id)
for seg in segments:
if seg['name'].startswith('random'):
mailchimp.listStaticSegmentDel(id=list_id, seg_id=seg['id'])
# shuffle and split emails
emails = list(emails)
random.shuffle(emails) # Why do we do this?
chunk_size = int(math.ceil(float(len(emails)) / count))
chunks = list(chunk(emails, chunk_size))
# create segments and add emails
for seg in xrange(count):
name = 'random_{0:002}'.format(seg)
seg_id = mailchimp.listStaticSegmentAdd(id=list_id, name=name)
for batch in chunk(chunks[seg], BATCH_SIZE):
mailchimp.listStaticSegmentMembersAdd(
id=list_id,
seg_id=seg_id,
batch=batch
)
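
# Illustrative sketch (not part of the original script): how the segment sizing above
# works out. `_demo_segment_sizes` is a hypothetical helper that just repeats the
# chunk-size arithmetic used by make_segments.
def _demo_segment_sizes(num_emails=1000, count=3):
    """
    Hypothetical helper: with 1000 emails and count=3, chunk_size is
    ceil(1000 / 3) = 334, so the shuffled list is cut into segments of
    334, 334 and 332 addresses, uploaded as 'random_00'..'random_02'.
    """
    chunk_size = int(math.ceil(float(num_emails) / count))
    sizes = [min(chunk_size, num_emails - i) for i in xrange(0, num_emails, chunk_size)]
    return chunk_size, sizes  # e.g. (334, [334, 334, 332])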
def name_to_tag(name):
"""
Returns sanitized str `name`: no more than 10 characters,
with spaces replaced with `_`
"""
if len(name) > 10:
name = name[:10]
return name.replace(' ', '_').strip()
def chunk(elist, size):
"""
Generator. Yields a list of size `size` of the given list `elist`,
or a shorter list if at the end of the input.
"""
for i in xrange(0, len(elist), size):
yield elist[i:i + size]
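
# Illustrative sketch (not part of the original script): chunk() yields successive
# slices of at most `size` elements, which is how the batch subscribe/unsubscribe
# calls above stay under the Mailchimp batch limits.
def _demo_chunk():
    """Hypothetical demo: returns [[1, 2], [3, 4], [5]]."""
    return list(chunk([1, 2, 3, 4, 5], 2))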
|
agpl-3.0
|
joeyespo/django-extensions
|
django_extensions/management/commands/sqldsn.py
|
12
|
5124
|
# -*- coding: utf-8 -*-
"""
sqldsn.py - Prints Data Source Name on stdout
"""
import sys
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import color_style
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-R', '--router', action='store',
dest='router', default='default',
                    help='Use this router-database other than the default'),
make_option('-s', '--style', action='store',
dest='style', default=None,
                    help='DSN format style: keyvalue, kwargs, uri, pgpass, all'),
make_option('-a', '--all', action='store_true',
dest='all', default=False,
help='Show DSN for all database routes'),
make_option('-q', '--quiet', action='store_true',
dest='quiet', default=False,
help='Quiet mode only show DSN'),
)
help = """Prints DSN on stdout, as specified in settings.py
./manage.py sqldsn [--router=<routername>] [--style=pgpass]"""
requires_system_checks = False
can_import_settings = True
def handle(self, *args, **options):
self.style = color_style()
all_routers = options.get('all')
if all_routers:
routers = settings.DATABASES.keys()
else:
routers = [options.get('router')]
for i, router in enumerate(routers):
if i != 0:
sys.stdout.write("\n")
self.show_dsn(router, options)
def show_dsn(self, router, options):
dbinfo = settings.DATABASES.get(router)
quiet = options.get('quiet')
dsn_style = options.get('style')
if dbinfo is None:
raise CommandError("Unknown database router %s" % router)
engine = dbinfo.get('ENGINE').split('.')[-1]
dbuser = dbinfo.get('USER')
dbpass = dbinfo.get('PASSWORD')
dbname = dbinfo.get('NAME')
dbhost = dbinfo.get('HOST')
dbport = dbinfo.get('PORT')
dsn = []
if engine == 'mysql':
dsnstr = 'host="{0}", db="{2}", user="{3}", passwd="{4}"'
if dbport is not None:
dsnstr = dsnstr + ', port="{1}"'
dsn.append(dsnstr.format(dbhost,
dbport,
dbname,
dbuser,
dbpass))
elif engine == 'postgresql_psycopg2':
dsn = self.postgresql(dbhost, dbport, dbname, dbuser, dbpass, dsn_style=dsn_style)
elif engine == 'sqlite3':
dsn.append('{}'.format(dbname))
else:
            dsn.append(self.style.ERROR("Unknown database, can't generate DSN"))
if not quiet:
sys.stdout.write(self.style.SQL_TABLE("DSN for router '%s' with engine '%s':\n" % (router, engine)))
for output in dsn:
sys.stdout.write("{}\n".format(output))
def postgresql(self, dbhost, dbport, dbname, dbuser, dbpass, dsn_style=None):
"""PostgreSQL psycopg2 driver accepts two syntaxes
Plus a string for .pgpass file
"""
dsn = []
if dsn_style is None or dsn_style == 'all' or dsn_style == 'keyvalue':
dsnstr = "host='{0}' dbname='{2}' user='{3}' password='{4}'"
if dbport is not None:
dsnstr = dsnstr + " port='{1}'"
dsn.append(dsnstr.format(dbhost,
dbport,
dbname,
dbuser,
dbpass,))
if dsn_style == 'all' or dsn_style == 'kwargs':
dsnstr = "host='{0}', database='{2}', user='{3}', password='{4}'"
if dbport is not None:
dsnstr = dsnstr + ", port='{1}'"
dsn.append(dsnstr.format(dbhost,
dbport,
dbname,
dbuser,
dbpass))
if dsn_style == 'all' or dsn_style == 'uri':
if dbport is not None:
dsnstr = "postgresql://{3}:{4}@{0}:{1}/{2}"
else:
dsnstr = "postgresql://{3}:{4}@{0}/{2}"
dsn.append(dsnstr.format(dbhost,
dbport,
dbname,
dbuser,
dbpass,))
if dsn_style == 'all' or dsn_style == 'pgpass':
            # Default to the standard PostgreSQL port when none is configured.
            if not dbport:
                dbport = 5432
dsn.append('{0}:{1}:{2}:{3}:{4}'.format(dbhost,
dbport,
dbname,
dbuser,
dbpass))
return dsn
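
# Illustrative note (not part of the original command): for a hypothetical
# settings.DATABASES entry with HOST='db.example.com', PORT='5432', NAME='mydb',
# USER='alice' and PASSWORD='secret', `./manage.py sqldsn --style=all` would print
# roughly these four DSN lines (key/value, kwargs, URI, and .pgpass formats):
#
#   host='db.example.com' dbname='mydb' user='alice' password='secret' port='5432'
#   host='db.example.com', database='mydb', user='alice', password='secret', port='5432'
#   postgresql://alice:[email protected]:5432/mydb
#   db.example.com:5432:mydb:alice:secret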
|
mit
|
ntim/hyperion
|
effects/knight-rider.py
|
2
|
1331
|
import hyperion
import time
import colorsys
# Get the parameters
speed = float(hyperion.args.get('speed', 1.0))
fadeFactor = float(hyperion.args.get('fadeFactor', 0.7))
color = hyperion.args.get('color', (255,0,0))
# Check parameters
speed = max(0.0001, speed)
fadeFactor = max(0.0, min(fadeFactor, 1.0))
# Initialize the led data
width = 25
imageData = bytearray(width * (0,0,0))
imageData[0] = color[0]
imageData[1] = color[1]
imageData[2] = color[2]
# Calculate the sleep time and rotation increment
increment = 1
sleepTime = 1.0 / (speed * width)
while sleepTime < 0.05:
increment *= 2
sleepTime *= 2
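# Worked example (illustrative note, not part of the original effect): with the
# default speed of 1.0 and width of 25, sleepTime starts at 1.0 / (1.0 * 25) = 0.04s.
# Since that is below the 0.05s floor, the loop above doubles it once to 0.08s and
# sets increment to 2, so the light advances 2 LEDs per frame instead of sleeping
# for too short a time.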
# Start the write data loop
position = 0
direction = 1
while not hyperion.abort():
hyperion.setImage(width, 1, imageData)
# Move data into next state
for i in range(increment):
position += direction
if position == -1:
position = 1
direction = 1
elif position == width:
position = width-2
direction = -1
# Fade the old data
for j in range(width):
imageData[3*j] = int(fadeFactor * imageData[3*j])
imageData[3*j+1] = int(fadeFactor * imageData[3*j+1])
imageData[3*j+2] = int(fadeFactor * imageData[3*j+2])
# Insert new data
imageData[3*position] = color[0]
imageData[3*position+1] = color[1]
imageData[3*position+2] = color[2]
# Sleep for a while
time.sleep(sleepTime)
|
mit
|
zengenti/ansible
|
lib/ansible/modules/network/aos/aos_blueprint.py
|
13
|
9007
|
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: aos_blueprint
author: [email protected] (@jeremyschulman)
version_added: "2.3"
short_description: Manage AOS blueprint instance
description:
    - Apstra AOS Blueprint module lets you manage your Blueprints easily. You can
      create and delete Blueprints by name or ID. You can also use it to retrieve
      all data from a blueprint. This module is idempotent
      and supports the I(check) mode. It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Blueprint to manage.
Only one of I(name) or I(id) can be set.
id:
description:
      - AOS Id of the Blueprint to manage (can't be used to create a new Blueprint).
Only one of I(name) or I(id) can be set.
state:
description:
- Indicate what is the expected state of the Blueprint.
choices: ['present', 'absent', 'build-ready']
default: present
timeout:
description:
      - When I(state=build-ready), this value identifies the timeout in seconds to wait before
        declaring a failure.
default: 5
template:
description:
- When creating a blueprint, this value identifies, by name, an existing engineering
design template within the AOS-server.
reference_arch:
description:
- When creating a blueprint, this value identifies a known AOS reference
architecture value. I(Refer to AOS-server documentation for available values).
'''
EXAMPLES = '''
- name: Creating blueprint
aos_blueprint:
session: "{{ aos_session }}"
name: "my-blueprint"
template: "my-template"
reference_arch: two_stage_l3clos
state: present
- name: Access a blueprint and get content
aos_blueprint:
session: "{{ aos_session }}"
name: "{{ blueprint_name }}"
template: "{{ blueprint_template }}"
state: present
register: bp
- name: Delete a blueprint
aos_blueprint:
session: "{{ aos_session }}"
name: "my-blueprint"
state: absent
- name: Await blueprint build-ready, and obtain contents
aos_blueprint:
session: "{{ aos_session }}"
name: "{{ blueprint_name }}"
state: build-ready
register: bp
'''
RETURNS = '''
name:
description: Name of the Blueprint
returned: always
type: str
sample: My-Blueprint
id:
description: AOS unique ID assigned to the Blueprint
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Information about the Blueprint
returned: always
type: dict
sample: {'...'}
contents:
description: Blueprint contents data-dictionary
returned: always
type: dict
sample: { ... }
build_errors:
description: When state='build-ready', and build errors exist, this contains list of errors
returned: only when build-ready returns fail
type: list
sample: [{...}, {...}]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, check_aos_version, find_collection_item
from ansible.module_utils.pycompat24 import get_exception
def create_blueprint(module, aos, name):
margs = module.params
try:
template_id = aos.DesignTemplates[margs['template']].id
# Create a new Object based on the name
blueprint = aos.Blueprints[name]
blueprint.create(template_id, reference_arch=margs['reference_arch'])
except:
exc = get_exception()
if 'UNPROCESSABLE ENTITY' in exc.message:
msg = 'likely missing dependencies'
else:
msg = exc.message
module.fail_json(msg="Unable to create blueprint: %s" % exc.message)
return blueprint
def ensure_absent(module, aos, blueprint):
if blueprint.exists is False:
module.exit_json(changed=False)
else:
if not module.check_mode:
try:
blueprint.delete()
except:
exc = get_exception()
module.fail_json(msg='Unable to delete blueprint, %s' % exc.message)
module.exit_json(changed=True,
id=blueprint.id,
name=blueprint.name)
def ensure_present(module, aos, blueprint):
margs = module.params
if blueprint.exists:
module.exit_json(changed=False,
id=blueprint.id,
name=blueprint.name,
value=blueprint.value,
contents=blueprint.contents)
else:
# Check if template is defined and is valid
if margs['template'] is None:
module.fail_json(msg="You must define a 'template' name to create a new blueprint, currently missing")
elif aos.DesignTemplates.find(label=margs['template']) is None:
module.fail_json(msg="You must define a Valid 'template' name to create a new blueprint, %s is not valid" % margs['template'])
        # Check if reference_arch is defined
if margs['reference_arch'] is None:
module.fail_json(msg="You must define a 'reference_arch' to create a new blueprint, currently missing")
if not module.check_mode:
blueprint = create_blueprint(module, aos, margs['name'])
module.exit_json(changed=True,
id=blueprint.id,
name=blueprint.name,
value=blueprint.value,
contents=blueprint.contents)
else:
module.exit_json(changed=True,
name=margs['name'])
def ensure_build_ready(module, aos, blueprint):
margs = module.params
if not blueprint.exists:
module.fail_json(msg='blueprint %s does not exist' % blueprint.name)
if blueprint.await_build_ready(timeout=margs['timeout']*1000):
module.exit_json(contents=blueprint.contents)
else:
        module.fail_json(msg='blueprint %s has build errors' % blueprint.name,
                         build_errors=blueprint.build_errors)
def aos_blueprint(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
#----------------------------------------------------
# Find Object if available based on ID or Name
#----------------------------------------------------
try:
my_blueprint = find_collection_item(aos.Blueprints,
item_name=item_name,
item_id=item_id)
except:
module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")
#----------------------------------------------------
# Proceed based on State value
#----------------------------------------------------
if margs['state'] == 'absent':
ensure_absent(module, aos, my_blueprint)
elif margs['state'] == 'present':
ensure_present(module, aos, my_blueprint)
elif margs['state'] == 'build-ready':
ensure_build_ready(module, aos, my_blueprint)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False),
            id=dict(required=False),
state=dict(choices=[
'present', 'absent', 'build-ready'],
default='present'),
timeout=dict(type="int", default=5),
template=dict(required=False),
reference_arch=dict(required=False)
),
        mutually_exclusive=[('name', 'id')],
required_one_of=[('name', 'id')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
aos_blueprint(module)
if __name__ == '__main__':
main()
|
gpl-3.0
|
raymondxyang/tensorflow
|
tensorflow/python/kernel_tests/depthtospace_op_test.py
|
90
|
8089
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for DepthToSpace op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class DepthToSpaceTest(test.TestCase):
def _testOne(self, inputs, block_size, outputs):
with self.test_session(use_gpu=True):
x_tf = array_ops.depth_to_space(math_ops.to_float(inputs), block_size)
self.assertAllEqual(x_tf.eval(), outputs)
def testBasic(self):
x_np = [[[[1, 2, 3, 4]]]]
block_size = 2
x_out = [[[[1], [2]], [[3], [4]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testBlockSize2(self):
x_np = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
block_size = 2
x_out = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
self._testOne(x_np, block_size, x_out)
def testBlockSize2Batch10(self):
block_size = 2
def batch_input_elt(i):
return [[[1 * i, 2 * i, 3 * i, 4 * i],
[5 * i, 6 * i, 7 * i, 8 * i]],
[[9 * i, 10 * i, 11 * i, 12 * i],
[13 * i, 14 * i, 15 * i, 16 * i]]]
def batch_output_elt(i):
return [[[1 * i], [2 * i], [5 * i], [6 * i]],
[[3 * i], [4 * i], [7 * i], [8 * i]],
[[9 * i], [10 * i], [13 * i], [14 * i]],
[[11 * i], [12 * i], [15 * i], [16 * i]]]
batch_size = 10
x_np = [batch_input_elt(i) for i in range(batch_size)]
x_out = [batch_output_elt(i) for i in range(batch_size)]
self._testOne(x_np, block_size, x_out)
# Tests for different width and height.
def testNonSquare(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]],
[[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]],
[[5, 50], [6, 60]],
[[7, 70], [8, 80]],
[[9, 90], [10, 100]],
[[11, 110], [12, 120]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input dimensions. To make sure elements are
# correctly ordered spatially.
def testBlockSize4FlatInput(self):
x_np = [[[[1, 2, 5, 6, 3, 4, 7, 8, 9, 10, 13, 14, 11, 12, 15, 16]]]]
block_size = 4
x_out = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleaved(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20]],
[[3, 30], [4, 40]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths. Here an odd depth.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedDepth3(self):
x_np = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
block_size = 2
x_out = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
self._testOne(x_np, block_size, x_out)
# Tests for larger input depths.
# To make sure elements are properly interleaved in depth.
def testDepthInterleavedLarger(self):
x_np = [[[[1, 10, 2, 20, 3, 30, 4, 40],
[5, 50, 6, 60, 7, 70, 8, 80]],
[[9, 90, 10, 100, 11, 110, 12, 120],
[13, 130, 14, 140, 15, 150, 16, 160]]]]
block_size = 2
x_out = [[[[1, 10], [2, 20], [5, 50], [6, 60]],
[[3, 30], [4, 40], [7, 70], [8, 80]],
[[9, 90], [10, 100], [13, 130], [14, 140]],
[[11, 110], [12, 120], [15, 150], [16, 160]]]]
self._testOne(x_np, block_size, x_out)
# Error handling:
  # Tests for a block size that is too large for the depth. In this case it
  # should raise an exception.
def testBlockSizeTooLarge(self):
x_np = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
block_size = 4
    # Raise an exception, since the depth is only 4 and needs to be
    # divisible by 16.
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
# Test when the block size is 0.
def testBlockSize0(self):
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
# Test when the block size is 1. The block size should be > 1.
def testBlockSizeOne(self):
x_np = [[[[1, 1, 1, 1],
[2, 2, 2, 2]],
[[3, 3, 3, 3],
[4, 4, 4, 4]]]]
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
def testBlockSizeLargerThanInput(self):
# The block size is too large for this input.
x_np = [[[[1], [2]],
[[3], [4]]]]
block_size = 10
with self.assertRaises(ValueError):
      out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
def testBlockSizeNotDivisibleDepth(self):
# The depth is not divisible by the square of the block size.
x_np = [[[[1, 1, 1, 1],
[2, 2, 2, 2]],
[[3, 3, 3, 3],
[4, 4, 4, 4]]]]
block_size = 3
with self.assertRaises(ValueError):
      _ = array_ops.depth_to_space(x_np, block_size)
def testUnknownShape(self):
t = array_ops.depth_to_space(array_ops.placeholder(dtypes.float32), block_size=4)
self.assertEqual(4, t.get_shape().ndims)
class DepthToSpaceGradientTest(test.TestCase):
# Check the gradients.
def _checkGrad(self, x, block_size):
assert 4 == x.ndim
with self.test_session(use_gpu=True):
tf_x = ops.convert_to_tensor(x)
tf_y = array_ops.depth_to_space(tf_x, block_size)
epsilon = 1e-2
      (x_jacob_t, x_jacob_n) = gradient_checker.compute_gradient(
tf_x,
x.shape,
tf_y,
tf_y.get_shape().as_list(),
x_init_value=x,
delta=epsilon)
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=epsilon)
# Tests a gradient for depth_to_space of x which is a four dimensional
# tensor of shape [b, h, w, d * block_size * block_size].
def _compare(self, b, h, w, d, block_size):
block_size_sq = block_size * block_size
x = np.random.normal(
0, 1, b * h * w * d * block_size_sq).astype(np.float32).reshape(
[b, h, w, d * block_size_sq])
self._checkGrad(x, block_size)
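  # Shape note (illustrative, not part of the original test): depth_to_space maps an
  # input of shape [b, h, w, d * block_size**2] to an output of shape
  # [b, h * block_size, w * block_size, d]. For testSmall below, the input
  # [3, 2, 5, 3 * 4] becomes [3, 4, 10, 3].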
# Don't use very large numbers as dimensions here, as the result is tensor
# with cartesian product of the dimensions.
def testSmall(self):
block_size = 2
self._compare(3, 2, 5, 3, block_size)
def testSmall2(self):
block_size = 3
self._compare(1, 2, 3, 2, block_size)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
xcasper/python_koans
|
python3/koans/about_asserts.py
|
37
|
2280
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
def test_assert_truth(self):
"""
We shall contemplate truth by testing reality, via asserts.
"""
# Confused? This video should help:
#
# http://bit.ly/about_asserts
self.assertTrue(False) # This should be true
def test_assert_with_message(self):
"""
Enlightenment may be more easily achieved with appropriate messages.
"""
self.assertTrue(False, "This should be true -- Please fix this")
def test_fill_in_values(self):
"""
Sometimes we will ask you to fill in the values
"""
self.assertEqual(__, 1 + 1)
def test_assert_equality(self):
"""
To understand reality, we must compare our expectations against reality.
"""
expected_value = __
actual_value = 1 + 1
self.assertTrue(expected_value == actual_value)
def test_a_better_way_of_asserting_equality(self):
"""
Some ways of asserting equality are better than others.
"""
expected_value = __
actual_value = 1 + 1
self.assertEqual(expected_value, actual_value)
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
"""
Understand what lies within.
"""
# This throws an AssertionError exception
assert False
def test_that_sometimes_we_need_to_know_the_class_type(self):
"""
What is in a class name?
"""
# Sometimes we will ask you what the class type of an object is.
#
        # For example, contemplate the text string "naval". What is its class type?
# The koans runner will include this feedback for this koan:
#
# AssertionError: '-=> FILL ME IN! <=-' != <type 'str'>
#
# So "naval".__class__ is equal to <type 'str'>? No not quite. This
# is just what it displays. The answer is simply str.
#
# See for yourself:
self.assertEqual(__, "naval".__class__) # It's str, not <type 'str'>
# Need an illustration? More reading can be found here:
#
# http://bit.ly/__class__
|
mit
|
rdblue/Impala
|
infra/python/bootstrap_virtualenv.py
|
12
|
5526
|
# Copyright (c) 2015 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module will create a python virtual env and install external dependencies. If
# the virtualenv already exists and the list of dependencies matches the list of
# installed dependencies, nothing will be done.
#
# This module can be run with python >= 2.4 but python >= 2.6 must be installed on the
# system. If the default 'python' command refers to < 2.6, python 2.6 will be used
# instead.
import glob
import logging
import optparse
import os
import shutil
import subprocess
import tarfile
import tempfile
import textwrap
import urllib
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
DEPS_DIR = os.path.join(os.path.dirname(__file__), "deps")
ENV_DIR = os.path.join(os.path.dirname(__file__), "env")
# Generated using "pip install --download <DIR> -r requirements.txt"
REQS_PATH = os.path.join(DEPS_DIR, "requirements.txt")
# After installing, the requirements.txt will be copied into the virtualenv to
# record what was installed.
INSTALLED_REQS_PATH = os.path.join(ENV_DIR, "installed-requirements.txt")
def delete_virtualenv_if_exist():
if os.path.exists(ENV_DIR):
shutil.rmtree(ENV_DIR)
def create_virtualenv():
LOG.info("Creating python virtualenv")
build_dir = tempfile.mkdtemp()
file = tarfile.open(find_file(DEPS_DIR, "virtualenv*.tar.gz"), "r:gz")
for member in file.getmembers():
file.extract(member, build_dir)
file.close()
python_cmd = detect_python_cmd()
exec_cmd([python_cmd, find_file(build_dir, "virtualenv*", "virtualenv.py"), "--quiet",
"--python", python_cmd, ENV_DIR])
shutil.rmtree(build_dir)
def exec_cmd(args):
'''Executes a command and waits for it to finish, raises an exception if the return
status is not zero.
'args' uses the same format as subprocess.Popen().
'''
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = process.communicate()[0]
if process.returncode != 0:
raise Exception("Command returned non-zero status\nCommand: %s\nOutput: %s"
% (args, output))
def find_file(*paths):
'''Returns the path specified by the glob 'paths', raises an exception if no file is
found.
Ex: find_file('/etc', 'h*sts') --> /etc/hosts
'''
path = os.path.join(*paths)
files = glob.glob(path)
if len(files) > 1:
raise Exception("Found too many files at %s: %s" % (path, files))
if len(files) == 0:
raise Exception("No file found at %s" % path)
return files[0]
def detect_python_cmd():
'''Returns the system command that provides python 2.6 or greater.'''
paths = os.getenv("PATH").split(os.path.pathsep)
for cmd in ("python", "python27", "python2.7", "python-27", "python-2.7", "python26",
"python2.6", "python-26", "python-2.6"):
for path in paths:
cmd_path = os.path.join(path, cmd)
if not os.path.exists(cmd_path) or not os.access(cmd_path, os.X_OK):
continue
exit = subprocess.call([cmd_path, "-c", textwrap.dedent("""
import sys
sys.exit(int(sys.version_info[:2] < (2, 6)))""")])
if exit == 0:
return cmd_path
raise Exception("Could not find minimum required python version 2.6")
def install_deps():
LOG.info("Installing packages into virtualenv")
  # Don't call the virtualenv pip directly; it uses a hashbang to call the
  # virtualenv's python using an absolute path. If the path to the virtualenv is
  # very long, the hashbang won't work.
  # --no-cache-dir is used because the dev version of Impyla may keep the same version
  # number even though its contents have changed. Since the version doesn't change,
  # pip might otherwise reuse a stale cached build.
exec_cmd([os.path.join(ENV_DIR, "bin", "python"), os.path.join(ENV_DIR, "bin", "pip"),
"install", "--no-cache-dir", "--no-index", "--find-links",
"file://%s" % urllib.pathname2url(os.path.abspath(DEPS_DIR)), "-r", REQS_PATH])
shutil.copyfile(REQS_PATH, INSTALLED_REQS_PATH)
def deps_are_installed():
if not os.path.exists(INSTALLED_REQS_PATH):
return False
installed_reqs_file = open(INSTALLED_REQS_PATH)
try:
reqs_file = open(REQS_PATH)
try:
if reqs_file.read() == installed_reqs_file.read():
return True
else:
LOG.info("Virtualenv upgrade needed")
return False
finally:
reqs_file.close()
finally:
installed_reqs_file.close()
def setup_virtualenv_if_not_exists():
if not deps_are_installed():
delete_virtualenv_if_exist()
create_virtualenv()
install_deps()
LOG.info("Virtualenv setup complete")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
parser = optparse.OptionParser()
parser.add_option("-r", "--rebuild", action="store_true", help="Force a rebuild of"
" the virtualenv even if it exists and appears to be completely up-to-date.")
options, args = parser.parse_args()
if options.rebuild:
delete_virtualenv_if_exist()
setup_virtualenv_if_not_exists()
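
# Illustrative usage (not part of the original module): running
#
#   python infra/python/bootstrap_virtualenv.py
#
# creates the ./env virtualenv next to this file and installs everything listed in
# deps/requirements.txt; re-running is a no-op while the installed requirements still
# match. Passing -r/--rebuild deletes any existing virtualenv first so it is rebuilt
# from scratch.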
|
apache-2.0
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/models/image_data_disk_py3.py
|
1
|
2698
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageDataDisk(Model):
"""Describes a data disk.
All required parameters must be populated in order to send to Azure.
:param lun: Required. Specifies the logical unit number of the data disk.
This value is used to identify data disks within the VM and therefore must
be unique for each data disk attached to a VM.
:type lun: int
:param snapshot: The snapshot.
:type snapshot: ~azure.mgmt.compute.v2016_04_30_preview.models.SubResource
:param managed_disk: The managedDisk.
:type managed_disk:
~azure.mgmt.compute.v2016_04_30_preview.models.SubResource
:param blob_uri: The Virtual Hard Disk.
:type blob_uri: str
:param caching: Specifies the caching requirements. <br><br> Possible
values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite**
<br><br> Default: **None for Standard storage. ReadOnly for Premium
storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
:type caching: str or
~azure.mgmt.compute.v2016_04_30_preview.models.CachingTypes
:param disk_size_gb: Specifies the size of empty data disks in gigabytes.
This element can be used to overwrite the name of the disk in a virtual
machine image. <br><br> This value cannot be larger than 1023 GB
:type disk_size_gb: int
"""
_validation = {
'lun': {'required': True},
}
_attribute_map = {
'lun': {'key': 'lun', 'type': 'int'},
'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
'blob_uri': {'key': 'blobUri', 'type': 'str'},
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
}
def __init__(self, *, lun: int, snapshot=None, managed_disk=None, blob_uri: str=None, caching=None, disk_size_gb: int=None, **kwargs) -> None:
super(ImageDataDisk, self).__init__(**kwargs)
self.lun = lun
self.snapshot = snapshot
self.managed_disk = managed_disk
self.blob_uri = blob_uri
self.caching = caching
self.disk_size_gb = disk_size_gb
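
# Illustrative usage (not part of the generated model): constructing a 128 GB empty
# data disk at LUN 0 with read-only caching. The concrete values are hypothetical.
#
#   disk = ImageDataDisk(lun=0, disk_size_gb=128, caching='ReadOnly')
#
# Only `lun` is required; the remaining keyword-only parameters default to None.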
|
mit
|
rhndg/openedx
|
common/lib/xmodule/xmodule/modulestore/tests/test_mixed_modulestore.py
|
2
|
117042
|
# pylint: disable=no-member
"""
Unit tests for the Mixed Modulestore, with DDT for the various stores (Split, Draft, XML)
"""
from collections import namedtuple
import datetime
import logging
import ddt
import itertools
import mimetypes
from unittest import skip
from uuid import uuid4
from contextlib import contextmanager
from mock import patch
# Mixed modulestore depends on django, so we'll manually configure some django settings
# before importing the module
# TODO remove this import and the configuration -- xmodule should not depend on django!
from django.conf import settings
# This import breaks this test file when run separately. Needs to be fixed! (PLAT-449)
from mock_django import mock_signal_receiver
from nose.plugins.attrib import attr
import pymongo
from pytz import UTC
from shutil import rmtree
from tempfile import mkdtemp
from xmodule.x_module import XModuleMixin
from xmodule.modulestore.edit_info import EditInfoMixin
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore.tests.test_cross_modulestore_import_export import MongoContentstoreBuilder
from xmodule.contentstore.content import StaticContent
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore.xml_exporter import export_course_to_xml
from xmodule.modulestore.django import SignalHandler
if not settings.configured:
settings.configure()
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator, LibraryLocator
from xmodule.exceptions import InvalidVersionError
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.draft_and_published import UnsupportedRevisionError, DIRECT_ONLY_CATEGORIES
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError, ReferentialIntegrityError, NoPathToItem
from xmodule.modulestore.mixed import MixedModuleStore
from xmodule.modulestore.search import path_to_location, navigation_index
from xmodule.modulestore.tests.factories import check_mongo_calls, check_exact_number_of_calls, \
mongo_uses_error_check
from xmodule.modulestore.tests.utils import create_modulestore_instance, LocationMixin, mock_tab_from_json
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
from xmodule.tests import DATA_DIR, CourseComparisonTest
log = logging.getLogger(__name__)
class CommonMixedModuleStoreSetup(CourseComparisonTest):
"""
Quasi-superclass which tests Location based apps against both split and mongo dbs (Locator and
Location-based dbs)
"""
HOST = MONGO_HOST
PORT = MONGO_PORT_NUM
DB = 'test_mongo_%s' % uuid4().hex[:5]
COLLECTION = 'modulestore'
ASSET_COLLECTION = 'assetstore'
FS_ROOT = DATA_DIR
DEFAULT_CLASS = 'xmodule.raw_module.RawDescriptor'
RENDER_TEMPLATE = lambda t_n, d, ctx=None, nsp='main': ''
MONGO_COURSEID = 'MITx/999/2013_Spring'
XML_COURSEID1 = 'edX/toy/2012_Fall'
XML_COURSEID2 = 'edX/simple/2012_Fall'
BAD_COURSE_ID = 'edX/simple'
modulestore_options = {
'default_class': DEFAULT_CLASS,
'fs_root': DATA_DIR,
'render_template': RENDER_TEMPLATE,
'xblock_mixins': (EditInfoMixin, InheritanceMixin, LocationMixin, XModuleMixin),
}
DOC_STORE_CONFIG = {
'host': HOST,
'port': PORT,
'db': DB,
'collection': COLLECTION,
'asset_collection': ASSET_COLLECTION,
}
MAPPINGS = {
XML_COURSEID1: 'xml',
XML_COURSEID2: 'xml',
BAD_COURSE_ID: 'xml',
}
OPTIONS = {
'stores': [
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.draft.DraftModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': modulestore_options
},
{
'NAME': 'xml',
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'xblock_mixins': modulestore_options['xblock_mixins'],
}
},
],
'xblock_mixins': modulestore_options['xblock_mixins'],
}
def _compare_ignore_version(self, loc1, loc2, msg=None):
"""
AssertEqual replacement for CourseLocator
"""
if loc1.for_branch(None) != loc2.for_branch(None):
self.fail(self._formatMessage(msg, u"{} != {}".format(unicode(loc1), unicode(loc2))))
def setUp(self):
"""
Set up the database for testing
"""
super(CommonMixedModuleStoreSetup, self).setUp()
self.exclude_field(None, 'wiki_slug')
self.exclude_field(None, 'xml_attributes')
self.exclude_field(None, 'parent')
self.ignore_asset_key('_id')
self.ignore_asset_key('uploadDate')
self.ignore_asset_key('content_son')
self.ignore_asset_key('thumbnail_location')
self.options = getattr(self, 'options', self.OPTIONS)
self.connection = pymongo.MongoClient(
host=self.HOST,
port=self.PORT,
tz_aware=True,
)
self.connection.drop_database(self.DB)
self.addCleanup(self.connection.drop_database, self.DB)
self.addCleanup(self.connection.close)
self.addTypeEqualityFunc(BlockUsageLocator, '_compare_ignore_version')
self.addTypeEqualityFunc(CourseLocator, '_compare_ignore_version')
# define attrs which get set in initdb to quell pylint
self.writable_chapter_location = self.store = self.fake_location = self.xml_chapter_location = None
self.course_locations = {}
self.user_id = ModuleStoreEnum.UserID.test
# pylint: disable=invalid-name
def _create_course(self, course_key):
"""
Create a course w/ one item in the persistence store using the given course & item location.
"""
# create course
with self.store.bulk_operations(course_key):
self.course = self.store.create_course(course_key.org, course_key.course, course_key.run, self.user_id)
if isinstance(self.course.id, CourseLocator):
self.course_locations[self.MONGO_COURSEID] = self.course.location
else:
self.assertEqual(self.course.id, course_key)
# create chapter
chapter = self.store.create_child(self.user_id, self.course.location, 'chapter', block_id='Overview')
self.writable_chapter_location = chapter.location
def _create_block_hierarchy(self):
"""
Creates a hierarchy of blocks for testing
Each block's (version_agnostic) location is assigned as a field of the class and can be easily accessed
"""
BlockInfo = namedtuple('BlockInfo', 'field_name, category, display_name, sub_tree')
trees = [
BlockInfo(
'chapter_x', 'chapter', 'Chapter_x', [
BlockInfo(
'sequential_x1', 'sequential', 'Sequential_x1', [
BlockInfo(
'vertical_x1a', 'vertical', 'Vertical_x1a', [
BlockInfo('problem_x1a_1', 'problem', 'Problem_x1a_1', []),
BlockInfo('problem_x1a_2', 'problem', 'Problem_x1a_2', []),
BlockInfo('problem_x1a_3', 'problem', 'Problem_x1a_3', []),
BlockInfo('html_x1a_1', 'html', 'HTML_x1a_1', []),
]
),
BlockInfo(
'vertical_x1b', 'vertical', 'Vertical_x1b', []
)
]
),
BlockInfo(
'sequential_x2', 'sequential', 'Sequential_x2', []
)
]
),
BlockInfo(
'chapter_y', 'chapter', 'Chapter_y', [
BlockInfo(
'sequential_y1', 'sequential', 'Sequential_y1', [
BlockInfo(
'vertical_y1a', 'vertical', 'Vertical_y1a', [
BlockInfo('problem_y1a_1', 'problem', 'Problem_y1a_1', []),
BlockInfo('problem_y1a_2', 'problem', 'Problem_y1a_2', []),
BlockInfo('problem_y1a_3', 'problem', 'Problem_y1a_3', []),
]
)
]
)
]
)
]
def create_sub_tree(parent, block_info):
"""
recursive function that creates the given block and its descendants
"""
block = self.store.create_child(
self.user_id, parent.location,
block_info.category, block_id=block_info.display_name,
fields={'display_name': block_info.display_name},
)
for tree in block_info.sub_tree:
create_sub_tree(block, tree)
setattr(self, block_info.field_name, block.location)
with self.store.bulk_operations(self.course.id):
for tree in trees:
create_sub_tree(self.course, tree)
def _course_key_from_string(self, string):
"""
Get the course key for the given course string
"""
return self.course_locations[string].course_key
def _has_changes(self, location):
"""
Helper function that loads the item before calling has_changes
"""
return self.store.has_changes(self.store.get_item(location))
# pylint: disable=dangerous-default-value
def _initialize_mixed(self, mappings=MAPPINGS, contentstore=None):
"""
initializes the mixed modulestore.
"""
self.store = MixedModuleStore(
contentstore, create_modulestore_instance=create_modulestore_instance,
mappings=mappings,
**self.options
)
self.addCleanup(self.store.close_all_connections)
def initdb(self, default):
"""
Initialize the database and create one test course in it
"""
# set the default modulestore
store_configs = self.options['stores']
for index in range(len(store_configs)):
if store_configs[index]['NAME'] == default:
if index > 0:
store_configs[index], store_configs[0] = store_configs[0], store_configs[index]
break
self._initialize_mixed()
# convert to CourseKeys
self.course_locations = {
course_id: CourseLocator.from_string(course_id)
for course_id in [self.MONGO_COURSEID, self.XML_COURSEID1, self.XML_COURSEID2]
}
# and then to the root UsageKey
self.course_locations = {
course_id: course_key.make_usage_key('course', course_key.run)
for course_id, course_key in self.course_locations.iteritems() # pylint: disable=maybe-no-member
}
mongo_course_key = self.course_locations[self.MONGO_COURSEID].course_key
self.fake_location = self.store.make_course_key(mongo_course_key.org, mongo_course_key.course, mongo_course_key.run).make_usage_key('vertical', 'fake')
self.xml_chapter_location = self.course_locations[self.XML_COURSEID1].replace(
category='chapter', name='Overview'
)
self._create_course(self.course_locations[self.MONGO_COURSEID].course_key)
@ddt.ddt
@attr('mongo')
class TestMixedModuleStore(CommonMixedModuleStoreSetup):
"""
Tests of the MixedModulestore interface methods.
"""
@ddt.data('draft', 'split')
def test_get_modulestore_type(self, default_ms):
"""
Make sure we get back the store type we expect for given mappings
"""
self.initdb(default_ms)
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.XML_COURSEID1)), ModuleStoreEnum.Type.xml
)
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.XML_COURSEID2)), ModuleStoreEnum.Type.xml
)
mongo_ms_type = ModuleStoreEnum.Type.mongo if default_ms == 'draft' else ModuleStoreEnum.Type.split
self.assertEqual(self.store.get_modulestore_type(
self._course_key_from_string(self.MONGO_COURSEID)), mongo_ms_type
)
# try an unknown mapping, it should be the 'default' store
self.assertEqual(self.store.get_modulestore_type(
SlashSeparatedCourseKey('foo', 'bar', '2012_Fall')), mongo_ms_type
)
@ddt.data('draft', 'split')
def test_get_modulestore_cache(self, default_ms):
"""
Make sure we cache discovered course mappings
"""
self.initdb(default_ms)
# unset mappings
self.store.mappings = {}
course_key = self.course_locations[self.MONGO_COURSEID].course_key
with check_exact_number_of_calls(self.store.default_modulestore, 'has_course', 1):
self.assertEqual(self.store.default_modulestore, self.store._get_modulestore_for_courselike(course_key)) # pylint: disable=protected-access
self.assertIn(course_key, self.store.mappings)
self.assertEqual(self.store.default_modulestore, self.store._get_modulestore_for_courselike(course_key)) # pylint: disable=protected-access
@ddt.data(*itertools.product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(True, False)
))
@ddt.unpack
def test_duplicate_course_error(self, default_ms, reset_mixed_mappings):
"""
        Make sure we get a DuplicateCourseError when creating the same course twice
"""
self._initialize_mixed(mappings={})
with self.store.default_store(default_ms):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
if reset_mixed_mappings:
self.store.mappings = {}
with self.assertRaises(DuplicateCourseError):
self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
# Draft:
# problem: One lookup to locate an item that exists
# fake: one w/ wildcard version
# split has one lookup for the course and then one for the course items
@ddt.data(('draft', [1, 1], 0), ('split', [2, 2], 0))
@ddt.unpack
def test_has_item(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
self.assertTrue(self.store.has_item(self.course_locations[self.XML_COURSEID1]))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertTrue(self.store.has_item(self.problem_x1a_1))
# try negative cases
self.assertFalse(self.store.has_item(
self.course_locations[self.XML_COURSEID1].replace(name='not_findable', category='problem')
))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertFalse(self.store.has_item(self.fake_location))
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.has_item(self.fake_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# draft queries:
# problem: find draft item, find all items pertinent to inheritance computation, find parent
# non-existent problem: find draft, find published
# split:
# problem: active_versions, structure
# non-existent problem: ditto
@ddt.data(('draft', [3, 2], 0), ('split', [2, 2], 0))
@ddt.unpack
def test_get_item(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
self.assertIsNotNone(self.store.get_item(self.course_locations[self.XML_COURSEID1]))
with check_mongo_calls(max_find.pop(0), max_send):
self.assertIsNotNone(self.store.get_item(self.problem_x1a_1))
# try negative cases
with self.assertRaises(ItemNotFoundError):
self.store.get_item(
self.course_locations[self.XML_COURSEID1].replace(name='not_findable', category='problem')
)
with check_mongo_calls(max_find.pop(0), max_send):
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.fake_location)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.get_item(self.fake_location, revision=ModuleStoreEnum.RevisionOption.draft_preferred)
# Draft:
# wildcard query, 6! load pertinent items for inheritance calls, load parents, course root fetch (why)
# Split:
# active_versions (with regex), structure, and spurious active_versions refetch
@ddt.data(('draft', 14, 0), ('split', 3, 0))
@ddt.unpack
def test_get_items(self, default_ms, max_find, max_send):
self.initdb(default_ms)
self._create_block_hierarchy()
course_locn = self.course_locations[self.XML_COURSEID1]
# NOTE: use get_course if you just want the course. get_items is expensive
modules = self.store.get_items(course_locn.course_key, qualifiers={'category': 'course'})
self.assertEqual(len(modules), 1)
self.assertEqual(modules[0].location, course_locn)
course_locn = self.course_locations[self.MONGO_COURSEID]
with check_mongo_calls(max_find, max_send):
modules = self.store.get_items(course_locn.course_key, qualifiers={'category': 'problem'})
self.assertEqual(len(modules), 6)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.get_items(
self.course_locations[self.MONGO_COURSEID].course_key,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
# draft: get draft, get ancestors up to course (2-6), compute inheritance
# sends: update problem and then each ancestor up to course (edit info)
# split: active_versions, definitions (calculator field), structures
# 2 sends to update index & structure (note, it would also be definition if a content field changed)
@ddt.data(('draft', 7, 5), ('split', 3, 2))
@ddt.unpack
def test_update_item(self, default_ms, max_find, max_send):
"""
Update should fail for r/o dbs and succeed for r/w ones
"""
self.initdb(default_ms)
self._create_block_hierarchy()
course = self.store.get_course(self.course_locations[self.XML_COURSEID1].course_key)
# if following raised, then the test is really a noop, change it
self.assertFalse(course.show_calculator, "Default changed making test meaningless")
course.show_calculator = True
with self.assertRaises(NotImplementedError): # ensure it doesn't allow writing
self.store.update_item(course, self.user_id)
# now do it for a r/w db
problem = self.store.get_item(self.problem_x1a_1)
# if following raised, then the test is really a noop, change it
self.assertNotEqual(problem.max_attempts, 2, "Default changed making test meaningless")
problem.max_attempts = 2
with check_mongo_calls(max_find, max_send):
problem = self.store.update_item(problem, self.user_id)
self.assertEqual(problem.max_attempts, 2, "Update didn't persist")
@ddt.data('draft', 'split')
def test_has_changes_direct_only(self, default_ms):
"""
Tests that has_changes() returns false when a new xblock in a direct only category is checked
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create dummy direct only xblocks
chapter = self.store.create_item(
self.user_id,
test_course.id,
'chapter',
block_id='vertical_container'
)
# Check that neither xblock has changes
self.assertFalse(self.store.has_changes(test_course))
self.assertFalse(self.store.has_changes(chapter))
@ddt.data('draft', 'split')
def test_has_changes(self, default_ms):
"""
Tests that has_changes() only returns true when changes are present
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
xblock = self.store.create_item(
self.user_id,
test_course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self.store.has_changes(xblock))
# Publish and verify that there are no unpublished changes
newXBlock = self.store.publish(xblock.location, self.user_id)
self.assertFalse(self.store.has_changes(newXBlock))
# Change the component, then check that there now are changes
component = self.store.get_item(xblock.location)
component.display_name = 'Changed Display Name'
component = self.store.update_item(component, self.user_id)
self.assertTrue(self.store.has_changes(component))
# Publish and verify again
component = self.store.publish(component.location, self.user_id)
self.assertFalse(self.store.has_changes(component))
def setup_has_changes(self, default_ms):
"""
Common set up for has_changes tests below.
Returns a dictionary of useful location maps for testing.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
locations = {
'grandparent': self.chapter_x,
'parent_sibling': self.sequential_x2,
'parent': self.sequential_x1,
'child_sibling': self.vertical_x1b,
'child': self.vertical_x1a,
}
# Publish the vertical units
self.store.publish(locations['parent_sibling'], self.user_id)
self.store.publish(locations['parent'], self.user_id)
return locations
@ddt.data('draft', 'split')
def test_has_changes_ancestors(self, default_ms):
"""
Tests that has_changes() returns true on ancestors when a child is changed
"""
locations = self.setup_has_changes(default_ms)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
# Change the child
child = self.store.get_item(locations['child'])
child.display_name = 'Changed Display Name'
self.store.update_item(child, self.user_id)
# All ancestors should have changes, but not siblings
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
self.assertTrue(self._has_changes(locations['child']))
self.assertFalse(self._has_changes(locations['parent_sibling']))
self.assertFalse(self._has_changes(locations['child_sibling']))
# Publish the unit with changes
self.store.publish(locations['parent'], self.user_id)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
@ddt.data('draft', 'split')
def test_has_changes_publish_ancestors(self, default_ms):
"""
Tests that has_changes() returns false after a child is published only if all children are unchanged
"""
locations = self.setup_has_changes(default_ms)
# Verify that there are no unpublished changes
for key in locations:
self.assertFalse(self._has_changes(locations[key]))
# Change both children
child = self.store.get_item(locations['child'])
child_sibling = self.store.get_item(locations['child_sibling'])
child.display_name = 'Changed Display Name'
child_sibling.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=self.user_id)
self.store.update_item(child_sibling, user_id=self.user_id)
# Verify that ancestors have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Publish one child
self.store.publish(locations['child_sibling'], self.user_id)
# Verify that ancestors still have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Publish the other child
self.store.publish(locations['child'], self.user_id)
# Verify that ancestors now have no changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
@ddt.data('draft', 'split')
def test_has_changes_add_remove_child(self, default_ms):
"""
Tests that has_changes() returns true for the parent when a child with changes is added
and false when that child is removed.
"""
locations = self.setup_has_changes(default_ms)
# Test that the ancestors don't have changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
# Create a new child and attach it to parent
self.store.create_child(
self.user_id,
locations['parent'],
'vertical',
block_id='new_child',
)
# Verify that the ancestors now have changes
self.assertTrue(self._has_changes(locations['grandparent']))
self.assertTrue(self._has_changes(locations['parent']))
# Remove the child from the parent
parent = self.store.get_item(locations['parent'])
parent.children = [locations['child'], locations['child_sibling']]
self.store.update_item(parent, user_id=self.user_id)
# Verify that ancestors now have no changes
self.assertFalse(self._has_changes(locations['grandparent']))
self.assertFalse(self._has_changes(locations['parent']))
@ddt.data('draft', 'split')
def test_has_changes_non_direct_only_children(self, default_ms):
"""
Tests that has_changes() returns true after editing the child of a vertical (both not direct only categories).
"""
self.initdb(default_ms)
parent = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='parent',
)
child = self.store.create_child(
self.user_id,
parent.location,
'html',
block_id='child',
)
self.store.publish(parent.location, self.user_id)
# Verify that there are no changes
self.assertFalse(self._has_changes(parent.location))
self.assertFalse(self._has_changes(child.location))
# Change the child
child.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=self.user_id)
# Verify that both parent and child have changes
self.assertTrue(self._has_changes(parent.location))
self.assertTrue(self._has_changes(child.location))
@ddt.data(*itertools.product(
('draft', 'split'),
(ModuleStoreEnum.Branch.draft_preferred, ModuleStoreEnum.Branch.published_only)
))
@ddt.unpack
def test_has_changes_missing_child(self, default_ms, default_branch):
"""
Tests that has_changes() does not throw an exception when a child doesn't exist.
"""
self.initdb(default_ms)
with self.store.branch_setting(default_branch, self.course.id):
# Create the parent and point it to a fake child
parent = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='parent',
)
parent.children += [self.course.id.make_usage_key('vertical', 'does_not_exist')]
parent = self.store.update_item(parent, self.user_id)
# Check the parent for changes should return True and not throw an exception
self.assertTrue(self.store.has_changes(parent))
# Draft
# Find: find parents (definition.children query), get parent, get course (fill in run?),
# find parents of the parent (course), get inheritance items,
# get item (to delete subtree), get inheritance again.
# Sends: delete item, update parent
# Split
# Find: active_versions, 2 structures (published & draft), definition (unnecessary)
# Sends: updated draft and published structures and active_versions
@ddt.data(('draft', 7, 2), ('split', 4, 3))
@ddt.unpack
def test_delete_item(self, default_ms, max_find, max_send):
"""
Delete should reject on r/o db and work on r/w one
"""
self.initdb(default_ms)
if default_ms == 'draft' and mongo_uses_error_check(self.store):
max_find += 1
# r/o try deleting the chapter (is here to ensure it can't be deleted)
with self.assertRaises(NotImplementedError):
self.store.delete_item(self.xml_chapter_location, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, self.writable_chapter_location.course_key):
with check_mongo_calls(max_find, max_send):
self.store.delete_item(self.writable_chapter_location, self.user_id)
# verify it's gone
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.writable_chapter_location)
# verify it's gone from published too
with self.assertRaises(ItemNotFoundError):
self.store.get_item(self.writable_chapter_location, revision=ModuleStoreEnum.RevisionOption.published_only)
# Draft:
# queries: find parent (definition.children), count versions of item, get parent, count grandparents,
# inheritance items, draft item, draft child, inheritance
# sends: delete draft vertical and update parent
# Split:
# queries: active_versions, draft and published structures, definition (unnecessary)
# sends: update published (why?), draft, and active_versions
@ddt.data(('draft', 9, 2), ('split', 2, 2))
@ddt.unpack
def test_delete_private_vertical(self, default_ms, max_find, max_send):
"""
Because old mongo treated verticals as the first layer that could be a draft, deletion has some
interesting behavioral properties which this test exercises.
"""
self.initdb(default_ms)
if default_ms == 'draft' and mongo_uses_error_check(self.store):
max_find += 1
# create and delete a private vertical with private children
private_vert = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, self.course_locations[self.MONGO_COURSEID],
'vertical', block_id='private'
)
private_leaf = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, private_vert.location, 'html', block_id='private_leaf'
)
# verify pre delete state (just to verify that the test is valid)
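# In split, usage keys are version-specific, so the freshly created locations are pinned to older
# structure versions; mapping them onto the current HEAD version_guid (below) is presumably what lets
# the has_item/children checks see the latest structure. Old-mongo locations carry no version_guid and
# are used as-is.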
if hasattr(private_vert.location, 'version_guid'):
# change to the HEAD version
vert_loc = private_vert.location.for_version(private_leaf.location.version_guid)
else:
vert_loc = private_vert.location
self.assertTrue(self.store.has_item(vert_loc))
self.assertTrue(self.store.has_item(private_leaf.location))
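# get_course's second positional argument is assumed to be the prefetch depth; 0 here fetches only the
# course block itself, leaving children to be loaded lazily.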
course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key, 0)
self.assertIn(vert_loc, course.children)
# delete the vertical and ensure the course no longer points to it
with check_mongo_calls(max_find, max_send):
self.store.delete_item(vert_loc, self.user_id)
course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key, 0)
if hasattr(private_vert.location, 'version_guid'):
# change to the HEAD version
vert_loc = private_vert.location.for_version(course.location.version_guid)
leaf_loc = private_leaf.location.for_version(course.location.version_guid)
else:
vert_loc = private_vert.location
leaf_loc = private_leaf.location
self.assertFalse(self.store.has_item(vert_loc))
self.assertFalse(self.store.has_item(leaf_loc))
self.assertNotIn(vert_loc, course.children)
# Draft:
# find: find parent (definition.children) 2x, find draft item, get inheritance items
# send: one delete query for specific item
# Split:
# find: active_version & structure
# send: update structure and active_versions
@ddt.data(('draft', 4, 1), ('split', 2, 2))
@ddt.unpack
def test_delete_draft_vertical(self, default_ms, max_find, max_send):
"""
Test deleting a draft vertical which has a published version.
"""
self.initdb(default_ms)
# reproduce bug STUD-1965
# create and delete a private vertical with private children
private_vert = self.store.create_child(
# don't use course_location as it may not be the repr
self.user_id, self.course_locations[self.MONGO_COURSEID], 'vertical', block_id='publish'
)
private_leaf = self.store.create_child(
self.user_id, private_vert.location, 'html', block_id='bug_leaf'
)
# verify that an error is raised when the revision is not valid
with self.assertRaises(UnsupportedRevisionError):
self.store.delete_item(
private_leaf.location,
self.user_id,
revision=ModuleStoreEnum.RevisionOption.draft_preferred
)
self.store.publish(private_vert.location, self.user_id)
private_leaf.display_name = 'change me'
private_leaf = self.store.update_item(private_leaf, self.user_id)
# test succeeds if delete succeeds w/o error
if default_ms == 'draft' and mongo_uses_error_check(self.store):
max_find += 1
with check_mongo_calls(max_find, max_send):
self.store.delete_item(private_leaf.location, self.user_id)
# Draft:
# 1) find all courses (wildcard),
# 2) get each course 1 at a time (1 course),
# 3) wildcard split if it has any (1) but it doesn't
# Split:
# 1) wildcard split search,
# 2-4) active_versions, structure, definition (s/b lazy; so, unnecessary)
# 5) wildcard draft mongo which has none
@ddt.data(('draft', 3, 0), ('split', 5, 0))
@ddt.unpack
def test_get_courses(self, default_ms, max_find, max_send):
self.initdb(default_ms)
# we should have 3 total courses across all stores
with check_mongo_calls(max_find, max_send):
courses = self.store.get_courses()
course_ids = [course.location for course in courses]
self.assertEqual(len(courses), 3, "Not 3 courses: {}".format(course_ids))
self.assertIn(self.course_locations[self.MONGO_COURSEID], course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID1], course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID2], course_ids)
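# remove_branch=True presumably strips the branch qualifier from the returned course keys, which is
# what allows the draft-preferred and published-only listings to be compared by id directly below.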
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
draft_courses = self.store.get_courses(remove_branch=True)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
published_courses = self.store.get_courses(remove_branch=True)
self.assertEquals([c.id for c in draft_courses], [c.id for c in published_courses])
@ddt.data('draft', 'split')
def test_create_child_detached_tabs(self, default_ms):
"""
Test the 'create_child' method with a detached category ('static_tab')
to check that the new static tab is not added as a direct child of the course.
"""
self.initdb(default_ms)
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(mongo_course.children), 1)
# create a static tab of the course
self.store.create_child(
self.user_id,
self.course.location,
'static_tab'
)
# now check that the course has same number of children
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(mongo_course.children), 1)
def test_xml_get_courses(self):
"""
Test that the xml modulestore only loaded the courses listed in the mappings.
"""
self.initdb('draft')
xml_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml) # pylint: disable=protected-access
courses = xml_store.get_courses()
self.assertEqual(len(courses), 2)
course_ids = [course.id for course in courses]
self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, course_ids)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, course_ids)
# this course is in the directory from which we loaded courses but not in the map
self.assertNotIn("edX/toy/TT_2012_Fall", course_ids)
def test_xml_no_write(self):
"""
Test that the xml modulestore doesn't allow write ops.
"""
self.initdb('draft')
xml_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml) # pylint: disable=protected-access
# the important thing is not which exception it raises but that it raises an exception
with self.assertRaises(AttributeError):
xml_store.create_course("org", "course", "run", self.user_id)
# draft is 2: find out which ms owns course, get item
# split: active_versions, structure, definition (to load course wiki string)
@ddt.data(('draft', 2, 0), ('split', 3, 0))
@ddt.unpack
def test_get_course(self, default_ms, max_find, max_send):
"""
This test is here for performance comparison, not functionality. It measures the cost of
getting an item whose scope.content fields are accessed.
"""
self.initdb(default_ms)
with check_mongo_calls(max_find, max_send):
course = self.store.get_item(self.course_locations[self.MONGO_COURSEID])
self.assertEqual(course.id, self.course_locations[self.MONGO_COURSEID].course_key)
course = self.store.get_item(self.course_locations[self.XML_COURSEID1])
self.assertEqual(course.id, self.course_locations[self.XML_COURSEID1].course_key)
@ddt.data('draft', 'split')
def test_get_library(self, default_ms):
"""
Test that create_library and get_library work regardless of the default modulestore.
Other tests of MixedModulestore support are in test_libraries.py but this one must
be done here so we can test the configuration where Draft/old is the first modulestore.
"""
self.initdb(default_ms)
with self.store.default_store(ModuleStoreEnum.Type.split): # The CMS also wraps create_library like this
library = self.store.create_library("org", "lib", self.user_id, {"display_name": "Test Library"})
library_key = library.location.library_key
self.assertIsInstance(library_key, LibraryLocator)
# Now load with get_library and make sure it works:
library = self.store.get_library(library_key)
self.assertEqual(library.location.library_key, library_key)
# Clear the mappings so we can test get_library code path without mapping set:
self.store.mappings.clear()
library = self.store.get_library(library_key)
self.assertEqual(library.location.library_key, library_key)
# Note: this doesn't test getting a public item via draft_preferred, which for draft would take 2 hits
# (split would still take only 2)
# Draft: get_parent
# Split: active_versions, structure
@ddt.data(('draft', 1, 0), ('split', 2, 0))
@ddt.unpack
def test_get_parent_locations(self, default_ms, max_find, max_send):
"""
Test a simple get_parent_location for a direct-only category (i.e., always published).
"""
self.initdb(default_ms)
self._create_block_hierarchy()
with check_mongo_calls(max_find, max_send):
parent = self.store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
parent = self.store.get_parent_location(self.xml_chapter_location)
self.assertEqual(parent, self.course_locations[self.XML_COURSEID1])
def verify_get_parent_locations_results(self, expected_results):
"""
Verifies the results of calling get_parent_locations matches expected_results.
"""
for child_location, parent_location, revision in expected_results:
self.assertEqual(
parent_location,
self.store.get_parent_location(child_location, revision=revision)
)
@ddt.data('draft', 'split')
def test_get_parent_locations_moved_child(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
# publish the course
self.course = self.store.publish(self.course.location, self.user_id)
with self.store.bulk_operations(self.course.id):
# make drafts of verticals
self.store.convert_to_draft(self.vertical_x1a, self.user_id)
self.store.convert_to_draft(self.vertical_y1a, self.user_id)
# move child problem_x1a_1 to vertical_y1a
child_to_move_location = self.problem_x1a_1
new_parent_location = self.vertical_y1a
old_parent_location = self.vertical_x1a
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
old_parent = self.store.get_item(child_to_move_location).get_parent()
self.assertEqual(old_parent_location, old_parent.location)
child_to_move_contextualized = child_to_move_location.map_into_course(old_parent.location.course_key)
old_parent.children.remove(child_to_move_contextualized)
self.store.update_item(old_parent, self.user_id)
new_parent = self.store.get_item(new_parent_location)
new_parent.children.append(child_to_move_location)
self.store.update_item(new_parent, self.user_id)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
self.assertEqual(new_parent_location, self.store.get_item(child_to_move_location).get_parent().location)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertEqual(old_parent_location, self.store.get_item(child_to_move_location).get_parent().location)
old_parent_published_location = old_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
self.verify_get_parent_locations_results([
(child_to_move_location, new_parent_location, None),
(child_to_move_location, new_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_move_location, old_parent_published_location, ModuleStoreEnum.RevisionOption.published_only),
])
# publish the course again
self.store.publish(self.course.location, self.user_id)
new_parent_published_location = new_parent_location.for_branch(ModuleStoreEnum.BranchName.published)
self.verify_get_parent_locations_results([
(child_to_move_location, new_parent_location, None),
(child_to_move_location, new_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_move_location, new_parent_published_location, ModuleStoreEnum.RevisionOption.published_only),
])
@ddt.data('draft')
def test_get_parent_locations_deleted_child(self, default_ms):
self.initdb(default_ms)
self._create_block_hierarchy()
# publish the course
self.store.publish(self.course.location, self.user_id)
# make draft of vertical
self.store.convert_to_draft(self.vertical_y1a, self.user_id)
# delete child problem_y1a_1
child_to_delete_location = self.problem_y1a_1
old_parent_location = self.vertical_y1a
self.store.delete_item(child_to_delete_location, self.user_id)
self.verify_get_parent_locations_results([
(child_to_delete_location, old_parent_location, None),
# Note: The following could be an unexpected result, but we want to avoid an extra database call
(child_to_delete_location, old_parent_location, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_delete_location, old_parent_location, ModuleStoreEnum.RevisionOption.published_only),
])
# publish the course again
self.store.publish(self.course.location, self.user_id)
self.verify_get_parent_locations_results([
(child_to_delete_location, None, None),
(child_to_delete_location, None, ModuleStoreEnum.RevisionOption.draft_preferred),
(child_to_delete_location, None, ModuleStoreEnum.RevisionOption.published_only),
])
@ddt.data('draft')
def test_get_parent_location_draft(self, default_ms):
"""
Test that "get_parent_location" method returns first published parent
for a draft component, if it has many possible parents (including
draft parents).
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
mongo_store = self.store._get_modulestore_for_courselike(course_id) # pylint: disable=protected-access
# add another parent (unit) "vertical_x1b" for problem "problem_x1a_1"
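# This bypasses the modulestore API and uses pymongo directly: to_deprecated_son('_id.') is assumed to
# render the location as the old-style structured _id query filter, and $push appends the problem to
# vertical_x1b's children, giving the problem a second (published) parent without going through
# update_item.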
mongo_store.collection.update(
self.vertical_x1b.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
# convert first parent (unit) "vertical_x1a" of problem "problem_x1a_1" to draft
self.store.convert_to_draft(self.vertical_x1a, self.user_id)
item = self.store.get_item(self.vertical_x1a)
self.assertTrue(self.store.has_published_version(item))
# now problem "problem_x1a_1" has 3 parents [vertical_x1a (draft),
# vertical_x1a (published), vertical_x1b (published)]
# check that "get_parent_location" method of draft branch returns first
# published parent "vertical_x1a" without raising "AssertionError" for
# problem location revision
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
# Draft:
# Problem path:
# 1. Get problem
# 2-6. get parent and rest of ancestors up to course
# 7-8. get sequential, compute inheritance
# 8-9. get vertical, compute inheritance
# 10-11. get other vertical_x1b (why?) and compute inheritance
# Split: active_versions & structure
@ddt.data(('draft', [12, 3], 0), ('split', [2, 2], 0))
@ddt.unpack
def test_path_to_location(self, default_ms, num_finds, num_sends):
"""
Make sure that path_to_location works
"""
self.initdb(default_ms)
course_key = self.course_locations[self.MONGO_COURSEID].course_key
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self._create_block_hierarchy()
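# Based on the expected values below, path_to_location appears to return a tuple of
# (course_key, chapter url_name, section url_name, position-within-section), with None for levels that
# don't apply (e.g. a chapter has no containing section or position).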
should_work = (
(self.problem_x1a_2,
(course_key, u"Chapter_x", u"Sequential_x1", '1')),
(self.chapter_x,
(course_key, "Chapter_x", None, None)),
)
for location, expected in should_work:
# each iteration has different find count, pop this iter's find count
with check_mongo_calls(num_finds.pop(0), num_sends):
self.assertEqual(path_to_location(self.store, location), expected)
not_found = (
course_key.make_usage_key('video', 'WelcomeX'),
course_key.make_usage_key('course', 'NotHome'),
)
for location in not_found:
with self.assertRaises(ItemNotFoundError):
path_to_location(self.store, location)
# Orphaned items should not be found.
orphan = course_key.make_usage_key('chapter', 'OrphanChapter')
self.store.create_item(
self.user_id,
orphan.course_key,
orphan.block_type,
block_id=orphan.block_id
)
with self.assertRaises(NoPathToItem):
path_to_location(self.store, orphan)
def test_xml_path_to_location(self):
"""
Make sure that path_to_location works for XML courses: the modulestore passed in
must have the toy and simple courses loaded.
"""
# only needs course_locations set
self.initdb('draft')
course_key = self.course_locations[self.XML_COURSEID1].course_key
should_work = (
(course_key.make_usage_key('video', 'Welcome'),
(course_key, "Overview", "Welcome", None)),
(course_key.make_usage_key('chapter', 'Overview'),
(course_key, "Overview", None, None)),
)
for location, expected in should_work:
self.assertEqual(path_to_location(self.store, location), expected)
not_found = (
course_key.make_usage_key('video', 'WelcomeX'),
course_key.make_usage_key('course', 'NotHome'),
)
for location in not_found:
with self.assertRaises(ItemNotFoundError):
path_to_location(self.store, location)
def test_navigation_index(self):
"""
Make sure that navigation_index correctly parses the various position values that we might get from calls to
path_to_location
"""
self.assertEqual(1, navigation_index("1"))
self.assertEqual(10, navigation_index("10"))
self.assertEqual(None, navigation_index(None))
self.assertEqual(1, navigation_index("1_2"))
self.assertEqual(5, navigation_index("5_2"))
self.assertEqual(7, navigation_index("7_3_5_6_"))
@ddt.data('draft', 'split')
def test_revert_to_published_root_draft(self, default_ms):
"""
Test calling revert_to_published on a draft vertical.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
vertical = self.store.get_item(self.vertical_x1a)
vertical_children_num = len(vertical.children)
self.store.publish(self.course.location, self.user_id)
self.assertFalse(self._has_changes(self.vertical_x1a))
# delete leaf problem (will make parent vertical a draft)
self.store.delete_item(self.problem_x1a_1, self.user_id)
self.assertTrue(self._has_changes(self.vertical_x1a))
draft_parent = self.store.get_item(self.vertical_x1a)
self.assertEqual(vertical_children_num - 1, len(draft_parent.children))
published_parent = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.assertEqual(vertical_children_num, len(published_parent.children))
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_parent = self.store.get_item(self.vertical_x1a)
self.assertEqual(vertical_children_num, len(published_parent.children))
self.assertBlocksEqualByFields(reverted_parent, published_parent)
self.assertFalse(self._has_changes(self.vertical_x1a))
@ddt.data('draft', 'split')
def test_revert_to_published_root_published(self, default_ms):
"""
Test calling revert_to_published on a published vertical with a draft child.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
problem = self.store.get_item(self.problem_x1a_1)
orig_display_name = problem.display_name
# Change display name of problem and update just it (so parent remains published)
problem.display_name = "updated before calling revert"
self.store.update_item(problem, self.user_id)
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_problem = self.store.get_item(self.problem_x1a_1)
self.assertEqual(orig_display_name, reverted_problem.display_name)
@ddt.data('draft', 'split')
def test_revert_to_published_no_draft(self, default_ms):
"""
Test that calling revert_to_published on a vertical with no draft content does nothing.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
orig_vertical = self.store.get_item(self.vertical_x1a)
self.store.revert_to_published(self.vertical_x1a, self.user_id)
reverted_vertical = self.store.get_item(self.vertical_x1a)
self.assertBlocksEqualByFields(orig_vertical, reverted_vertical)
@ddt.data('draft', 'split')
def test_revert_to_published_no_published(self, default_ms):
"""
Test that calling revert_to_published on a vertical with no published version raises an error.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
with self.assertRaises(InvalidVersionError):
self.store.revert_to_published(self.vertical_x1a, self.user_id)
@ddt.data('draft', 'split')
def test_revert_to_published_direct_only(self, default_ms):
"""
Test that calling revert_to_published on a direct-only item is a no-op.
"""
self.initdb(default_ms)
self._create_block_hierarchy()
num_children = len(self.store.get_item(self.sequential_x1).children)
self.store.revert_to_published(self.sequential_x1, self.user_id)
reverted_parent = self.store.get_item(self.sequential_x1)
# It does not discard the child vertical, even though that child is a draft (with no published version)
self.assertEqual(num_children, len(reverted_parent.children))
# Draft: get all items which can be or should have parents
# Split: active_versions, structure
@ddt.data(('draft', 1, 0), ('split', 2, 0))
@ddt.unpack
def test_get_orphans(self, default_ms, max_find, max_send):
"""
Test finding orphans.
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
# orphans
orphan_locations = [
course_id.make_usage_key('chapter', 'OrphanChapter'),
course_id.make_usage_key('vertical', 'OrphanVertical'),
course_id.make_usage_key('problem', 'OrphanProblem'),
course_id.make_usage_key('html', 'OrphanHTML'),
]
# detached items (not considered as orphans)
detached_locations = [
course_id.make_usage_key('static_tab', 'StaticTab'),
course_id.make_usage_key('course_info', 'updates'),
]
for location in (orphan_locations + detached_locations):
self.store.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id
)
with check_mongo_calls(max_find, max_send):
found_orphans = self.store.get_orphans(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertItemsEqual(found_orphans, orphan_locations)
@ddt.data('draft')
def test_get_non_orphan_parents(self, default_ms):
"""
Test finding the non-orphan parent among many possible parents.
"""
self.initdb(default_ms)
course_id = self.course_locations[self.MONGO_COURSEID].course_key
# create parented children
self._create_block_hierarchy()
self.store.publish(self.course.location, self.user_id)
# test that problem "problem_x1a_1" has only one published parent
mongo_store = self.store._get_modulestore_for_courselike(course_id) # pylint: disable=protected-access
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
# add some published orphans
orphan_sequential = course_id.make_usage_key('sequential', 'OrphanSequential')
orphan_vertical = course_id.make_usage_key('vertical', 'OrphanVertical')
orphan_locations = [orphan_sequential, orphan_vertical]
for location in orphan_locations:
self.store.create_item(
self.user_id,
location.course_key,
location.block_type,
block_id=location.block_id
)
self.store.publish(location, self.user_id)
found_orphans = mongo_store.get_orphans(course_id)
self.assertEqual(set(found_orphans), set(orphan_locations))
self.assertEqual(len(set(found_orphans)), 2)
# add orphan vertical and sequential as another parents of problem "problem_x1a_1"
mongo_store.collection.update(
orphan_sequential.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
mongo_store.collection.update(
orphan_vertical.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
# test that "get_parent_location" method of published branch still returns the correct non-orphan parent for
# problem "problem_x1a_1" since the two other parents are orphans
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
parent = mongo_store.get_parent_location(self.problem_x1a_1)
self.assertEqual(parent, self.vertical_x1a)
# now add valid published vertical as another parent of problem
mongo_store.collection.update(
self.sequential_x1.to_deprecated_son('_id.'),
{'$push': {'definition.children': unicode(self.problem_x1a_1)}}
)
# now check that "get_parent_location" method of published branch raises "ReferentialIntegrityError" for
# problem "problem_x1a_1" since it has now 2 valid published parents
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_id):
self.assertTrue(self.store.has_item(self.problem_x1a_1))
with self.assertRaises(ReferentialIntegrityError):
self.store.get_parent_location(self.problem_x1a_1)
@ddt.data('draft')
def test_create_item_from_parent_location(self, default_ms):
"""
Test a code path missed by the above: passing an old-style location as parent but no
new location for the child
"""
self.initdb(default_ms)
self.store.create_child(
self.user_id,
self.course_locations[self.MONGO_COURSEID],
'problem',
block_id='orphan'
)
orphans = self.store.get_orphans(self.course_locations[self.MONGO_COURSEID].course_key)
self.assertEqual(len(orphans), 0, "unexpected orphans: {}".format(orphans))
@ddt.data('draft', 'split')
def test_create_item_populates_edited_info(self, default_ms):
self.initdb(default_ms)
block = self.store.create_item(
self.user_id,
self.course.location.course_key,
'problem'
)
self.assertEqual(self.user_id, block.edited_by)
self.assertGreater(datetime.datetime.now(UTC), block.edited_on)
@ddt.data('draft', 'split')
def test_create_item_populates_subtree_edited_info(self, default_ms):
self.initdb(default_ms)
block = self.store.create_item(
self.user_id,
self.course.location.course_key,
'problem'
)
self.assertEqual(self.user_id, block.subtree_edited_by)
self.assertGreater(datetime.datetime.now(UTC), block.subtree_edited_on)
# Draft: wildcard search of draft and split
# Split: wildcard search of draft and split
@ddt.data(('draft', 2, 0), ('split', 2, 0))
@ddt.unpack
def test_get_courses_for_wiki(self, default_ms, max_find, max_send):
"""
Test the get_courses_for_wiki method
"""
self.initdb(default_ms)
# Test XML wikis
wiki_courses = self.store.get_courses_for_wiki('toy')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(self.course_locations[self.XML_COURSEID1].course_key, wiki_courses)
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, wiki_courses)
# Test Mongo wiki
with check_mongo_calls(max_find, max_send):
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None), # Branch agnostic
wiki_courses
)
self.assertEqual(len(self.store.get_courses_for_wiki('edX.simple.2012_Fall')), 0)
self.assertEqual(len(self.store.get_courses_for_wiki('no_such_wiki')), 0)
# Draft:
# Find: find vertical, find children
# Sends:
# 1. delete all of the published nodes in subtree
# 2. insert vertical as published (deleted in step 1) w/ the deleted problems as children
# 3-6. insert the 3 problems and 1 html as published
# Split: active_versions, 2 structures (pre & post published?)
# Sends:
# - insert structure
# - write index entry
@ddt.data(('draft', 2, 6), ('split', 3, 2))
@ddt.unpack
def test_unpublish(self, default_ms, max_find, max_send):
"""
Test calling unpublish
"""
self.initdb(default_ms)
if default_ms == 'draft' and mongo_uses_error_check(self.store):
max_find += 1
self._create_block_hierarchy()
# publish
self.store.publish(self.course.location, self.user_id)
published_xblock = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
self.assertIsNotNone(published_xblock)
# unpublish
with check_mongo_calls(max_find, max_send):
self.store.unpublish(self.vertical_x1a, self.user_id)
with self.assertRaises(ItemNotFoundError):
self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.published_only
)
# make sure draft version still exists
draft_xblock = self.store.get_item(
self.vertical_x1a,
revision=ModuleStoreEnum.RevisionOption.draft_only
)
self.assertIsNotNone(draft_xblock)
# Draft: specific query for revision None
# Split: active_versions, structure
@ddt.data(('draft', 1, 0), ('split', 2, 0))
@ddt.unpack
def test_has_published_version(self, default_ms, max_find, max_send):
"""
Test the has_published_version method
"""
self.initdb(default_ms)
self._create_block_hierarchy()
# start off as Private
item = self.store.create_child(self.user_id, self.writable_chapter_location, 'problem', 'test_compute_publish_state')
item_location = item.location
with check_mongo_calls(max_find, max_send):
self.assertFalse(self.store.has_published_version(item))
# Private -> Public
self.store.publish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Public -> Private
self.store.unpublish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertFalse(self.store.has_published_version(item))
# Private -> Public
self.store.publish(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Public -> Draft with NO changes
self.store.convert_to_draft(item_location, self.user_id)
item = self.store.get_item(item_location)
self.assertTrue(self.store.has_published_version(item))
# Draft WITH changes
item.display_name = 'new name'
item = self.store.update_item(item, self.user_id)
self.assertTrue(self.store.has_changes(item))
self.assertTrue(self.store.has_published_version(item))
@ddt.data('draft', 'split')
def test_update_edit_info_ancestors(self, default_ms):
"""
Tests that edited_on, edited_by, subtree_edited_on, and subtree_edited_by are set correctly during an update.
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
def check_node(location_key, after, before, edited_by, subtree_after, subtree_before, subtree_by):
"""
Checks that the node given by location_key matches the given edit_info constraints.
"""
node = self.store.get_item(location_key)
if after:
self.assertLess(after, node.edited_on)
self.assertLess(node.edited_on, before)
self.assertEqual(node.edited_by, edited_by)
if subtree_after:
self.assertLess(subtree_after, node.subtree_edited_on)
self.assertLess(node.subtree_edited_on, subtree_before)
self.assertEqual(node.subtree_edited_by, subtree_by)
with self.store.bulk_operations(test_course.id):
# Create a dummy vertical & html to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
block_id='test_vertical'
)
child = self.store.create_child(
self.user_id,
component.location,
'html',
block_id='test_html'
)
sibling = self.store.create_child(
self.user_id,
component.location,
'html',
block_id='test_html_no_change'
)
after_create = datetime.datetime.now(UTC)
# Verify that all nodes were last edited before after_create, by self.user_id
for block in [component, child, sibling]:
check_node(block.location, None, after_create, self.user_id, None, after_create, self.user_id)
# Change the component, then check that there are now changes
component.display_name = 'Changed Display Name'
editing_user = self.user_id - 2
with self.store.bulk_operations(test_course.id): # TNL-764 bulk ops disabled ancestor updates
component = self.store.update_item(component, editing_user)
after_edit = datetime.datetime.now(UTC)
check_node(component.location, after_create, after_edit, editing_user, after_create, after_edit, editing_user)
# but child didn't change
check_node(child.location, None, after_create, self.user_id, None, after_create, self.user_id)
# Change the child
child = self.store.get_item(child.location)
child.display_name = 'Changed Display Name'
self.store.update_item(child, user_id=editing_user)
after_edit = datetime.datetime.now(UTC)
# Verify that child was last edited between after_create and after_edit by editing_user
check_node(child.location, after_create, after_edit, editing_user, after_create, after_edit, editing_user)
# Verify that the ancestors' edit info is unchanged, but their subtree edit info matches the child
check_node(test_course.location, None, after_create, self.user_id, after_create, after_edit, editing_user)
# Verify that others have unchanged edit info
check_node(sibling.location, None, after_create, self.user_id, None, after_create, self.user_id)
@ddt.data('draft', 'split')
def test_update_edit_info(self, default_ms):
"""
Tests that edited_on and edited_by are set correctly during an update
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
# Create a dummy component to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
)
# Record the original edit time and verify that the creating user made the component
self.assertEqual(component.edited_by, self.user_id)
old_edited_on = component.edited_on
edit_user = self.user_id - 2
# Change the component
component.display_name = 'Changed'
self.store.update_item(component, edit_user)
updated_component = self.store.get_item(component.location)
# Verify the ordering of edit times and that edit_user made the edit
self.assertLess(old_edited_on, updated_component.edited_on)
self.assertEqual(updated_component.edited_by, edit_user)
@ddt.data('draft', 'split')
def test_update_published_info(self, default_ms):
"""
Tests that published_on and published_by are set correctly
"""
self.initdb(default_ms)
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
publish_user = 456
# Create a dummy component to test against
component = self.store.create_child(
self.user_id,
test_course.location,
'vertical',
)
# Store the current time, then publish
old_time = datetime.datetime.now(UTC)
self.store.publish(component.location, publish_user)
updated_component = self.store.get_item(component.location)
# Verify the time order and that publish_user caused publication
self.assertLessEqual(old_time, updated_component.published_on)
self.assertEqual(updated_component.published_by, publish_user)
# Verify that changing the item doesn't unset the published info
updated_component.display_name = 'changed'
self.store.update_item(updated_component, self.user_id)
updated_component = self.store.get_item(updated_component.location)
self.assertLessEqual(old_time, updated_component.published_on)
self.assertEqual(updated_component.published_by, publish_user)
@ddt.data('draft', 'split')
def test_auto_publish(self, default_ms):
"""
Test that the correct things have been published automatically
Assumptions:
* we auto-publish courses, chapters, sequentials
* we don't auto-publish problems
"""
self.initdb(default_ms)
# test create_course to make sure we are autopublishing
test_course = self.store.create_course('testx', 'GreekHero', 'test_run', self.user_id)
self.assertTrue(self.store.has_published_version(test_course))
test_course_key = test_course.id
# test create_item of direct-only category to make sure we are autopublishing
chapter = self.store.create_child(self.user_id, test_course.location, 'chapter', 'Overview')
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertIn(
chapter.location,
self.store.get_item(test_course.location).children,
)
self.assertTrue(self.store.has_published_version(chapter))
chapter_location = chapter.location
# test create_child of direct-only category to make sure we are autopublishing
sequential = self.store.create_child(self.user_id, chapter_location, 'sequential', 'Sequence')
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertIn(
sequential.location,
self.store.get_item(chapter_location).children,
)
self.assertTrue(self.store.has_published_version(sequential))
# test update_item of direct-only category to make sure we are autopublishing
sequential.display_name = 'sequential1'
sequential = self.store.update_item(sequential, self.user_id)
self.assertTrue(self.store.has_published_version(sequential))
# test delete_item of direct-only category to make sure we are autopublishing
self.store.delete_item(sequential.location, self.user_id, revision=ModuleStoreEnum.RevisionOption.all)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
self.assertNotIn(
sequential.location,
self.store.get_item(chapter_location).children,
)
chapter = self.store.get_item(chapter.location.for_branch(None))
self.assertTrue(self.store.has_published_version(chapter))
# test create_child of NOT direct-only category to make sure we aren't autopublishing
problem_child = self.store.create_child(self.user_id, chapter_location, 'problem', 'Problem_Child')
self.assertFalse(self.store.has_published_version(problem_child))
# test create_item of NOT direct-only category to make sure we aren't autopublishing
problem_item = self.store.create_item(self.user_id, test_course_key, 'problem', 'Problem_Item')
self.assertFalse(self.store.has_published_version(problem_item))
# test update_item of NOT direct-only category to make sure we aren't autopublishing
problem_item.display_name = 'Problem_Item1'
problem_item = self.store.update_item(problem_item, self.user_id)
self.assertFalse(self.store.has_published_version(problem_item))
# test delete_item of NOT direct-only category to make sure we aren't autopublishing
self.store.delete_item(problem_child.location, self.user_id)
chapter = self.store.get_item(chapter.location.for_branch(None))
self.assertTrue(self.store.has_published_version(chapter))
@ddt.data('draft', 'split')
def test_get_courses_for_wiki_shared(self, default_ms):
"""
Test two courses sharing the same wiki
"""
self.initdb(default_ms)
# verify initial state: the Mongo course should initially be associated with the '999' wiki
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None), # Branch agnostic
wiki_courses
)
# set Mongo course to share the wiki with simple course
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
mongo_course.wiki_slug = 'simple'
self.store.update_item(mongo_course, self.user_id)
# now mongo_course should not be retrievable with old wiki_slug
wiki_courses = self.store.get_courses_for_wiki('999')
self.assertEqual(len(wiki_courses), 0)
# but there should be two courses with wiki_slug 'simple'
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 2)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
self.assertIn(self.course_locations[self.XML_COURSEID2].course_key, wiki_courses)
# configure mongo course to use unique wiki_slug.
mongo_course = self.store.get_course(self.course_locations[self.MONGO_COURSEID].course_key)
mongo_course.wiki_slug = 'MITx.999.2013_Spring'
self.store.update_item(mongo_course, self.user_id)
# it should be retrievable with its new wiki_slug
wiki_courses = self.store.get_courses_for_wiki('MITx.999.2013_Spring')
self.assertEqual(len(wiki_courses), 1)
self.assertIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
# and NOT retrievable with its old wiki_slug
wiki_courses = self.store.get_courses_for_wiki('simple')
self.assertEqual(len(wiki_courses), 1)
self.assertNotIn(
self.course_locations[self.MONGO_COURSEID].course_key.replace(branch=None),
wiki_courses
)
self.assertIn(
self.course_locations[self.XML_COURSEID2].course_key,
wiki_courses
)
@ddt.data('draft', 'split')
def test_branch_setting(self, default_ms):
"""
Test the branch_setting context manager
"""
self.initdb(default_ms)
self._create_block_hierarchy()
problem_location = self.problem_x1a_1.for_branch(None)
problem_original_name = 'Problem_x1a_1'
course_key = problem_location.course_key
problem_new_name = 'New Problem Name'
def assertNumProblems(display_name, expected_number):
"""
Asserts the number of problems with the given display name is the given expected number.
"""
self.assertEquals(
len(self.store.get_items(course_key.for_branch(None), settings={'display_name': display_name})),
expected_number
)
def assertProblemNameEquals(expected_display_name):
"""
Asserts the display_name of the xblock at problem_location matches the given expected value.
"""
# check the display_name of the problem
problem = self.store.get_item(problem_location)
self.assertEquals(problem.display_name, expected_display_name)
# there should be only 1 problem with the expected_display_name
assertNumProblems(expected_display_name, 1)
# verify Draft problem
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
self.assertTrue(self.store.has_item(problem_location))
assertProblemNameEquals(problem_original_name)
# verify Published problem doesn't exist
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self.assertFalse(self.store.has_item(problem_location))
with self.assertRaises(ItemNotFoundError):
self.store.get_item(problem_location)
# PUBLISH the problem
self.store.publish(self.vertical_x1a, self.user_id)
self.store.publish(problem_location, self.user_id)
# verify Published problem
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
self.assertTrue(self.store.has_item(problem_location))
assertProblemNameEquals(problem_original_name)
# verify Draft-preferred
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assertProblemNameEquals(problem_original_name)
# EDIT name
problem = self.store.get_item(problem_location)
problem.display_name = problem_new_name
self.store.update_item(problem, self.user_id)
# verify Draft problem has new name
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course_key):
assertProblemNameEquals(problem_new_name)
# verify Published problem still has old name
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assertProblemNameEquals(problem_original_name)
# there should be no published problems with the new name
assertNumProblems(problem_new_name, 0)
# PUBLISH the problem
self.store.publish(problem_location, self.user_id)
# verify Published problem has new name
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, course_key):
assertProblemNameEquals(problem_new_name)
# there should be no published problems with the old name
assertNumProblems(problem_original_name, 0)
def verify_default_store(self, store_type):
"""
Verifies the default_store property
"""
self.assertEquals(self.store.default_modulestore.get_modulestore_type(), store_type)
# verify internal helper method
store = self.store._get_modulestore_for_courselike() # pylint: disable=protected-access
self.assertEquals(store.get_modulestore_type(), store_type)
# verify store used for creating a course
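# create_course raises NotImplementedError on the read-only xml modulestore, so the except branch
# below only asserts that the default store under test really was the xml one.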
try:
course = self.store.create_course("org", "course{}".format(uuid4().hex[:5]), "run", self.user_id)
self.assertEquals(course.system.modulestore.get_modulestore_type(), store_type)
except NotImplementedError:
self.assertEquals(store_type, ModuleStoreEnum.Type.xml)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.xml)
def test_default_store(self, default_ms):
"""
Test the default store context manager
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
with self.store.default_store(default_ms):
self.verify_default_store(default_ms)
def test_default_store_nested(self):
"""
Test the default store context manager, nested within one another
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
with self.store.default_store(ModuleStoreEnum.Type.mongo):
self.verify_default_store(ModuleStoreEnum.Type.mongo)
with self.store.default_store(ModuleStoreEnum.Type.split):
self.verify_default_store(ModuleStoreEnum.Type.split)
with self.store.default_store(ModuleStoreEnum.Type.xml):
self.verify_default_store(ModuleStoreEnum.Type.xml)
self.verify_default_store(ModuleStoreEnum.Type.split)
self.verify_default_store(ModuleStoreEnum.Type.mongo)
def test_default_store_fake(self):
"""
Test the default store context manager, asking for a fake store
"""
# initialize the mixed modulestore
self._initialize_mixed(mappings={})
fake_store = "fake"
with self.assertRaisesRegexp(Exception, "Cannot find store of type {}".format(fake_store)):
with self.store.default_store(fake_store):
pass # pragma: no cover
def save_asset(self, asset_key):
"""
Load and save the given file. (taken from test_contentstore)
"""
with open("{}/static/{}".format(DATA_DIR, asset_key.block_id), "rb") as f:
content = StaticContent(
asset_key, "Funky Pix", mimetypes.guess_type(asset_key.block_id)[0], f.read(),
)
self.store.contentstore.save(content)
@ddt.data(
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.mongo],
[ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split],
[ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.split]
)
@ddt.unpack
def test_clone_course(self, source_modulestore, destination_modulestore):
"""
Test clone course
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(source_modulestore):
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key)
self.save_asset(source_course_key.make_asset_key('asset', 'picture1.jpg'))
with self.store.default_store(destination_modulestore):
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(source_course_key, dest_course_id, self.user_id)
# pylint: disable=protected-access
source_store = self.store._get_modulestore_by_type(source_modulestore)
dest_store = self.store._get_modulestore_by_type(destination_modulestore)
self.assertCoursesEqual(source_store, source_course_key, dest_store, dest_course_id)
def test_clone_xml_split(self):
"""
XML courses can be cloned to split, so test that.
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={self.XML_COURSEID2: 'xml', })
source_course_key = CourseKey.from_string(self.XML_COURSEID2)
with self.store.default_store(ModuleStoreEnum.Type.split):
dest_course_id = CourseLocator("org.other", "course.other", "run.other")
self.store.clone_course(
source_course_key, dest_course_id, ModuleStoreEnum.UserID.test
)
# pylint: disable=protected-access
source_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.xml)
dest_store = self.store._get_modulestore_by_type(ModuleStoreEnum.Type.split)
self.assertCoursesEqual(source_store, source_course_key, dest_store, dest_course_id)
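# The signal tests below rebuild the mixed store with signal_handler=SignalHandler(MixedModuleStore)
# so that course_published events are actually emitted, then wrap a mock receiver around
# SignalHandler.course_published and count its calls; receiver.reset_mock() clears the count between
# scenarios.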
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_direct_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=SignalHandler(MixedModuleStore),
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.assertEqual(receiver.call_count, 0)
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
self.assertEqual(receiver.call_count, 1)
course_key = course.id
# Test non-draftable block types. The block should be published with every change.
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
log.debug('Testing with block type %s', block_type)
receiver.reset_mock()
block = self.store.create_item(self.user_id, course_key, block_type)
self.assertEqual(receiver.call_count, 1)
block.display_name = block_type
self.store.update_item(block, self.user_id)
self.assertEqual(receiver.call_count, 2)
self.store.publish(block.location, self.user_id)
self.assertEqual(receiver.call_count, 3)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_rerun_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=SignalHandler(MixedModuleStore),
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.assertEqual(receiver.call_count, 0)
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
self.assertEqual(receiver.call_count, 1)
course_key = course.id
# Test course re-runs
receiver.reset_mock()
dest_course_id = self.store.make_course_key("org.other", "course.other", "run.other")
self.store.clone_course(course_key, dest_course_id, self.user_id)
self.assertEqual(receiver.call_count, 1)
@patch('xmodule.tabs.CourseTab.from_json', side_effect=mock_tab_from_json)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_import_firing(self, default, _from_json):
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=SignalHandler(MixedModuleStore),
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.assertEqual(receiver.call_count, 0)
# Test course imports
# Note: The signal is fired once when the course is created and
# a second time after the actual data import.
receiver.reset_mock()
import_course_from_xml(
self.store, self.user_id, DATA_DIR, ['toy'], load_error_modules=False,
static_content_store=contentstore,
create_if_not_present=True,
)
self.assertEqual(receiver.call_count, 2)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_course_publish_signal_publish_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=SignalHandler(MixedModuleStore),
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.assertEqual(receiver.call_count, 0)
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
self.assertEqual(receiver.call_count, 1)
# Test a draftable block type, which needs to be explicitly published, and nest it within the
# normal structure - this is important because some implementors change the parent when adding a
# non-published child; if parent is in DIRECT_ONLY_CATEGORIES then this should not fire the event
receiver.reset_mock()
section = self.store.create_item(self.user_id, course.id, 'chapter')
self.assertEqual(receiver.call_count, 1)
subsection = self.store.create_child(self.user_id, section.location, 'sequential')
self.assertEqual(receiver.call_count, 2)
# 'units' and 'blocks' are draftable types
receiver.reset_mock()
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
self.assertEqual(receiver.call_count, 0)
block = self.store.create_child(self.user_id, unit.location, 'problem')
self.assertEqual(receiver.call_count, 0)
self.store.update_item(block, self.user_id)
self.assertEqual(receiver.call_count, 0)
self.store.publish(unit.location, self.user_id)
self.assertEqual(receiver.call_count, 1)
self.store.unpublish(unit.location, self.user_id)
self.assertEqual(receiver.call_count, 2)
self.store.delete_item(unit.location, self.user_id)
self.assertEqual(receiver.call_count, 3)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_course_publish_signal_direct_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=SignalHandler(MixedModuleStore),
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.assertEqual(receiver.call_count, 0)
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
self.assertEqual(receiver.call_count, 1)
course_key = course.id
# Test non-draftable block types. No signals should be received until the bulk operation completes.
receiver.reset_mock()
with self.store.bulk_operations(course_key):
categories = DIRECT_ONLY_CATEGORIES
for block_type in categories:
log.debug('Testing with block type %s', block_type)
block = self.store.create_item(self.user_id, course_key, block_type)
self.assertEqual(receiver.call_count, 0)
block.display_name = block_type
self.store.update_item(block, self.user_id)
self.assertEqual(receiver.call_count, 0)
self.store.publish(block.location, self.user_id)
self.assertEqual(receiver.call_count, 0)
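# exiting the bulk_operations block above is expected to flush the deferred publishes as one signal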
self.assertEqual(receiver.call_count, 1)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_bulk_course_publish_signal_publish_firing(self, default):
with MongoContentstoreBuilder().build() as contentstore:
self.store = MixedModuleStore(
contentstore=contentstore,
create_modulestore_instance=create_modulestore_instance,
mappings={},
signal_handler=SignalHandler(MixedModuleStore),
**self.OPTIONS
)
self.addCleanup(self.store.close_all_connections)
with self.store.default_store(default):
self.assertIsNotNone(self.store.thread_cache.default_store.signal_handler)
with mock_signal_receiver(SignalHandler.course_published) as receiver:
self.assertEqual(receiver.call_count, 0)
# Course creation and publication should fire the signal
course = self.store.create_course('org_x', 'course_y', 'run_z', self.user_id)
self.assertEqual(receiver.call_count, 1)
course_key = course.id
# Test a draftable block type, which needs to be explicitly published, and nest it within the
# normal structure - this is important because some implementors change the parent when adding a
# non-published child; if parent is in DIRECT_ONLY_CATEGORIES then this should not fire the event
receiver.reset_mock()
with self.store.bulk_operations(course_key):
section = self.store.create_item(self.user_id, course_key, 'chapter')
self.assertEqual(receiver.call_count, 0)
subsection = self.store.create_child(self.user_id, section.location, 'sequential')
self.assertEqual(receiver.call_count, 0)
# 'units' and 'blocks' are draftable types
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
self.assertEqual(receiver.call_count, 0)
block = self.store.create_child(self.user_id, unit.location, 'problem')
self.assertEqual(receiver.call_count, 0)
self.store.update_item(block, self.user_id)
self.assertEqual(receiver.call_count, 0)
self.store.publish(unit.location, self.user_id)
self.assertEqual(receiver.call_count, 0)
self.store.unpublish(unit.location, self.user_id)
self.assertEqual(receiver.call_count, 0)
self.store.delete_item(unit.location, self.user_id)
self.assertEqual(receiver.call_count, 0)
self.assertEqual(receiver.call_count, 1)
# Test editing draftable block type without publish
receiver.reset_mock()
with self.store.bulk_operations(course_key):
unit = self.store.create_child(self.user_id, subsection.location, 'vertical')
self.assertEqual(receiver.call_count, 0)
block = self.store.create_child(self.user_id, unit.location, 'problem')
self.assertEqual(receiver.call_count, 0)
self.store.publish(unit.location, self.user_id)
self.assertEqual(receiver.call_count, 0)
self.assertEqual(receiver.call_count, 1)
receiver.reset_mock()
with self.store.bulk_operations(course_key):
self.assertEqual(receiver.call_count, 0)
unit.display_name = "Change this unit"
self.store.update_item(unit, self.user_id)
self.assertEqual(receiver.call_count, 0)
self.assertEqual(receiver.call_count, 0)
@ddt.ddt
@attr('mongo')
class TestPublishOverExportImport(CommonMixedModuleStoreSetup):
"""
Tests which publish (or don't publish) items - and then export/import the course,
checking the state of the imported items.
"""
def setUp(self):
"""
Set up the database for testing
"""
super(TestPublishOverExportImport, self).setUp()
self.user_id = ModuleStoreEnum.UserID.test
self.export_dir = mkdtemp()
self.addCleanup(rmtree, self.export_dir, ignore_errors=True)
def _export_import_course_round_trip(self, modulestore, contentstore, source_course_key, export_dir):
"""
Export the course from a modulestore and then re-import the course.
"""
top_level_export_dir = 'exported_source_course'
export_course_to_xml(
modulestore,
contentstore,
source_course_key,
export_dir,
top_level_export_dir,
)
import_course_from_xml(
modulestore,
'test_user',
export_dir,
source_dirs=[top_level_export_dir],
static_content_store=contentstore,
target_id=source_course_key,
create_if_not_present=True,
raise_on_failure=True,
)
@contextmanager
def _build_store(self, default_ms):
"""
Perform the modulestore-building and course creation steps for a mixed modulestore test.
"""
with MongoContentstoreBuilder().build() as contentstore:
# initialize the mixed modulestore
self._initialize_mixed(contentstore=contentstore, mappings={})
with self.store.default_store(default_ms):
source_course_key = self.store.make_course_key("org.source", "course.source", "run.source")
self._create_course(source_course_key)
yield contentstore, source_course_key
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_draft_has_changes_before_export_and_after_import(self, default_ms):
"""
Tests that an unpublished draft unit still has changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and don't publish it.
draft_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
# Not yet published, so changes are present
self.assertTrue(self._has_changes(draft_xblock.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Verify that the imported block still is a draft, i.e. has changes.
self.assertTrue(self._has_changes(draft_xblock.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_published_has_changes_before_export_and_after_import(self, default_ms):
"""
Tests that a published unit remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and publish it.
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
self.store.publish(published_xblock.location, self.user_id)
# Retrieve the published block and make sure it's published.
self.assertFalse(self._has_changes(published_xblock.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that it still is published, i.e. has no changes.
self.assertFalse(self._has_changes(published_xblock.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_changed_published_has_changes_before_export_and_after_import(self, default_ms):
"""
Tests that a published unit with unpublished draft changes keeps those changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# Create a dummy component to test against and publish it.
published_xblock = self.store.create_item(
self.user_id,
self.course.id,
'vertical',
block_id='test_vertical'
)
self.store.publish(published_xblock.location, self.user_id)
# Retrieve the published block and make sure it's published.
self.assertFalse(self._has_changes(published_xblock.location))
updated_display_name = 'Changed Display Name'
component = self.store.get_item(published_xblock.location)
component.display_name = updated_display_name
component = self.store.update_item(component, self.user_id)
self.assertTrue(self.store.has_changes(component))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(published_xblock.location))
# Verify that the changes in the draft vertical still exist.
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, source_course_key):
component = self.store.get_item(published_xblock.location)
self.assertEqual(component.display_name, updated_display_name)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_seq_with_unpublished_vertical_has_changes_before_export_and_after_import(self, default_ms):
"""
Tests that a published sequential with an unpublished vertical child still reports changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical - don't publish it!
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Retrieve the published block and make sure it's published.
# Chapter is published - but the changes in the vertical below mean it "has_changes".
self.assertTrue(self._has_changes(chapter.location))
# Sequential is published - but the changes in the vertical below mean it "has_changes".
self.assertTrue(self._has_changes(sequential.location))
# Vertical is unpublished - so it "has_changes".
self.assertTrue(self._has_changes(vertical.location))
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(chapter.location))
self.assertTrue(self._has_changes(sequential.location))
self.assertTrue(self._has_changes(vertical.location))
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_vertical_with_draft_and_published_unit_has_changes_before_export_and_after_import(self, default_ms):
"""
Tests that a published vertical whose unit has unpublished draft changes keeps those changes across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Vertical has changes until it is actually published.
self.assertTrue(self._has_changes(vertical.location))
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# create unit
unit = self.store.create_child(
self.user_id, vertical.location, 'html', block_id='html_unit'
)
# Vertical has a new child -and- unit is unpublished. So both have changes.
self.assertTrue(self._has_changes(vertical.location))
self.assertTrue(self._has_changes(unit.location))
# Publishing the vertical also publishes its unit child.
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Publishing the unit separately has no effect on whether it has changes - it's already published.
self.store.publish(unit.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Retrieve the published block and make sure it's published.
self.store.publish(chapter.location, self.user_id)
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Now make changes to the unit - but don't publish them.
component = self.store.get_item(unit.location)
updated_display_name = 'Changed Display Name'
component.display_name = updated_display_name
component = self.store.update_item(component, self.user_id)
self.assertTrue(self._has_changes(component.location))
# Export the course - then import the course export.
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertTrue(self._has_changes(chapter.location))
self.assertTrue(self._has_changes(sequential.location))
self.assertTrue(self._has_changes(vertical.location))
self.assertTrue(self._has_changes(unit.location))
# Verify that the changes in the draft unit still exist.
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, updated_display_name)
# Verify that the draft changes don't exist in the published unit - it still uses the default name.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, 'Text')
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_vertical_with_published_unit_remains_published_before_export_and_after_import(self, default_ms):
"""
Tests that a published unit remains published across export and re-import.
"""
with self._build_store(default_ms) as (contentstore, source_course_key):
# create chapter
chapter = self.store.create_child(
self.user_id, self.course.location, 'chapter', block_id='section_one'
)
self.store.publish(chapter.location, self.user_id)
# create sequential
sequential = self.store.create_child(
self.user_id, chapter.location, 'sequential', block_id='subsection_one'
)
self.store.publish(sequential.location, self.user_id)
# create vertical
vertical = self.store.create_child(
self.user_id, sequential.location, 'vertical', block_id='moon_unit'
)
# Vertical has changes until it is actually published.
self.assertTrue(self._has_changes(vertical.location))
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
# create unit
unit = self.store.create_child(
self.user_id, vertical.location, 'html', block_id='html_unit'
)
# Now make changes to the unit.
updated_display_name = 'Changed Display Name'
unit.display_name = updated_display_name
unit = self.store.update_item(unit, self.user_id)
self.assertTrue(self._has_changes(unit.location))
# Publishing the vertical also publishes its unit child.
self.store.publish(vertical.location, self.user_id)
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Export the course - then import the course export.
self._export_import_course_round_trip(
self.store, contentstore, source_course_key, self.export_dir
)
# Get the published xblock from the imported course.
# Verify that the published block still has a draft block, i.e. has changes.
self.assertFalse(self._has_changes(chapter.location))
self.assertFalse(self._has_changes(sequential.location))
self.assertFalse(self._has_changes(vertical.location))
self.assertFalse(self._has_changes(unit.location))
# Verify that the published changes exist in the published unit.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, source_course_key):
component = self.store.get_item(unit.location)
self.assertEqual(component.display_name, updated_display_name)
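# A simplified, self-contained sketch (not edX code) of the receiver pattern the
# signal tests above rely on: a Mock object records every call, call_count is
# asserted around each store operation, and reset_mock() clears the slate
# between bulk-operation scenarios. The course key used below is hypothetical.
if __name__ == '__main__':
    try:
        from unittest.mock import Mock  # Python 3
    except ImportError:
        from mock import Mock  # Python 2
    receiver = Mock()
    assert receiver.call_count == 0
    receiver(course_key='course-v1:org_x+course_y+run_z')  # what a publish would do
    assert receiver.call_count == 1
    receiver.reset_mock()
    assert receiver.call_count == 0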
|
agpl-3.0
|
robcza/intelmq
|
intelmq/tests/bots/experts/modify/test_expert.py
|
1
|
2270
|
# -*- coding: utf-8 -*-
"""
Testing modify expert bot.
"""
import unittest
from pkg_resources import resource_filename
import intelmq.lib.test as test
from intelmq.bots.experts.modify.expert import ModifyExpertBot
EVENT_TEMPL = {"__type": "Event",
"feed.name": "Spamhaus Cert",
"feed.url": "https://portal.spamhaus.org/cert/api.php?cert="
"<CERTNAME>&key=<APIKEY>",
"classification.type": "botnet drone",
"time.observation": "2015-01-01T00:00:00+00:00",
"raw": "",
}
INPUT = [{'malware.name': 'confickerab'},
{'malware.name': 'gozi2'},
{'feed.name': 'Abuse.ch',
'feed.url': 'https://feodotracker.abuse.ch/blocklist/?download=domainblocklist'},
{'malware.name': 'zeus_gameover_us'},
{'malware.name': 'foobar', 'feed.name': 'Other Feed'},
{'feed.name': '', 'source.port': 80},
]
OUTPUT = [{'classification.identifier': 'conficker'},
{'classification.identifier': 'gozi'},
{'classification.identifier': 'feodo'},
{'classification.identifier': 'zeus'},
{},
{'protocol.application': 'http'},
]
for index in range(len(INPUT)):
copy1 = EVENT_TEMPL.copy()
copy2 = EVENT_TEMPL.copy()
copy1.update(INPUT[index])
copy2.update(INPUT[index])
copy2.update(OUTPUT[index])
INPUT[index] = copy1
OUTPUT[index] = copy2
class TestModifyExpertBot(test.BotTestCase, unittest.TestCase):
"""
A TestCase for ModifyExpertBot.
"""
@classmethod
def set_bot(cls):
cls.bot_reference = ModifyExpertBot
config_path = resource_filename('intelmq',
'bots/experts/modify/modify.conf')
cls.sysconfig = {'configuration_path': config_path}
cls.default_input_message = {'__type': 'Event'}
def test_events(self):
""" Test if correct Events have been produced. """
self.input_message = INPUT
self.run_bot(iterations=len(INPUT))
for position, event_out in enumerate(OUTPUT):
self.assertMessageEqual(position, event_out)
if __name__ == '__main__':
unittest.main()
|
agpl-3.0
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/botocore/vendored/requests/packages/urllib3/response.py
|
478
|
16459
|
try:
import http.client as httplib
except ImportError:
import httplib
import zlib
import io
from socket import timeout as SocketTimeout
from ._collections import HTTPHeaderDict
from .exceptions import (
ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked
)
from .packages.six import string_types as basestring, binary_type, PY3
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
return self._obj.decompress(data)
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
return self._obj.decompress(data)
def _get_decoder(mode):
if mode == 'gzip':
return GzipDecoder()
return DeflateDecoder()
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, the body will be decoded while reading, based on the
'content-encoding' header (e.g. 'gzip' and 'deflate'); if False, the raw
bytes are returned instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get('transfer-encoding', '').lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
# We certainly don't want to preload content when the response is chunked.
if not self.chunked and preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:`HTTPResponse.read` if bytes
are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def _init_decoder(self):
"""
Set up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
content_encoding = self.headers.get('content-encoding', '').lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
return data
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if 'read operation timed out' not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
self._fp_bytes_read += len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return at most this
much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked:
for line in self.read_chunked(amt, decode_content=decode_content):
yield line
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3: # Python 3
headers = HTTPHeaderDict(headers.items())
else: # Python 2
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
resp = ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
def _update_chunk_length(self):
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b';', 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(line)
def _handle_chunk(self, amt):
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
returned_chunk = chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked("Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
if self._original_response and self._original_response._method.upper() == 'HEAD':
# Don't bother reading the body of a HEAD request.
# FIXME: Can we do this somehow without accessing private httplib _method?
self._original_response.close()
return
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
yield self._decode(chunk, decode_content=decode_content,
flush_decoder=True)
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
self.release_conn()
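# A minimal sketch (not part of urllib3) exercising the decoder helpers defined
# above: _get_decoder() picks a decoder from the content-encoding token, the
# GzipDecoder unwraps gzip bodies, and the DeflateDecoder transparently falls
# back to raw-deflate streams after the first zlib-wrapped attempt fails.
if __name__ == '__main__':
    payload = b'hello urllib3 ' * 20
    gzip_obj = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
    gzip_body = gzip_obj.compress(payload) + gzip_obj.flush()
    decoder = _get_decoder('gzip')
    assert decoder.decompress(gzip_body) + decoder.flush() == payload

    # A zlib-wrapped deflate body decodes on the first try ...
    decoder = _get_decoder('deflate')
    assert decoder.decompress(zlib.compress(payload)) == payload

    # ... and a raw deflate body triggers the -MAX_WBITS fallback.
    raw_obj = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
    raw_body = raw_obj.compress(payload) + raw_obj.flush()
    decoder = _get_decoder('deflate')
    assert decoder.decompress(raw_body) == payload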
|
mit
|
windskyer/nova
|
nova/objects/tag.py
|
11
|
2493
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import db
from nova import objects
from nova.objects import base
from nova.objects import fields
@base.NovaObjectRegistry.register
class Tag(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added method exists()
VERSION = '1.1'
fields = {
'resource_id': fields.StringField(),
'tag': fields.StringField(),
}
@staticmethod
def _from_db_object(context, tag, db_tag):
for key in tag.fields:
setattr(tag, key, db_tag[key])
tag.obj_reset_changes()
tag._context = context
return tag
@base.remotable
def create(self):
db_tag = db.instance_tag_add(self._context, self.resource_id, self.tag)
self._from_db_object(self._context, self, db_tag)
@base.remotable_classmethod
def destroy(cls, context, resource_id, name):
db.instance_tag_delete(context, resource_id, name)
@base.remotable_classmethod
def exists(cls, context, resource_id, name):
return db.instance_tag_exists(context, resource_id, name)
@base.NovaObjectRegistry.register
class TagList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Tag <= version 1.1
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('Tag'),
}
@base.remotable_classmethod
def get_by_resource_id(cls, context, resource_id):
db_tags = db.instance_tag_get_by_instance_uuid(context, resource_id)
return base.obj_make_list(context, cls(), objects.Tag, db_tags)
@base.remotable_classmethod
def create(cls, context, resource_id, tags):
db_tags = db.instance_tag_set(context, resource_id, tags)
return base.obj_make_list(context, cls(), objects.Tag, db_tags)
@base.remotable_classmethod
def destroy(cls, context, resource_id):
db.instance_tag_delete_all(context, resource_id)
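# A hedged usage sketch (assumes the nova package is importable; no database or
# RPC round-trip is needed for this part). Field assignments on a fresh Tag are
# tracked as changes until _from_db_object()/obj_reset_changes() clears them,
# e.g. after create() persists the row. The identifier values are hypothetical.
if __name__ == '__main__':
    tag = Tag(resource_id='fake-instance-uuid', tag='web-tier')
    print(tag.obj_what_changed())  # expected: {'resource_id', 'tag'}
    tag.obj_reset_changes()
    print(tag.obj_what_changed())  # expected: empty set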
|
gpl-2.0
|
katsikas/gnuradio
|
build/gr-pager/swig/pager_swig.py
|
1
|
21424
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.4
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_pager_swig', [dirname(__file__)])
except ImportError:
import _pager_swig
return _pager_swig
if fp is not None:
try:
_mod = imp.load_module('_pager_swig', fp, pathname, description)
finally:
fp.close()
return _mod
_pager_swig = swig_import_helper()
del swig_import_helper
else:
import _pager_swig
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class SwigPyIterator(object):
"""Proxy of C++ swig::SwigPyIterator class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _pager_swig.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self):
"""value(self) -> PyObject"""
return _pager_swig.SwigPyIterator_value(self)
def incr(self, n = 1):
"""incr(self, size_t n = 1) -> SwigPyIterator"""
return _pager_swig.SwigPyIterator_incr(self, n)
def decr(self, n = 1):
"""decr(self, size_t n = 1) -> SwigPyIterator"""
return _pager_swig.SwigPyIterator_decr(self, n)
def distance(self, *args, **kwargs):
"""distance(self, SwigPyIterator x) -> ptrdiff_t"""
return _pager_swig.SwigPyIterator_distance(self, *args, **kwargs)
def equal(self, *args, **kwargs):
"""equal(self, SwigPyIterator x) -> bool"""
return _pager_swig.SwigPyIterator_equal(self, *args, **kwargs)
def copy(self):
"""copy(self) -> SwigPyIterator"""
return _pager_swig.SwigPyIterator_copy(self)
def next(self):
"""next(self) -> PyObject"""
return _pager_swig.SwigPyIterator_next(self)
def __next__(self):
"""__next__(self) -> PyObject"""
return _pager_swig.SwigPyIterator___next__(self)
def previous(self):
"""previous(self) -> PyObject"""
return _pager_swig.SwigPyIterator_previous(self)
def advance(self, *args, **kwargs):
"""advance(self, ptrdiff_t n) -> SwigPyIterator"""
return _pager_swig.SwigPyIterator_advance(self, *args, **kwargs)
def __eq__(self, *args, **kwargs):
"""__eq__(self, SwigPyIterator x) -> bool"""
return _pager_swig.SwigPyIterator___eq__(self, *args, **kwargs)
def __ne__(self, *args, **kwargs):
"""__ne__(self, SwigPyIterator x) -> bool"""
return _pager_swig.SwigPyIterator___ne__(self, *args, **kwargs)
def __iadd__(self, *args, **kwargs):
"""__iadd__(self, ptrdiff_t n) -> SwigPyIterator"""
return _pager_swig.SwigPyIterator___iadd__(self, *args, **kwargs)
def __isub__(self, *args, **kwargs):
"""__isub__(self, ptrdiff_t n) -> SwigPyIterator"""
return _pager_swig.SwigPyIterator___isub__(self, *args, **kwargs)
def __add__(self, *args, **kwargs):
"""__add__(self, ptrdiff_t n) -> SwigPyIterator"""
return _pager_swig.SwigPyIterator___add__(self, *args, **kwargs)
def __sub__(self, *args):
"""
__sub__(self, ptrdiff_t n) -> SwigPyIterator
__sub__(self, SwigPyIterator x) -> ptrdiff_t
"""
return _pager_swig.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _pager_swig.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class pager_flex_frame_sptr(object):
"""Proxy of C++ boost::shared_ptr<(pager_flex_frame)> class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> pager_flex_frame_sptr
__init__(self, pager_flex_frame p) -> pager_flex_frame_sptr
"""
this = _pager_swig.new_pager_flex_frame_sptr(*args)
try: self.this.append(this)
except: self.this = this
def __deref__(self):
"""__deref__(self) -> pager_flex_frame"""
return _pager_swig.pager_flex_frame_sptr___deref__(self)
__swig_destroy__ = _pager_swig.delete_pager_flex_frame_sptr
__del__ = lambda self : None;
pager_flex_frame_sptr_swigregister = _pager_swig.pager_flex_frame_sptr_swigregister
pager_flex_frame_sptr_swigregister(pager_flex_frame_sptr)
def flex_frame():
"""
flex_frame() -> pager_flex_frame_sptr
flex_frame.
public constructor for pager_flex_frame
Params: (NONE)
"""
return _pager_swig.flex_frame()
class pager_flex_frame(object):
"""
flex_frame.
public constructor for pager_flex_frame
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_destroy__ = _pager_swig.delete_pager_flex_frame
__del__ = lambda self : None;
pager_flex_frame_swigregister = _pager_swig.pager_flex_frame_swigregister
pager_flex_frame_swigregister(pager_flex_frame)
class pager_slicer_fb_sptr(object):
"""Proxy of C++ boost::shared_ptr<(pager_slicer_fb)> class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> pager_slicer_fb_sptr
__init__(self, p) -> pager_slicer_fb_sptr
"""
this = _pager_swig.new_pager_slicer_fb_sptr(*args)
try: self.this.append(this)
except: self.this = this
def __deref__(self):
"""__deref__(self)"""
return _pager_swig.pager_slicer_fb_sptr___deref__(self)
__swig_destroy__ = _pager_swig.delete_pager_slicer_fb_sptr
__del__ = lambda self : None;
def dc_offset(self):
"""
dc_offset(self) -> float
Params: (NONE)
"""
return _pager_swig.pager_slicer_fb_sptr_dc_offset(self)
def history(self):
"""history(self) -> unsigned int"""
return _pager_swig.pager_slicer_fb_sptr_history(self)
def output_multiple(self):
"""output_multiple(self) -> int"""
return _pager_swig.pager_slicer_fb_sptr_output_multiple(self)
def relative_rate(self):
"""relative_rate(self) -> double"""
return _pager_swig.pager_slicer_fb_sptr_relative_rate(self)
def start(self):
"""start(self) -> bool"""
return _pager_swig.pager_slicer_fb_sptr_start(self)
def stop(self):
"""stop(self) -> bool"""
return _pager_swig.pager_slicer_fb_sptr_stop(self)
def nitems_read(self, *args, **kwargs):
"""nitems_read(self, unsigned int which_input) -> uint64_t"""
return _pager_swig.pager_slicer_fb_sptr_nitems_read(self, *args, **kwargs)
def nitems_written(self, *args, **kwargs):
"""nitems_written(self, unsigned int which_output) -> uint64_t"""
return _pager_swig.pager_slicer_fb_sptr_nitems_written(self, *args, **kwargs)
def detail(self):
"""detail(self) -> gr_block_detail_sptr"""
return _pager_swig.pager_slicer_fb_sptr_detail(self)
def set_detail(self, *args, **kwargs):
"""set_detail(self, gr_block_detail_sptr detail)"""
return _pager_swig.pager_slicer_fb_sptr_set_detail(self, *args, **kwargs)
def name(self):
"""name(self) -> string"""
return _pager_swig.pager_slicer_fb_sptr_name(self)
def input_signature(self):
"""input_signature(self) -> gr_io_signature_sptr"""
return _pager_swig.pager_slicer_fb_sptr_input_signature(self)
def output_signature(self):
"""output_signature(self) -> gr_io_signature_sptr"""
return _pager_swig.pager_slicer_fb_sptr_output_signature(self)
def unique_id(self):
"""unique_id(self) -> long"""
return _pager_swig.pager_slicer_fb_sptr_unique_id(self)
def to_basic_block(self):
"""to_basic_block(self) -> gr_basic_block_sptr"""
return _pager_swig.pager_slicer_fb_sptr_to_basic_block(self)
def check_topology(self, *args, **kwargs):
"""check_topology(self, int ninputs, int noutputs) -> bool"""
return _pager_swig.pager_slicer_fb_sptr_check_topology(self, *args, **kwargs)
pager_slicer_fb_sptr_swigregister = _pager_swig.pager_slicer_fb_sptr_swigregister
pager_slicer_fb_sptr_swigregister(pager_slicer_fb_sptr)
pager_slicer_fb_sptr.__repr__ = lambda self: "<gr_block %s (%d)>" % (self.name(), self.unique_id ())
def slicer_fb(*args, **kwargs):
"""
slicer_fb(float alpha) -> pager_slicer_fb_sptr
slicer description
Params: (alpha)
"""
return _pager_swig.slicer_fb(*args, **kwargs)
class pager_flex_sync_sptr(object):
"""Proxy of C++ boost::shared_ptr<(pager_flex_sync)> class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> pager_flex_sync_sptr
__init__(self, p) -> pager_flex_sync_sptr
"""
this = _pager_swig.new_pager_flex_sync_sptr(*args)
try: self.this.append(this)
except: self.this = this
def __deref__(self):
"""__deref__(self)"""
return _pager_swig.pager_flex_sync_sptr___deref__(self)
__swig_destroy__ = _pager_swig.delete_pager_flex_sync_sptr
__del__ = lambda self : None;
def history(self):
"""history(self) -> unsigned int"""
return _pager_swig.pager_flex_sync_sptr_history(self)
def output_multiple(self):
"""output_multiple(self) -> int"""
return _pager_swig.pager_flex_sync_sptr_output_multiple(self)
def relative_rate(self):
"""relative_rate(self) -> double"""
return _pager_swig.pager_flex_sync_sptr_relative_rate(self)
def start(self):
"""start(self) -> bool"""
return _pager_swig.pager_flex_sync_sptr_start(self)
def stop(self):
"""stop(self) -> bool"""
return _pager_swig.pager_flex_sync_sptr_stop(self)
def nitems_read(self, *args, **kwargs):
"""nitems_read(self, unsigned int which_input) -> uint64_t"""
return _pager_swig.pager_flex_sync_sptr_nitems_read(self, *args, **kwargs)
def nitems_written(self, *args, **kwargs):
"""nitems_written(self, unsigned int which_output) -> uint64_t"""
return _pager_swig.pager_flex_sync_sptr_nitems_written(self, *args, **kwargs)
def detail(self):
"""detail(self) -> gr_block_detail_sptr"""
return _pager_swig.pager_flex_sync_sptr_detail(self)
def set_detail(self, *args, **kwargs):
"""set_detail(self, gr_block_detail_sptr detail)"""
return _pager_swig.pager_flex_sync_sptr_set_detail(self, *args, **kwargs)
def name(self):
"""name(self) -> string"""
return _pager_swig.pager_flex_sync_sptr_name(self)
def input_signature(self):
"""input_signature(self) -> gr_io_signature_sptr"""
return _pager_swig.pager_flex_sync_sptr_input_signature(self)
def output_signature(self):
"""output_signature(self) -> gr_io_signature_sptr"""
return _pager_swig.pager_flex_sync_sptr_output_signature(self)
def unique_id(self):
"""unique_id(self) -> long"""
return _pager_swig.pager_flex_sync_sptr_unique_id(self)
def to_basic_block(self):
"""to_basic_block(self) -> gr_basic_block_sptr"""
return _pager_swig.pager_flex_sync_sptr_to_basic_block(self)
def check_topology(self, *args, **kwargs):
"""check_topology(self, int ninputs, int noutputs) -> bool"""
return _pager_swig.pager_flex_sync_sptr_check_topology(self, *args, **kwargs)
pager_flex_sync_sptr_swigregister = _pager_swig.pager_flex_sync_sptr_swigregister
pager_flex_sync_sptr_swigregister(pager_flex_sync_sptr)
pager_flex_sync_sptr.__repr__ = lambda self: "<gr_block %s (%d)>" % (self.name(), self.unique_id ())
def flex_sync():
"""
flex_sync() -> pager_flex_sync_sptr
flex sync description
Params: (NONE)
"""
return _pager_swig.flex_sync()
class pager_flex_deinterleave_sptr(object):
"""Proxy of C++ boost::shared_ptr<(pager_flex_deinterleave)> class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> pager_flex_deinterleave_sptr
__init__(self, p) -> pager_flex_deinterleave_sptr
"""
this = _pager_swig.new_pager_flex_deinterleave_sptr(*args)
try: self.this.append(this)
except: self.this = this
def __deref__(self):
"""__deref__(self)"""
return _pager_swig.pager_flex_deinterleave_sptr___deref__(self)
__swig_destroy__ = _pager_swig.delete_pager_flex_deinterleave_sptr
__del__ = lambda self : None;
def history(self):
"""history(self) -> unsigned int"""
return _pager_swig.pager_flex_deinterleave_sptr_history(self)
def output_multiple(self):
"""output_multiple(self) -> int"""
return _pager_swig.pager_flex_deinterleave_sptr_output_multiple(self)
def relative_rate(self):
"""relative_rate(self) -> double"""
return _pager_swig.pager_flex_deinterleave_sptr_relative_rate(self)
def start(self):
"""start(self) -> bool"""
return _pager_swig.pager_flex_deinterleave_sptr_start(self)
def stop(self):
"""stop(self) -> bool"""
return _pager_swig.pager_flex_deinterleave_sptr_stop(self)
def nitems_read(self, *args, **kwargs):
"""nitems_read(self, unsigned int which_input) -> uint64_t"""
return _pager_swig.pager_flex_deinterleave_sptr_nitems_read(self, *args, **kwargs)
def nitems_written(self, *args, **kwargs):
"""nitems_written(self, unsigned int which_output) -> uint64_t"""
return _pager_swig.pager_flex_deinterleave_sptr_nitems_written(self, *args, **kwargs)
def detail(self):
"""detail(self) -> gr_block_detail_sptr"""
return _pager_swig.pager_flex_deinterleave_sptr_detail(self)
def set_detail(self, *args, **kwargs):
"""set_detail(self, gr_block_detail_sptr detail)"""
return _pager_swig.pager_flex_deinterleave_sptr_set_detail(self, *args, **kwargs)
def name(self):
"""name(self) -> string"""
return _pager_swig.pager_flex_deinterleave_sptr_name(self)
def input_signature(self):
"""input_signature(self) -> gr_io_signature_sptr"""
return _pager_swig.pager_flex_deinterleave_sptr_input_signature(self)
def output_signature(self):
"""output_signature(self) -> gr_io_signature_sptr"""
return _pager_swig.pager_flex_deinterleave_sptr_output_signature(self)
def unique_id(self):
"""unique_id(self) -> long"""
return _pager_swig.pager_flex_deinterleave_sptr_unique_id(self)
def to_basic_block(self):
"""to_basic_block(self) -> gr_basic_block_sptr"""
return _pager_swig.pager_flex_deinterleave_sptr_to_basic_block(self)
def check_topology(self, *args, **kwargs):
"""check_topology(self, int ninputs, int noutputs) -> bool"""
return _pager_swig.pager_flex_deinterleave_sptr_check_topology(self, *args, **kwargs)
pager_flex_deinterleave_sptr_swigregister = _pager_swig.pager_flex_deinterleave_sptr_swigregister
pager_flex_deinterleave_sptr_swigregister(pager_flex_deinterleave_sptr)
pager_flex_deinterleave_sptr.__repr__ = lambda self: "<gr_block %s (%d)>" % (self.name(), self.unique_id ())
def flex_deinterleave():
"""
flex_deinterleave() -> pager_flex_deinterleave_sptr
flex deinterleave description
Params: (NONE)
"""
return _pager_swig.flex_deinterleave()
class pager_flex_parse_sptr(object):
"""Proxy of C++ boost::shared_ptr<(pager_flex_parse)> class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> pager_flex_parse_sptr
__init__(self, p) -> pager_flex_parse_sptr
"""
this = _pager_swig.new_pager_flex_parse_sptr(*args)
try: self.this.append(this)
except: self.this = this
def __deref__(self):
"""__deref__(self)"""
return _pager_swig.pager_flex_parse_sptr___deref__(self)
__swig_destroy__ = _pager_swig.delete_pager_flex_parse_sptr
__del__ = lambda self : None;
def history(self):
"""history(self) -> unsigned int"""
return _pager_swig.pager_flex_parse_sptr_history(self)
def output_multiple(self):
"""output_multiple(self) -> int"""
return _pager_swig.pager_flex_parse_sptr_output_multiple(self)
def relative_rate(self):
"""relative_rate(self) -> double"""
return _pager_swig.pager_flex_parse_sptr_relative_rate(self)
def start(self):
"""start(self) -> bool"""
return _pager_swig.pager_flex_parse_sptr_start(self)
def stop(self):
"""stop(self) -> bool"""
return _pager_swig.pager_flex_parse_sptr_stop(self)
def nitems_read(self, *args, **kwargs):
"""nitems_read(self, unsigned int which_input) -> uint64_t"""
return _pager_swig.pager_flex_parse_sptr_nitems_read(self, *args, **kwargs)
def nitems_written(self, *args, **kwargs):
"""nitems_written(self, unsigned int which_output) -> uint64_t"""
return _pager_swig.pager_flex_parse_sptr_nitems_written(self, *args, **kwargs)
def detail(self):
"""detail(self) -> gr_block_detail_sptr"""
return _pager_swig.pager_flex_parse_sptr_detail(self)
def set_detail(self, *args, **kwargs):
"""set_detail(self, gr_block_detail_sptr detail)"""
return _pager_swig.pager_flex_parse_sptr_set_detail(self, *args, **kwargs)
def name(self):
"""name(self) -> string"""
return _pager_swig.pager_flex_parse_sptr_name(self)
def input_signature(self):
"""input_signature(self) -> gr_io_signature_sptr"""
return _pager_swig.pager_flex_parse_sptr_input_signature(self)
def output_signature(self):
"""output_signature(self) -> gr_io_signature_sptr"""
return _pager_swig.pager_flex_parse_sptr_output_signature(self)
def unique_id(self):
"""unique_id(self) -> long"""
return _pager_swig.pager_flex_parse_sptr_unique_id(self)
def to_basic_block(self):
"""to_basic_block(self) -> gr_basic_block_sptr"""
return _pager_swig.pager_flex_parse_sptr_to_basic_block(self)
def check_topology(self, *args, **kwargs):
"""check_topology(self, int ninputs, int noutputs) -> bool"""
return _pager_swig.pager_flex_parse_sptr_check_topology(self, *args, **kwargs)
pager_flex_parse_sptr_swigregister = _pager_swig.pager_flex_parse_sptr_swigregister
pager_flex_parse_sptr_swigregister(pager_flex_parse_sptr)
pager_flex_parse_sptr.__repr__ = lambda self: "<gr_block %s (%d)>" % (self.name(), self.unique_id ())
def flex_parse(*args, **kwargs):
"""
flex_parse(gr_msg_queue_sptr queue, float freq) -> pager_flex_parse_sptr
flex parse description
Params: (queue, freq)
"""
return _pager_swig.flex_parse(*args, **kwargs)
|
gpl-3.0
|
tvansteenburgh/PerfKitBenchmarker
|
perfkitbenchmarker/packages/openmpi.py
|
6
|
1879
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing OpenMPI installation and cleanup functions."""
from perfkitbenchmarker import vm_util
MPI_DIR = '%s/openmpi-1.6.5' % vm_util.VM_TMP_DIR
MPI_TAR = 'openmpi-1.6.5.tar.gz'
MPI_URL = 'http://www.open-mpi.org/software/ompi/v1.6/downloads/' + MPI_TAR
def _Install(vm):
"""Installs the OpenMPI package on the VM."""
vm.Install('build_tools')
vm.Install('wget')
vm.RemoteCommand('wget %s -P %s' % (MPI_URL, vm_util.VM_TMP_DIR))
vm.RemoteCommand('cd %s && tar xvfz %s' % (vm_util.VM_TMP_DIR, MPI_TAR))
make_jobs = vm.num_cpus
config_cmd = ('./configure --enable-static --disable-shared --disable-dlopen '
'--prefix=/usr')
vm.RobustRemoteCommand(
'cd %s && %s && make -j %s && sudo make install' %
(MPI_DIR, config_cmd, make_jobs))
def YumInstall(vm):
"""Installs the OpenMPI package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the OpenMPI package on the VM."""
_Install(vm)
def _Uninstall(vm):
"""Uninstalls the OpenMPI package on the VM."""
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(MPI_DIR))
def YumUninstall(vm):
"""Uninstalls the OpenMPI package on the VM."""
_Uninstall(vm)
def AptUninstall(vm):
"""Uninstalls the OpenMPI package on the VM."""
_Uninstall(vm)
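# A hedged, offline sketch (assumes perfkitbenchmarker itself is importable): a
# stub VM records the commands _Install would issue, which makes it easy to
# inspect the download/configure/make steps without provisioning a real VM.
# The stub class and its attribute values are hypothetical.
if __name__ == '__main__':
    class _StubVM(object):
        num_cpus = 4

        def __init__(self):
            self.commands = []

        def Install(self, package):
            self.commands.append('install %s' % package)

        def RemoteCommand(self, cmd):
            self.commands.append(cmd)

        RobustRemoteCommand = RemoteCommand

    stub = _StubVM()
    _Install(stub)
    print('\n'.join(stub.commands))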
|
apache-2.0
|
robintw/scikit-image
|
doc/examples/plot_holes_and_peaks.py
|
15
|
2623
|
"""
===============================
Filling holes and finding peaks
===============================
In this example, we fill holes (i.e. isolated, dark spots) in an image using
morphological reconstruction by erosion. Erosion expands the minimal values of
the seed image until it encounters a mask image. Thus, the seed image and mask
image represent the maximum and minimum possible values of the reconstructed
image.
We start with an image containing both peaks and holes:
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.exposure import rescale_intensity
image = data.moon()
# Rescale image intensity so that we can see dim features.
image = rescale_intensity(image, in_range=(50, 200))
# convenience function for plotting images
def imshow(image, title, **kwargs):
fig, ax = plt.subplots(figsize=(5, 4))
ax.imshow(image, **kwargs)
ax.axis('off')
ax.set_title(title)
imshow(image, 'Original image')
"""
.. image:: PLOT2RST.current_figure
Now we need to create the seed image, where the minima represent the starting
points for erosion. To fill holes, we initialize the seed image to the maximum
value of the original image. Along the borders, however, we use the original
values of the image. These border pixels will be the starting points for the
erosion process. We then limit the erosion by setting the mask to the values
of the original image.
"""
import numpy as np
from skimage.morphology import reconstruction
seed = np.copy(image)
seed[1:-1, 1:-1] = image.max()
mask = image
filled = reconstruction(seed, mask, method='erosion')
imshow(filled, 'after filling holes', vmin=image.min(), vmax=image.max())
"""
.. image:: PLOT2RST.current_figure
As shown above, eroding inward from the edges removes holes, since (by
definition) holes are surrounded by pixels of brighter value. Finally, we can
isolate the dark regions by subtracting the reconstructed image from the
original image.
"""
imshow(image - filled, 'holes')
"""
.. image:: PLOT2RST.current_figure
Alternatively, we can find bright spots in an image using morphological
reconstruction by dilation. Dilation is the inverse of erosion and expands the
*maximal* values of the seed image until it encounters a mask image. Since this
is an inverse operation, we initialize the seed image to the minimum image
intensity instead of the maximum. The remainder of the process is the same.
"""
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
rec = reconstruction(seed, mask, method='dilation')
imshow(image - rec, 'peaks')
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
|
bsd-3-clause
|
lenstr/rethinkdb
|
test/memcached_workloads/many_rows.py
|
34
|
5440
|
#!/usr/bin/env python
# Copyright 2010-2015 RethinkDB, all rights reserved.
import os, random, sys, time
try:
xrange
except NameError:
xrange = range
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import rdb_workload_common, vcoptparse, utils
errorToleranceSecs = 5
batchSize = 100
op = rdb_workload_common.option_parser_for_connect()
op["num_rows"] = vcoptparse.IntFlag("--num-rows", 5000)
op["sequential"] = vcoptparse.BoolFlag("--sequential")
op["phase"] = vcoptparse.ChoiceFlag("--phase", ["w", "r", "wr"], "wr")
op["tolerate_errors"] = vcoptparse.BoolFlag("--tolerate-errors", invert=True)
opts = op.parse(sys.argv)
with rdb_workload_common.make_table_and_connection(opts) as (table, conn):
keys = None
if "w" in opts["phase"]:
utils.print_with_time("Inserting rows")
# - generate the ids
keys = []
if opts["sequential"]:
keys = xrange(opts["num_rows"])
else:
keys = [x for x in xrange(opts["num_rows"])]
random.shuffle(keys)
# - open key file if not in 'wr' mode (pwd is the test output folder)
keys_file = None
if "r" not in opts["phase"]:
keys_file = open("keys", "w")
# - insert keys
currentBatch = []
for key in keys:
deadline = time.time() + errorToleranceSecs
lastError = None
response = None
# - collect a batch
currentBatch.append(key)
# - process the batch
if len(currentBatch) >= batchSize:
while time.time() < deadline:
try:
response = table.insert([{'id': x, 'val': x} for x in currentBatch], durability="soft").run(conn)
break
except conn._r.ReqlError as e:
if opts["tolerate_errors"]:
lastError = e
time.sleep(.1)
else:
raise
else:
if lastError:
raise lastError
assert response['inserted'] == len(currentBatch), 'Batch insert failed: %r' % response
if keys_file:
keys_file.write(''.join(['%r\n' % x for x in currentBatch]))
utils.print_with_time('\tWrote keys %r to %r... %d records' % (currentBatch[0], currentBatch[-1], len(currentBatch)))
currentBatch = []
# - clean up
response = table.sync().run(conn)
assert response['synced'] == 1, "Failed to sync table: %r" % response
if keys_file:
keys_file.close()
if "r" in opts["phase"]:
if keys:
utils.print_with_time("Checking records")
else:
utils.print_with_time("Checking records using ids from disk")
currentBatch = []
if keys is None:
keys = open("keys", "r")
def readAndCompare(keys, table, conn):
'''Fetch all of the records for keys and compare their values.'''
if not keys:
return
# - check that all records look correct
deadline = time.time() + errorToleranceSecs
lastError = None
while time.time() < deadline:
try:
startKey = keys[0]
endKey = keys[-1]
keyCount = len(keys)
for row in table.get_all(*keys).run(conn):
assert row['id'] in keys, 'Unexpected id in fetched result: %r' % row
assert 'val' in row and row['id'] == row['val'], 'Val did not match id: %r' % row
keys.remove(row['id'])
assert keys == [], 'Database did not have all expected values, missing at least: %r' % keys
utils.print_with_time('\tVerified keys %r to %r... %d records' % (startKey, endKey, keyCount))
break
except conn._r.ReqlError as e:
if opts["tolerate_errors"]:
lastError = e
time.sleep(.1)
else:
raise
else:
if lastError:
raise lastError
for key in keys:
# - eval to transform keys back into values
if hasattr(keys, 'read'):
try:
key = eval(key.strip())
except Exception as e:
raise ValueError('Unusable key value: %r (%s)' % (key, str(e)))
# - collect a batch
currentBatch.append(key)
# - process the batch
if len(currentBatch) >= batchSize:
readAndCompare(currentBatch, table, conn)
currentBatch = []
else:
# - process the final batch
readAndCompare(currentBatch, table, conn)
if hasattr(keys, 'read'):
keys.close()
utils.print_with_time("many_keys success")
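# A standalone sketch (not part of the workload above) of the retry-with-deadline
# pattern both the insert and read phases use: retry a flaky operation, sleeping
# briefly between attempts, and re-raise the last error once the tolerance
# window has passed. The helper name and the usage shown are hypothetical.
def retry_until_deadline(operation, tolerance_secs=errorToleranceSecs, pause=0.1):
    deadline = time.time() + tolerance_secs
    while True:
        try:
            return operation()
        except Exception:
            if time.time() >= deadline:
                raise
            time.sleep(pause)
# e.g. retry_until_deadline(lambda: table.sync().run(conn)) inside the with-block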
|
agpl-3.0
|
icebreaker/dotfiles
|
gnome/gnome2/gedit/plugins.symlink/advanced-bookmarks/plugin.py
|
1
|
5011
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008 - Eugene Khorev
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import gedit
import time
import os
import sys
import gettext
import ConfigParser
import bookmarks
import window_helper
APP_NAME = "plugin"
LOC_PATH = os.path.join(os.path.expanduser("~/.gnome2/gedit/plugins/advanced-bookmarks/lang"))
gettext.find(APP_NAME, LOC_PATH)
gettext.install(APP_NAME, LOC_PATH, True)
class AdvancedBookmarksPlugin(gedit.Plugin):
def __init__(self):
gedit.Plugin.__init__(self)
self._instances = {}
# Setup configuration file path
conf_path = os.path.join(os.path.expanduser("~/.gnome2/gedit/plugins/"), "advanced-bookmarks/plugin.conf")
# Check if the configuration file does not exist yet
if not os.path.exists(conf_path):
# Create configuration file
conf_file = file(conf_path, "wt")
conf_file.close()
# Create configuration dictionary
self.read_config(conf_path)
# Create bookmark list
self._bookmarks = bookmarks.bookmark_list(self._config)
def activate(self, window):
# Create window helper for an instance
self._instances[window] = window_helper.window_helper(self, window, self._bookmarks, self._config)
def deactivate(self, window):
self._instances[window].deactivate()
del self._instances[window]
def update_ui(self, window):
self._instances[window].update_ui()
def create_configure_dialog(self):
# Create configuration dialog
self._dlg_config_glade = gtk.glade.XML(os.path.dirname( __file__ ) + "/config_dlg.glade")
# Get dialog window
self._dlg_config = self._dlg_config_glade.get_widget("config_dialog")
# Setup signals
self._dlg_config_glade.signal_autoconnect(self)
# Setup values of dialog widgets
highlighting = self._config.getboolean("common", "highlighting")
chk = self._dlg_config_glade.get_widget("chk_highlight")
chk.set_active(highlighting)
color = self._config.get("common", "highlight_color")
btn = self._dlg_config_glade.get_widget("btn_color")
try:
btn.set_color(gtk.gdk.color_parse(color))
except:
btn.set_color(gtk.gdk.color_parse("#FFF0DC"))
return self._dlg_config
def on_btn_cancel_clicked(self, btn):
self._dlg_config.response(gtk.RESPONSE_CANCEL)
def on_btn_ok_clicked(self, btn):
self._dlg_config.response(gtk.RESPONSE_OK)
def on_config_dialog_response(self, dlg, res):
if res == gtk.RESPONSE_OK:
# Save configuration
highlight = self._dlg_config_glade.get_widget("chk_highlight").get_active()
self._config.set("common", "highlighting", highlight and "on" or "off")
color = self._dlg_config_glade.get_widget("btn_color").get_color().to_string()
self._config.set("common", "highlight_color", color)
self.write_config()
# Remove bookmark markup in all documents if necessary
for window in self._instances:
self._instances[window].setup_highlighting(highlight)
dlg.hide()
def read_config(self, conf_path): # Reads configuration from a file
self._conf_file = file(conf_path, "r+")
self._config = ConfigParser.ConfigParser()
self._config.readfp(self._conf_file)
# Make sure the necessary options are present in the config
if not self._config.has_section("common"):
self._config.add_section("common")
if not self._config.has_option("common", "highlighting"):
self._config.set("common", "highlighting", "on")
if not self._config.has_option("common", "highlight_color"):
self._config.set("common", "highlight_color", "#FFF0DC")
def write_config(self): # Saves configuration to a file
self._conf_file.truncate(0)
self._conf_file.seek(0)
self._config.write(self._conf_file)
#ex:ts=4:et:
|
mit
|
larrybradley/astropy
|
astropy/coordinates/builtin_frames/icrs_observed_transforms.py
|
3
|
4323
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to "observed" systems from ICRS.
"""
import erfa
from astropy import units as u
from astropy.coordinates.builtin_frames.utils import atciqz, aticq
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from astropy.coordinates.representation import (SphericalRepresentation,
CartesianRepresentation,
UnitSphericalRepresentation)
from .icrs import ICRS
from .altaz import AltAz
from .hadec import HADec
from .utils import PIOVER2
from ..erfa_astrom import erfa_astrom
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, AltAz)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, HADec)
def icrs_to_observed(icrs_coo, observed_frame):
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (isinstance(icrs_coo.data, UnitSphericalRepresentation) or
icrs_coo.cartesian.x.unit == u.one)
# first set up the astrometry context for ICRS<->observed
astrom = erfa_astrom.get().apco(observed_frame)
# correct for parallax to find BCRS direction from observer (as in erfa.pmpx)
if is_unitspherical:
srepr = icrs_coo.spherical
else:
observer_icrs = CartesianRepresentation(astrom['eb'], unit=u.au, xyz_axis=-1, copy=False)
srepr = (icrs_coo.cartesian - observer_icrs).represent_as(
SphericalRepresentation)
# convert to topocentric CIRS
cirs_ra, cirs_dec = atciqz(srepr, astrom)
# now perform observed conversion
if isinstance(observed_frame, AltAz):
lon, zen, _, _, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
lat = PIOVER2 - zen
else:
_, _, lon, lat, _ = erfa.atioq(cirs_ra, cirs_dec, astrom)
if is_unitspherical:
obs_srepr = UnitSphericalRepresentation(lon << u.radian, lat << u.radian, copy=False)
else:
obs_srepr = SphericalRepresentation(lon << u.radian, lat << u.radian, srepr.distance, copy=False)
return observed_frame.realize_frame(obs_srepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, AltAz, ICRS)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HADec, ICRS)
def observed_to_icrs(observed_coo, icrs_frame):
# if the data are UnitSphericalRepresentation, we can skip the distance calculations
is_unitspherical = (isinstance(observed_coo.data, UnitSphericalRepresentation) or
observed_coo.cartesian.x.unit == u.one)
usrepr = observed_coo.represent_as(UnitSphericalRepresentation)
lon = usrepr.lon.to_value(u.radian)
lat = usrepr.lat.to_value(u.radian)
if isinstance(observed_coo, AltAz):
# the 'A' indicates zen/az inputs
coord_type = 'A'
lat = PIOVER2 - lat
else:
coord_type = 'H'
# first set up the astrometry context for ICRS<->CIRS at the observed_coo time
astrom = erfa_astrom.get().apco(observed_coo)
# Topocentric CIRS
cirs_ra, cirs_dec = erfa.atoiq(coord_type, lon, lat, astrom) << u.radian
if is_unitspherical:
srepr = SphericalRepresentation(cirs_ra, cirs_dec, 1, copy=False)
else:
srepr = SphericalRepresentation(lon=cirs_ra, lat=cirs_dec,
distance=observed_coo.distance, copy=False)
# BCRS (Astrometric) direction to source
bcrs_ra, bcrs_dec = aticq(srepr, astrom) << u.radian
# Correct for parallax to get ICRS representation
if is_unitspherical:
icrs_srepr = UnitSphericalRepresentation(bcrs_ra, bcrs_dec, copy=False)
else:
icrs_srepr = SphericalRepresentation(lon=bcrs_ra, lat=bcrs_dec,
distance=observed_coo.distance, copy=False)
observer_icrs = CartesianRepresentation(astrom['eb'], unit=u.au, xyz_axis=-1, copy=False)
newrepr = icrs_srepr.to_cartesian() + observer_icrs
icrs_srepr = newrepr.represent_as(SphericalRepresentation)
return icrs_frame.realize_frame(icrs_srepr)
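# --- Editor's usage sketch (assumption: the standard public astropy API;
# the site, time and target below are arbitrary example values). It
# illustrates the ICRS<->observed transforms registered above via SkyCoord,
# and is guarded so it never runs on import.
if __name__ == '__main__':
    from astropy.coordinates import SkyCoord, EarthLocation, AltAz
    from astropy.time import Time
    import astropy.units as u

    site = EarthLocation(lat=52.2 * u.deg, lon=0.12 * u.deg, height=20 * u.m)
    frame = AltAz(obstime=Time('2021-01-01T00:00:00'), location=site)
    icrs = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg, frame='icrs')
    observed = icrs.transform_to(frame)         # exercises icrs_to_observed
    round_trip = observed.transform_to('icrs')  # exercises observed_to_icrs
    print(observed.alt, observed.az, round_trip.separation(icrs))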
|
bsd-3-clause
|
sarvex/tensorflow
|
tensorflow/python/keras/tests/model_architectures.py
|
6
|
10308
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saving/loading function for keras Model."""
import collections
from tensorflow.python import keras
# Declaring namedtuple()
ModelFn = collections.namedtuple('ModelFn',
['model', 'input_shape', 'target_shape'])
def basic_sequential():
"""Basic sequential model."""
model = keras.Sequential([
keras.layers.Dense(3, activation='relu', input_shape=(3,)),
keras.layers.Dense(2, activation='softmax'),
])
return ModelFn(model, (None, 3), (None, 2))
def basic_sequential_deferred():
"""Sequential model with deferred input shape."""
model = keras.Sequential([
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax'),
])
return ModelFn(model, (None, 3), (None, 2))
def stacked_rnn():
"""Stacked RNN model."""
inputs = keras.Input((None, 3))
layer = keras.layers.RNN([keras.layers.LSTMCell(2) for _ in range(3)])
x = layer(inputs)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 4, 3), (None, 2))
def lstm():
"""LSTM model."""
inputs = keras.Input((None, 3))
x = keras.layers.LSTM(4, return_sequences=True)(inputs)
x = keras.layers.LSTM(3, return_sequences=True)(x)
x = keras.layers.LSTM(2, return_sequences=False)(x)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 4, 3), (None, 2))
def multi_input_multi_output():
"""Multi-input Multi-output model."""
body_input = keras.Input(shape=(None,), name='body')
tags_input = keras.Input(shape=(2,), name='tags')
x = keras.layers.Embedding(10, 4)(body_input)
body_features = keras.layers.LSTM(5)(x)
x = keras.layers.concatenate([body_features, tags_input])
pred_1 = keras.layers.Dense(2, activation='sigmoid', name='priority')(x)
pred_2 = keras.layers.Dense(3, activation='softmax', name='department')(x)
model = keras.Model(
inputs=[body_input, tags_input], outputs=[pred_1, pred_2])
return ModelFn(model, [(None, 1), (None, 2)], [(None, 2), (None, 3)])
def nested_sequential_in_functional():
"""A sequential model nested in a functional model."""
inner_model = keras.Sequential([
keras.layers.Dense(3, activation='relu', input_shape=(3,)),
keras.layers.Dense(2, activation='relu'),
])
inputs = keras.Input(shape=(3,))
x = inner_model(inputs)
outputs = keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 3), (None, 2))
def seq_to_seq():
"""Sequence to sequence model."""
num_encoder_tokens = 3
num_decoder_tokens = 3
latent_dim = 2
encoder_inputs = keras.Input(shape=(None, num_encoder_tokens))
encoder = keras.layers.LSTM(latent_dim, return_state=True)
_, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))
decoder_lstm = keras.layers.LSTM(
latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(
decoder_inputs, initial_state=encoder_states)
decoder_dense = keras.layers.Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
return ModelFn(
model, [(None, 2, num_encoder_tokens), (None, 2, num_decoder_tokens)],
(None, 2, num_decoder_tokens))
def shared_layer_functional():
"""Shared layer in a functional model."""
main_input = keras.Input(shape=(10,), dtype='int32', name='main_input')
x = keras.layers.Embedding(
output_dim=5, input_dim=4, input_length=10)(main_input)
lstm_out = keras.layers.LSTM(3)(x)
auxiliary_output = keras.layers.Dense(
1, activation='sigmoid', name='aux_output')(lstm_out)
auxiliary_input = keras.Input(shape=(5,), name='aux_input')
x = keras.layers.concatenate([lstm_out, auxiliary_input])
x = keras.layers.Dense(2, activation='relu')(x)
main_output = keras.layers.Dense(
1, activation='sigmoid', name='main_output')(x)
model = keras.Model(
inputs=[main_input, auxiliary_input],
outputs=[main_output, auxiliary_output])
return ModelFn(model, [(None, 10), (None, 5)], [(None, 1), (None, 1)])
def shared_sequential():
"""Shared sequential model in a functional model."""
inner_model = keras.Sequential([
keras.layers.Conv2D(2, 3, activation='relu'),
keras.layers.Conv2D(2, 3, activation='relu'),
])
inputs_1 = keras.Input((5, 5, 3))
inputs_2 = keras.Input((5, 5, 3))
x1 = inner_model(inputs_1)
x2 = inner_model(inputs_2)
x = keras.layers.concatenate([x1, x2])
outputs = keras.layers.GlobalAveragePooling2D()(x)
model = keras.Model([inputs_1, inputs_2], outputs)
return ModelFn(model, [(None, 5, 5, 3), (None, 5, 5, 3)], (None, 4))
class MySubclassModel(keras.Model):
"""A subclass model."""
def __init__(self, input_dim=3):
super(MySubclassModel, self).__init__(name='my_subclass_model')
self._config = {'input_dim': input_dim}
self.dense1 = keras.layers.Dense(8, activation='relu')
self.dense2 = keras.layers.Dense(2, activation='softmax')
self.bn = keras.layers.BatchNormalization()
self.dp = keras.layers.Dropout(0.5)
def call(self, inputs, **kwargs):
x = self.dense1(inputs)
x = self.dp(x)
x = self.bn(x)
return self.dense2(x)
def get_config(self):
return self._config
@classmethod
def from_config(cls, config):
return cls(**config)
def nested_subclassed_model():
"""A subclass model nested in another subclass model."""
class NestedSubclassModel(keras.Model):
"""A nested subclass model."""
def __init__(self):
super(NestedSubclassModel, self).__init__()
self.dense1 = keras.layers.Dense(4, activation='relu')
self.dense2 = keras.layers.Dense(2, activation='relu')
self.bn = keras.layers.BatchNormalization()
self.inner_subclass_model = MySubclassModel()
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.inner_subclass_model(x)
return self.dense2(x)
return ModelFn(NestedSubclassModel(), (None, 3), (None, 2))
def nested_subclassed_in_functional_model():
"""A subclass model nested in a functional model."""
inner_subclass_model = MySubclassModel()
inputs = keras.Input(shape=(3,))
x = inner_subclass_model(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 3), (None, 2))
def nested_functional_in_subclassed_model():
"""A functional model nested in a subclass model."""
def get_functional_model():
inputs = keras.Input(shape=(4,))
x = keras.layers.Dense(4, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2)(x)
return keras.Model(inputs, outputs)
class NestedFunctionalInSubclassModel(keras.Model):
"""A functional nested in subclass model."""
def __init__(self):
super(NestedFunctionalInSubclassModel, self).__init__(
name='nested_functional_in_subclassed_model')
self.dense1 = keras.layers.Dense(4, activation='relu')
self.dense2 = keras.layers.Dense(2, activation='relu')
self.inner_functional_model = get_functional_model()
def call(self, inputs):
x = self.dense1(inputs)
x = self.inner_functional_model(x)
return self.dense2(x)
return ModelFn(NestedFunctionalInSubclassModel(), (None, 3), (None, 2))
def shared_layer_subclassed_model():
"""Shared layer in a subclass model."""
class SharedLayerSubclassModel(keras.Model):
"""A subclass model with shared layers."""
def __init__(self):
super(SharedLayerSubclassModel, self).__init__(
name='shared_layer_subclass_model')
self.dense = keras.layers.Dense(3, activation='relu')
self.dp = keras.layers.Dropout(0.5)
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense(inputs)
x = self.dp(x)
x = self.bn(x)
return self.dense(x)
return ModelFn(SharedLayerSubclassModel(), (None, 3), (None, 3))
def functional_with_keyword_args():
"""A functional model with keyword args."""
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(4)(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs, name='m', trainable=False)
return ModelFn(model, (None, 3), (None, 2))
ALL_MODELS = [
('basic_sequential', basic_sequential),
('basic_sequential_deferred', basic_sequential_deferred),
('stacked_rnn', stacked_rnn),
('lstm', lstm),
('multi_input_multi_output', multi_input_multi_output),
('nested_sequential_in_functional', nested_sequential_in_functional),
('seq_to_seq', seq_to_seq),
('shared_layer_functional', shared_layer_functional),
('shared_sequential', shared_sequential),
('nested_subclassed_model', nested_subclassed_model),
('nested_subclassed_in_functional_model',
nested_subclassed_in_functional_model),
('nested_functional_in_subclassed_model',
nested_functional_in_subclassed_model),
('shared_layer_subclassed_model', shared_layer_subclassed_model),
('functional_with_keyword_args', functional_with_keyword_args)
]
def get_models(exclude_models=None):
  """Get all models excluding the specified ones."""
  exclude_models = exclude_models or []
  models = [model for model in ALL_MODELS
            if model[0] not in exclude_models]
  return models
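# --- Editor's usage sketch (hypothetical driver, not part of the original
# module): iterates over the registry above and prints each model's declared
# input/target shapes. Guarded so it never runs on import.
if __name__ == '__main__':
  for name, model_fn in get_models(exclude_models=['lstm']):
    model, input_shape, target_shape = model_fn()
    print(name, input_shape, target_shape)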
|
apache-2.0
|
kennethlove/django
|
django/utils/importlib.py
|
445
|
1229
|
# Taken from Python 2.7 with permission from/by the original author.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
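# --- Editor's usage sketch (the module names are illustrative assumptions):
# demonstrates both the absolute and the relative form described in the
# import_module docstring above. Guarded so it never runs on import.
if __name__ == '__main__':
    os_path = import_module('os.path')                   # absolute import
    print(os_path.join('a', 'b'))
    encoder = import_module('.encoder', package='json')  # relative import anchored at 'json'
    print(encoder.JSONEncoder)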
|
bsd-3-clause
|
hyperized/ansible
|
lib/ansible/plugins/lookup/indexed_items.py
|
100
|
1579
|
# (c) 2012, Michael DeHaan <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: indexed_items
author: Michael DeHaan <[email protected]>
version_added: "1.3"
short_description: rewrites lists to return 'indexed items'
description:
- use this lookup if you want to loop over an array and also get the numeric index of where you are in the array as you go
- any list given will be transformed, with each resulting element having its original position in item.0 and its value in item.1
options:
_terms:
description: list of items
required: True
"""
EXAMPLES = """
- name: indexed loop demo
debug:
msg: "at array position {{ item.0 }} there is a value {{ item.1 }}"
with_indexed_items:
- "{{ some_list }}"
"""
RETURN = """
_raw:
description:
- list with each item.0 giving you the position and item.1 the value
type: list
"""
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_indexed_items expects a list")
items = self._flatten(terms)
return list(zip(range(len(items)), items))
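# --- Editor's sanity-check sketch (not part of the plugin): calling run()
# directly shows the (index, value) pairs described in DOCUMENTATION above.
# Requires ansible to be importable; guarded so it never runs on import.
if __name__ == '__main__':
    lookup = LookupModule()
    print(lookup.run([['a', 'b', 'c']], variables={}))  # [(0, 'a'), (1, 'b'), (2, 'c')]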
|
gpl-3.0
|
ConeyLiu/spark
|
python/pyspark/shell.py
|
37
|
2333
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An interactive shell.
This file is designed to be launched as a PYTHONSTARTUP script.
"""
import atexit
import os
import platform
import warnings
import py4j
from pyspark import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, SQLContext
if os.environ.get("SPARK_EXECUTOR_URI"):
SparkContext.setSystemProperty("spark.executor.uri", os.environ["SPARK_EXECUTOR_URI"])
SparkContext._ensure_initialized()
try:
spark = SparkSession._create_shell_session()
except Exception:
import sys
import traceback
warnings.warn("Failed to initialize Spark session.")
traceback.print_exc(file=sys.stderr)
sys.exit(1)
sc = spark.sparkContext
sql = spark.sql
atexit.register(lambda: sc.stop())
# for compatibility
sqlContext = spark._wrapped
sqlCtx = sqlContext
print(r"""Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/__ / .__/\_,_/_/ /_/\_\ version %s
/_/
""" % sc.version)
print("Using Python version %s (%s, %s)" % (
platform.python_version(),
platform.python_build()[0],
platform.python_build()[1]))
print("SparkSession available as 'spark'.")
# The ./bin/pyspark script stores the old PYTHONSTARTUP value in OLD_PYTHONSTARTUP,
# which allows us to execute the user's PYTHONSTARTUP file:
_pythonstartup = os.environ.get('OLD_PYTHONSTARTUP')
if _pythonstartup and os.path.isfile(_pythonstartup):
with open(_pythonstartup) as f:
code = compile(f.read(), _pythonstartup, 'exec')
exec(code)
|
apache-2.0
|